content (string, 7–928k) | avg_line_length (float64, 3.5–33.8k) | max_line_length (int64, 6–139k) | alphanum_fraction (float64, 0.08–0.96) | licenses (sequence) | repository_name (string, 7–104) | path (string, 4–230) | size (int64, 7–928k) | lang (string, 1 class)
---|---|---|---|---|---|---|---|---|
''' written by Emanuel Ramirez ([email protected]) '''
class LanguageFlagNotFound(Exception):
pass
class AlgorithmFlagNotFound(Exception):
pass
| 20.625 | 59 | 0.721212 | [
"MIT"
] | emanuel2718/algocli | algocli/errors.py | 165 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update handling."""
from __future__ import print_function, unicode_literals, absolute_import
import re, time, os, threading, zipfile, tarfile
try: # Python 2
# pylint:disable=import-error, no-name-in-module
from urllib import quote, unquote
from urlparse import urlparse
except ImportError: # Python 3
# pylint:disable=import-error, no-name-in-module,ungrouped-imports
from urllib.parse import quote, unquote, urlparse
from .lnp import lnp
from . import launcher, paths, download, log
from .json_config import JSONConfiguration
def updates_configured():
"""Returns True if update checking have been configured."""
return prepare_updater() is not None
def check_update():
"""Checks for updates using the URL specified in PyLNP.json."""
if not updates_configured():
return
if not lnp.userconfig.has_value('updateDays'):
interval = lnp.config.get_value('updates/defaultInterval', -1)
if interval != -1 and lnp.ui.on_request_update_permission(interval):
next_update(interval)
else:
next_update(-1)
if lnp.userconfig.get_value('updateDays', -1) == -1:
return
if lnp.userconfig.get_number('nextUpdate') < time.time():
t = threading.Thread(target=perform_update_check)
t.daemon = True
t.start()
def perform_update_check():
"""Performs the actual update check. Runs in a thread."""
# pylint:disable=bare-except
prepare_updater()
if lnp.updater.update_needed():
lnp.new_version = lnp.updater.get_version()
lnp.ui.on_update_available()
def prepare_updater():
"""Returns an Updater object for the configured updater."""
if lnp.updater:
return lnp.updater
updaters = {'regex': RegexUpdater, 'json': JSONUpdater, 'dffd': DFFDUpdater}
updater_id = lnp.config.get('updates/updateMethod', None)
if updater_id is None:
#TODO: Remove this after packs have had time to migrate
log.w(
'Update method not configured in PyLNP.json! Will attempt to '
'auto-detect. Please set this value correctly, auto-detection will '
'go away eventually!')
if lnp.config.get_string('updates/dffdID'):
updater_id = 'dffd'
log.w('Updater detected: dffd')
elif lnp.config.get_string('updates/versionRegex'):
updater_id = 'regex'
log.w('Updater detected: regex')
elif lnp.config.get_string('updates/versionJsonPath'):
updater_id = 'json'
log.w('Updater detected: json')
else:
log.w('Could not detect update method, updates will not work')
return None
elif updater_id == '' or not lnp.config.get('updates'):
return None
if updater_id not in updaters:
log.e('Unknown update method: '+updater_id)
return None
lnp.updater = updaters[updater_id]()
return lnp.updater
def next_update(days):
"""Sets the next update check to occur in <days> days."""
lnp.userconfig['nextUpdate'] = (time.time() + days * 24 * 60 * 60)
lnp.userconfig['updateDays'] = days
lnp.save_config()
def start_update():
"""Launches a webbrowser to the specified update URL."""
launcher.open_url(lnp.updater.get_download_url())
def download_df_baseline(immediate=False):
"""Download the current version of DF from Bay12 Games to serve as a
baseline, in LNP/Baselines/"""
filename = lnp.df_info.get_archive_name()
url = 'http://www.bay12games.com/dwarves/' + filename
target = os.path.join(paths.get('baselines'), filename)
queue_name = 'immediate' if immediate else 'baselines'
download.download(queue_name, url, target)
def direct_download_pack():
"""Directly download a new version of the pack to the current BASEDIR"""
url = lnp.updater.get_direct_url()
fname = lnp.updater.get_direct_filename()
target = os.path.join(lnp.BASEDIR, fname)
download.download('updates', url, target,
end_callback=extract_new_pack)
def extract_new_pack(_, fname, bool_val):
"""Extract a downloaded new pack to a sibling dir of the current pack."""
exts = ('.zip', '.bz2', '.gz', '.7z', '.xz')
if not bool_val or not any(fname.endswith(ext) for ext in exts):
return None
archive = os.path.join(lnp.BASEDIR, os.path.basename(fname))
return extract_archive(archive, os.path.join(lnp.BASEDIR, '..'))
def extract_archive(fname, target):
"""Extract the archive fname to dir target, avoiding explosions."""
if zipfile.is_zipfile(fname):
zf = zipfile.ZipFile(fname)
namelist = zf.namelist()
topdir = namelist[0].split(os.path.sep)[0]
if not all(f.startswith(topdir) for f in namelist):
target = os.path.join(target, os.path.basename(fname).split('.')[0])
zf.extractall(target)
os.remove(fname)
return True
if tarfile.is_tarfile(fname):
tf = tarfile.open(fname)
        namelist = tf.getnames()  # member names as strings (getmembers() returns TarInfo objects)
topdir = namelist[0].split(os.path.sep)[0]
if not all(f.startswith(topdir) for f in namelist):
target = os.path.join(target, fname.split('.')[0])
tf.extractall(target)
os.remove(fname)
return True
# TODO: support '*.xz' and '*.7z' files.
return False
#pylint: disable=attribute-defined-outside-init, no-self-use
class Updater(object):
"""General class for checking for updates."""
def update_needed(self):
"""Checks if an update is necessary."""
self.text = download.download_str(self.get_check_url())
if self.text is None:
log.e("Error checking for updates, could not download text")
curr_version = lnp.config.get_string('updates/packVersion')
if not curr_version:
log.e("Current pack version is not set, cannot check for updates")
return False
return self.get_version() != curr_version
def get_check_url(self):
"""Returns the URL used to check for updates."""
return lnp.config.get_string('updates/checkURL')
def get_version(self):
"""Returns the version listed at the update URL. Must be overridden by
subclasses."""
pass
def get_download_url(self):
"""Returns a URL from which the user can download the update."""
return lnp.config.get_string('updates/downloadURL')
def get_direct_url(self):
"""Returns a URL pointing directly to the update, for download by the
program."""
return lnp.config.get_string('updates/directURL')
def get_direct_filename(self):
"""Returns the filename that should be used for direct downloads."""
directFilename = lnp.config.get_string('updates/directFilename')
if directFilename:
return directFilename
url_fragments = urlparse(self.get_direct_url())
return os.path.basename(unquote(url_fragments.path))
class RegexUpdater(Updater):
"""Updater class which uses regular expressions to locate the version (and
optionally also the download URLs)."""
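    # Illustrative PyLNP.json fragment for this updater (hypothetical values; the
    # keys mirror the lnp.config lookups used in this module):
    #   "updates": {"updateMethod": "regex",
    #               "checkURL": "http://example.com/latest.txt",
    #               "versionRegex": "Version ([0-9.]+)",
    #               "packVersion": "1.0"}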
def get_version(self):
versionRegex = lnp.config.get_string('updates/versionRegex')
if not versionRegex:
log.e('Version regex not configured!')
return re.search(versionRegex, self.text).group(1)
def get_download_url(self):
urlRegex = lnp.config.get_string('updates/downloadURLRegex')
result = ''
if urlRegex:
result = re.search(urlRegex, self.text).group(1)
if result:
return result
else:
return super(RegexUpdater, self).get_download_url()
def get_direct_url(self):
urlRegex = lnp.config.get_string('updates/directURLRegex')
result = ''
if urlRegex:
result = re.search(urlRegex, self.text).group(1)
if result:
return result
else:
return super(RegexUpdater, self).get_direct_url()
class JSONUpdater(Updater):
"""Updater class which uses a JSON object to locate the version (and
optionally also the download URLs)."""
def get_version(self):
self.json = JSONConfiguration.from_text(self.text)
jsonPath = lnp.config.get_string('updates/versionJsonPath')
if not jsonPath:
log.e('JSON path to version not configured!')
return self.json.get_string(jsonPath)
def get_download_url(self):
jsonPath = lnp.config.get_string('updates/downloadURLJsonPath')
result = ''
if jsonPath:
result = self.json.get_string(jsonPath)
if result:
return result
else:
return super(JSONUpdater, self).get_download_url()
def get_direct_url(self):
jsonPath = lnp.config.get_string('updates/directURLJsonPath')
result = ''
if jsonPath:
result = self.json.get_string(jsonPath)
if result:
return result
else:
return super(JSONUpdater, self).get_direct_url()
def get_direct_filename(self):
jsonPath = lnp.config.get_string('updates/directFilenameJsonPath')
result = ''
if jsonPath:
result = self.json.get_string(jsonPath)
if result:
return result
else:
return super(JSONUpdater, self).get_direct_filename()
class DFFDUpdater(Updater):
"""Updater class for DFFD-hosted downloads."""
def get_check_url(self):
self.dffd_id = lnp.config.get_string('updates/dffdID')
if not self.dffd_id:
log.e('Field "updates/dffdID" must be set in PyLNP.json')
return 'http://dffd.bay12games.com/file_data/{}.json'.format(
self.dffd_id)
def get_version(self):
self.json = JSONConfiguration.from_text(self.text)
return self.json.get_string('version')
def get_download_url(self):
return 'http://dffd.bay12games.com/file.php?id='+self.dffd_id
def get_direct_url(self):
result = 'http://dffd.bay12games.com/download.php?id={0}&f={1}'
return result.format(
self.dffd_id, quote(self.json.get_string('filename')))
def get_direct_filename(self):
return self.json.get_string('filename')
| 37.89011 | 80 | 0.649072 | [
"ISC"
] | McArcady/python-lnp | core/update.py | 10,344 | Python |
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score, r2_score
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
def compile_model(network):
"""
:param network dict: dictionary with network parameters
:return: compiled model
"""
model = lgb.LGBMRegressor(num_leaves=network.get('num_leaves', 31),
learning_rate=network.get('learning_rate', 0.1),
n_estimators=network.get('n_estimators', 20),
max_bin=network.get('max_bin', 1000),
colsample_bytree=network.get('colsample_bytree', 0.5),
subsample_for_bin=network.get('subsample_for_bin', 200000),
boosting_type=network.get('boosting_type', 'gbdt'),
num_iterations=network.get('num_iterations', 100),
extra_trees=network.get('extra_trees', False),
reg_sqrt= network.get('reg_sqrt', False),
bagging_freq = network.get('bagging_freq', 1),
bagging_fraction = network.get('bagging_fraction', 0.1))
return model
def train_and_score(network, x_train, y_train, x_test, y_test):
"""
:param network dict: dictionary with network parameters
    :param x_train array: numpy array with features for training
    :param y_train array: numpy array with labels for training
    :param x_test array: numpy array with features for test
:param y_test array: numpy array with labels for test
:return float: score
"""
model = compile_model(network)
model.fit(x_train, y_train)
y_pred = model.predict(np.array(x_test))
true = y_test
pred = y_pred
print(' R2 = ', r2_score(true, pred))
return r2_score(true, pred), model
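# Illustrative usage (hypothetical values; keys missing from the dict fall back to
# the defaults in compile_model):
#   network = {'num_leaves': 63, 'learning_rate': 0.05, 'n_estimators': 200}
#   score, model = train_and_score(network, x_train, y_train, x_test, y_test)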
| 35.927273 | 103 | 0.613866 | [
"MIT"
] | EvanBagis/gb_rf_evolution | gb_rf_evolution/gb_train.py | 1,976 | Python |
from datetime import datetime
from pydantic.main import BaseModel
from factory import db
from utils.models import OrmBase
from typing import List
class Post(db.Model):
__tablename__ = "post"
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.UnicodeText)
created = db.Column(db.DateTime, default=datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
def __repr__(self) -> str:
return f"<Post {self.id}>"
class PostCreate(BaseModel):
text: str
class PostResponse(OrmBase):
text: str
created: datetime
author_id: int
class PostResponseList(BaseModel):
page: int
pages: int
total: int
posts: List[PostResponse]
| 19.486486 | 63 | 0.699029 | [
"BSD-3-Clause"
] | ThiNepo/neps-guide-flask-1 | API_course/models/post.py | 721 | Python |
from logging import getLogger
from mpi4py import MPI
logger = getLogger('om.mpi_ctrl')
WORKTAG = 0
DIETAG = 1
class MpiMaster(object):
def __init__(self, run_control, comm, rank, size):
self.run_control = run_control
self.comm = comm
self.rank = rank
self.size = size
logger.info('Initialized MPI master: {}/{}', rank, size)
def run(self):
task_master = self.run_control._task_master
status = MPI.Status()
# Launch all tasks initially.
if self.size > len(task_master.pending_tasks):
logger.warning('MPI size > # of pending tasks, not sure what will happen')
waiting_dests = list(range(1, self.size)[::-1])
# TODO: should handle exception in slave by consuming all data and issuing dies.
# Farm out rest of work when a worker reports back that it's done.
while True:
try:
task = task_master.get_next_pending()
if not task:
# There are tasks with unmet dependencies.
waiting_dests.append(dest)
logger.debug('appended waiting dests: {}', waiting_dests)
except StopIteration:
logger.debug('All tasks sent')
break
need_to_block = not waiting_dests or not task
if need_to_block:
# Block until notified of completion.
rdata = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
logger.info('Data received from {}', status.Get_source())
logger.debug('data: {}', rdata)
if rdata['command'] == 'error':
logger.error('Rank {} raised error', status.Get_source())
logger.error(rdata['msg'])
raise Exception('Unrecoverable error')
received_task = rdata['task'] # reconstituted via pickle.
task_master.update_task(received_task.index, received_task.status)
if task:
if waiting_dests:
# Clear backlog of waiting dests.
logger.debug('pop waiting dests: {}', waiting_dests)
dest = waiting_dests.pop()
else:
dest = status.Get_source()
data = {'command': 'run_task', 'task': task}
logger.info('Sending data to {}', dest)
logger.debug('data: {}', data)
self.comm.send(data, dest=dest, tag=WORKTAG)
# We are done! Listen for final data responses.
for dest in range(1, self.size - len(waiting_dests)):
rdata = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
if rdata['command'] == 'error':
logger.error('Rank {} raised error', status.Get_source())
logger.error(rdata['msg'])
raise Exception('Unrecoverable error')
received_task = rdata['task'] # reconstituted via pickle.
task_master.update_task(received_task.index, received_task.status)
logger.info('Final data received from {}', status.Get_source())
logger.debug('data: {}', rdata)
# Send all slaves a die command.
for dest in range(1, self.size):
data = {'command': 'die'}
logger.info('Sending die to {}', dest)
self.comm.send(data, dest=dest, tag=DIETAG)
logger.info('Finished')
class MpiSlave(object):
def __init__(self, run_control, comm, rank, size):
self.run_control = run_control
self.comm = comm
self.rank = rank
self.size = size
logger.info('Initialized MPI slave: {}/{}', rank, size)
def listen(self):
try:
status = MPI.Status()
while True:
logger.debug('Waiting for data')
data = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
logger.debug('Received data: {}', data)
if status.Get_tag() == DIETAG:
break
else:
self.run_control.run_task(data['task'])
data['task'].status = 'done'
self.comm.send(data, dest=0, tag=WORKTAG)
logger.debug('Finished')
except Exception as e:
logger.error(e)
data = {'command': 'error', 'msg': str(e)}
self.comm.send(data, dest=0, tag=WORKTAG)
return
| 39.53913 | 93 | 0.548933 | [
"Apache-2.0"
] | markmuetz/omnium | omnium/run_control/mpi_control.py | 4,547 | Python |
# needs mayavi2
# run with ipython -wthread
import networkx as nx
import numpy as np
from enthought.mayavi import mlab
# some graphs to try
#H=nx.krackhardt_kite_graph()
#H=nx.Graph();H.add_edge('a','b');H.add_edge('a','c');H.add_edge('a','d')
#H=nx.grid_2d_graph(4,5)
H=nx.cycle_graph(20)
# reorder nodes from 0,len(G)-1
G=nx.convert_node_labels_to_integers(H)
# 3d spring layout
pos=nx.spring_layout(G,dim=3)
# numpy array of x,y,z positions in sorted node order
xyz=np.array([pos[v] for v in sorted(G)])
# scalar colors
scalars=np.array(list(G.nodes()))+5
mlab.figure(1, bgcolor=(0, 0, 0))
mlab.clf()
pts = mlab.points3d(xyz[:,0], xyz[:,1], xyz[:,2],
scalars,
scale_factor=0.1,
scale_mode='none',
colormap='Blues',
resolution=20)
pts.mlab_source.dataset.lines = np.array(list(G.edges()))
tube = mlab.pipeline.tube(pts, tube_radius=0.01)
mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
mlab.savefig('mayavi2_spring.png')
# mlab.show() # interactive window
| 29.026316 | 73 | 0.625567 | [
"BSD-3-Clause"
] | AllenDowney/networkx | examples/3d_drawing/mayavi2_spring.py | 1,103 | Python |
#!/usr/bin/env python
import getopt
import sys
from coapthon.server.coap import CoAP
from exampleresources import BasicResource, Long, Separate, Storage, Big, voidResource, XMLResource, ETAGResource, \
Child, \
MultipleEncodingResource, AdvancedResource, AdvancedResourceSeparate, DynamicResource
__author__ = 'Giacomo Tanganelli'
class CoAPServer(CoAP):
def __init__(self, host, port, multicast=False):
CoAP.__init__(self, (host, port), multicast)
self.add_resource('basic/', BasicResource())
self.add_resource('storage/', Storage())
self.add_resource('separate/', Separate())
self.add_resource('long/', Long())
self.add_resource('big/', Big())
self.add_resource('void/', voidResource())
self.add_resource('xml/', XMLResource())
self.add_resource('encoding/', MultipleEncodingResource())
self.add_resource('etag/', ETAGResource())
self.add_resource('child/', Child())
self.add_resource('advanced/', AdvancedResource())
self.add_resource('advancedSeparate/', AdvancedResourceSeparate())
self.add_resource('dynamic/', DynamicResource())
print "CoAP Server start on " + host + ":" + str(port)
print self.root.dump()
def usage(): # pragma: no cover
print "coapserver.py -i <ip address> -p <port>"
def main(argv): # pragma: no cover
ip = "0.0.0.0"
port = 5683
multicast = False
try:
opts, args = getopt.getopt(argv, "hi:p:m", ["ip=", "port=", "multicast"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-i", "--ip"):
ip = arg
elif opt in ("-p", "--port"):
port = int(arg)
elif opt in ("-m", "--multicast"):
multicast = True
server = CoAPServer(ip, port, multicast)
try:
server.listen(10)
except KeyboardInterrupt:
print "Server Shutdown"
server.close()
print "Exiting..."
if __name__ == "__main__": # pragma: no cover
main(sys.argv[1:])
| 32.173913 | 117 | 0.588739 | [
"MIT"
] | Dalanke/CoAPthon | coapserver.py | 2,220 | Python |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.prices_trading_schedule_event_list_data_filter import PricesTradingScheduleEventListDataFilter
globals()['PricesTradingScheduleEventListDataFilter'] = PricesTradingScheduleEventListDataFilter
class PricesTradingScheduleEventListData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'filter': (PricesTradingScheduleEventListDataFilter,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'filter': 'filter', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, id, *args, **kwargs): # noqa: E501
"""PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, *args, **kwargs): # noqa: E501
"""PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.511029 | 144 | 0.581158 | [
"Apache-2.0"
] | factset/enterprise-sdk | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/prices_trading_schedule_event_list_data.py | 11,835 | Python |
#!/usr/bin/env python3
"""Define public exports."""
__all__ = ["OutputFileExists", "InvalidDomain", "FileWriteError", "NoDomains"]
class NoDomains(Exception):
"""Raise when no domains are passed to findcdn main."""
def __init__(self, error):
"""Instantiate super class with passed message."""
self.message = "No domains were passed!"
super().__init__(self.message)
class OutputFileExists(Exception):
"""Raise when file already exists when writing in findcdn."""
def __init__(self, outFile):
"""Instantiate super class with passed message with passed in filename."""
self.message = "A file with the name " + outFile + " already exists!"
super().__init__(self.message)
class InvalidDomain(Exception):
"""Raise when an invalid domain is inputted in findcnd.main()."""
def __init__(self, item):
"""Instantiate super class with passed message with passed in item."""
self.message = item + " is not a valid domain in findcdn.main()"
super().__init__(self.message)
class FileWriteError(Exception):
"""Raise when there is a problem writing to a file in findcnd."""
def __init__(self, error):
"""Instantiate super class with passed message using passed in error."""
self.message = (
"The following error occurred in findcdn while file writing:\n"
+ repr(error)
)
super().__init__(self.message)
| 33.136364 | 82 | 0.657064 | [
"CC0-1.0"
] | Pascal-0x90/findCDN | src/findcdn/findcdn_err.py | 1,458 | Python |
import pandas as pd
from utils.config import Config
import numpy as np
def fun_clean_categogy1(array, keyw1, index, BOW):
compty = 0
c = 0
for elm in array:
if elm == "oui" or elm == "parfois":
BOW[c].append(keyw1[index])
compty += 1
c += 1
# print(compty)
return BOW
# Add the category 2 keywords. WARNING: here the entire content of the columns is added,
# so a wide variety of words can appear because of this. Moreover, these are often compound
# words or word sequences. One option would be to keep only the first word, for example.
def fun_clean_categogy2(array, BOW):
compty = 0
c = 0
for elm in array:
if not elm == "":
if not BOW[c].__contains__(elm):
BOW[c].append(elm)
compty += 1
c += 1
# print(compty)
return BOW
def fun_clean_categogy3(array, keyw3, index, BOW, list_THR):
compty = 0
c = 0
for elm in array:
# print(elm)
if not np.isnan(float(str(elm).replace(",", "."))):
if float(str(elm).replace(",", ".")) > list_THR[index]:
                if not BOW[c].__contains__(keyw3[index]):  # avoid duplicate keywords
BOW[c].append(keyw3[index])
compty += 1
c += 1
print(compty)
return BOW
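# Worked example using the lists defined in __main__ below: the column "EVA douleurs"
# is paired with the keyword "douleurs" and the threshold 5, so a patient scoring 7
# gets "douleurs" appended to their bag of words, while a patient scoring 3 does not.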
if __name__ == '__main__':
# %%
df = pd.read_csv(Config.csv_files[-1], sep=';', encoding='ISO-8859-1')
df.columns
#
# d = {'col1': [1, 2], 'col2': [3, 4]}
# df = pd.DataFrame(data=d)
List_cat1 = ["difficulté endormisst", "fatigue au reveil", "hyperacousie", "surdité", "SDE", "vertiges",
"depression", "anxiété"]
    # Keywords associated with the category 1 columns
keyw1 = ["endormissement", "fatigue", "hyperacousie", "surdité", "somnolence", "vertige", "dépression", "anxiété"]
List_cat2 = ["timbre acouphène", "type de douleurs", "type otalgie", "type de vertiges",
"caractere particulier", "mode apparition"]
List_cat3 = ["EVA depression", "epworth", "EVA anxiété", "EVA douleurs", "EVA hyperac", "EVA hypoac",
"EVA Otalgie 1", "EVA SADAM", "EVA vertiges", "ISI", "score khalfa hyperacousie", "EVA concentration"]
    # Keywords associated with the category 3 columns
keyw3 = ["dépression", "somnolence", "anxiété", "douleurs", "hyperacousie", "hypoacousie", "otalgie", "mâchoire",
"vertige", "sommeil", "hyperacousie", "concentration"]
    # Selection thresholds associated with the category 3 columns
List_THR = [5, 10, 5, 5, 5, 5, 4, 3, 3, 12, 20, 5]
cat4 = ["intensité ac"]
compt = 0
    # List of keywords associated with each patient. One list per patient
BOW = [[] for i in range(len(df[df.columns[0]]))]
    # Add the category 1 keywords to the bag-of-words list BOW
for colname in List_cat1:
# print(df[colname]) # show value before
print(colname)
BOW = fun_clean_categogy1(df[colname], keyw1, compt, BOW)
compt += 1
    # Add the category 2 keywords to the bag-of-words list BOW
compt=0
for colname in List_cat2:
print(colname)
BOW = fun_clean_categogy2(df[colname], BOW)
compt += 1
    # Add the category 3 keywords to the bag-of-words list BOW
compt=0
for colname in List_cat3:
print(colname)
BOW = fun_clean_categogy3(df[colname], keyw3, compt, BOW, List_THR)
compt += 1
    # Clean up the "NaN" values copied by mistake from category 2
for elm in BOW:
if elm.__contains__(np.nan):
elm.pop(elm.index(np.nan))
    print(BOW[:200])  # small excerpt of the bag-of-words list
BOW2=[]
for elm in BOW:
stri=""
for st in elm:
stri = stri + " " + st
BOW2.append(stri)
df2 = pd.DataFrame(BOW2)
df2.to_csv('lettres_persanes.csv', sep=';', encoding='ISO-8859-1')
print(df2)
| 33.625 | 120 | 0.596778 | [
"MIT"
] | lkorczowski/cleandata | notebooks/template_preprocessing_columns.py | 4,073 | Python |
import os
import re
import subprocess
import tempfile
def backups(destination, prefix):
name_re = re.compile(r'^{}(?:\.[0-9]+)?$'.format(prefix))
def _key(name):
return [int(char) if char.isdigit() else char
for char in re.split(r'([0-9]+)', name)]
paths = []
for name in sorted(os.listdir(destination), key=_key):
path = os.path.join(destination, name)
if os.path.isdir(path) and name_re.match(name):
paths.append(path)
return paths
def rotate(paths):
for _ in range(len(paths)):
path = paths.pop()
prefix, _, suffix = os.path.basename(path).partition('.')
suffix = str(int(suffix or 0) + 1)
rotated = os.path.join(os.path.dirname(path), prefix + '.' + suffix)
os.rename(path, rotated)
paths.insert(0, rotated)
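# Example: given paths ['backup', 'backup.1'], rotate renames backup.1 -> backup.2 and
# then backup -> backup.1, so the suffix numbering always counts up from the newest copy.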
def sync(rsync, source, destination, link_dest=None, exclude=None):
command = [rsync, '-v', '-a', '--no-D', '--delete', '--ignore-existing']
if link_dest:
command.extend(['--link-dest', link_dest])
exclude = exclude or []
if exclude:
command.extend([arg for path in exclude for arg in ['--exclude', path]])
if os.path.isdir(source) and source[-1] != os.path.sep:
source += os.path.sep
command.extend([source, destination])
subprocess.check_call(command)
def touch(path, times=None):
with open(path, 'a'):
os.utime(path, times)
def delete(rsync, directory):
empty = tempfile.mkdtemp() + os.path.sep
command = [rsync, '-v', '-r', '--delete', empty, directory]
subprocess.check_call(command)
os.rmdir(directory)
os.rmdir(empty)
| 23.871429 | 80 | 0.60383 | [
"MIT"
] | neuroid/poor-mans-time-machine | poor_mans_time_machine/__init__.py | 1,671 | Python |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
from sphinx.domains.python import PythonDomain
sys.path.insert(0, os.path.abspath('..'))
from dival import __version__
# -- Project information -----------------------------------------------------
project = 'Deep Inversion Validation Library'
copyright = ('2020, Johannes Leuschner, Maximilian Schmidt, '
'Daniel Otero Baguer, David Erzmann')
author = ('Johannes Leuschner, Maximilian Schmidt, '
'Daniel Otero Baguer, David Erzmann')
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepInversionValidationLibrarydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DeepInversionValidationLibrary.tex',
'Deep Inversion Validation Library Documentation',
('Johannes Leuschner, Maximilian Schmidt, '
'Daniel Otero Baguer, David Erzmann'), 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepinversionvalidationlibrary',
'Deep Inversion Validation Library Documentation', [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DeepInversionValidationLibrary',
'Deep Inversion Validation Library Documentation', author,
'DeepInversionValidationLibrary', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_member_order = 'bysource'
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
class MyPythonDomain(PythonDomain):
def find_obj(self, env, modname, classname, name, type, searchmode=0):
orig_matches = PythonDomain.find_obj(self, env, modname, classname,
name, type, searchmode)
# longest match is supposed to be original definition
return sorted(orig_matches, key=lambda m: len(m[0]))[-1:]
def setup(app):
app.connect("autodoc-skip-member", skip)
app.add_domain(MyPythonDomain, override=True)
app.add_css_file('css/custom.css')
| 31.072072 | 79 | 0.658307 | [
"MIT"
] | MBaltz/dival | docs/conf.py | 6,898 | Python |
import time
import scipy.io.wavfile as wavfile
import numpy as np
import speech_recognition as sr
import librosa
import argparse
import os
from glob import glob
from pydub import AudioSegment
from pydub.silence import split_on_silence, detect_nonsilent
from pydub.playback import play
import pysrt
import math
import shutil
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video', type=str, required=True, help='Path to video *.mp4 file')
parser.add_argument('-o', '--output', type=str, default='output/', help='Output file location')
parser.add_argument('-l', '--lang', type=str, default='en', help='Language of the video file')
arguments = parser.parse_args()
return arguments
def recognize(wav_filename, lang):
data, s = librosa.load(wav_filename)
librosa.output.write_wav('output/tmp.wav', data, s)
y = (np.iinfo(np.int32).max * (data/np.abs(data).max())).astype(np.int32)
wavfile.write('output/tmp_32.wav', s, y)
r = sr.Recognizer()
with sr.AudioFile('output/tmp_32.wav') as source:
audio = r.record(source)
print('Audio file has been loaded')
try:
result = r.recognize_google(audio, language = lang).lower()
except sr.UnknownValueError:
print("Failed to determine audio file")
result = ''
# finally:
# os.remove(wav_filename)
return result
def get_audio(videofile, audiofile):
os.system('ffmpeg -y -threads 4 -i {} -f wav -ab 192000 -vn {}'.format(videofile, audiofile))
def split_into_frames(audiofile, samplesLocation):
os.system('rm {}/*'.format(samplesLocation))
time.sleep(2.0)
data, sr = librosa.load(audiofile)
duration = librosa.get_duration(data, sr)
print('video duration, hours: {}'.format(duration/3600))
for i in range(0,int(duration-1),20):
tmp_batch = data[(i)*sr:sr*(i+20)]
librosa.output.write_wav('{}/{}.wav'.format(samplesLocation, chr(int(i/20)+65)), tmp_batch, sr)
def separate_music_voice(audioFile, outputLocation):
os.system('spleeter separate -i {} -p spleeter:2stems -o {}'.format(audioFile, outputLocation))
# Define a function to normalize a chunk to a target amplitude.
def match_target_amplitude(aChunk, target_dBFS):
''' Normalize given audio chunk '''
change_in_dBFS = target_dBFS - aChunk.dBFS
return aChunk.apply_gain(change_in_dBFS)
def get_timestamp(duration):
hr = math.floor(duration / 3600000)
total_min = duration % 3600000
mins = math.floor(total_min / 60000)
total_secs = total_min % 60000
secs = math.floor(total_secs / 1000)
milisecs = total_min % 1000
return "{:02d}:{:02d}:{:02d},{:03d}".format(hr, mins, secs, milisecs)
def gen_subtitle(wavFile, samplesLocation, srtFile, lang):
srt_file = pysrt.SubRipFile()
# Load your audio.
print("loading wav file...")
# song = AudioSegment.from_mp3("your_audio.mp3")
#song = AudioSegment.from_wav("vocals.wav")
song = AudioSegment.from_file(wavFile, format="wav")
# play(song)
dBFS = song.dBFS
# Nonsilence track start and end positions.
nonsilence = detect_nonsilent(
song,
min_silence_len = 500,
silence_thresh = dBFS-16
)
file_count = len(nonsilence)
print("Nonsilence chunk length {}".format(str(file_count)))
# for [start, end] in nonsilence:
# print("start: {0} end: {1}".format(get_timestamp(start), get_timestamp(end)))
# Split track where the silence is 2 seconds or more and get chunks using
# the imported function.
print("Start spliting file...")
chunks = split_on_silence(
song,
min_silence_len = 500,
silence_thresh = dBFS-16,
# optional
keep_silence = 250
)
print("Spliting done..." + str(len(chunks)))
# Process each chunk with your parameters
for i, chunk in enumerate(chunks):
# Create a silence chunk that's 0.5 seconds (or 500 ms) long for padding.
silence_chunk = AudioSegment.silent(duration=1000)
# Add the padding chunk to beginning and end of the entire chunk.
audio_chunk = silence_chunk + chunk + silence_chunk
# audio_chunk = chunk
# Normalize the entire chunk.
normalized_chunk = match_target_amplitude(audio_chunk, -20.0)
# Export the audio chunk with new bitrate.
starttime = nonsilence[i][0]
endtime = nonsilence[i][1]
print("\n>>{} of {}, Exporting {}chunk{}.wav start: {} end: {}".format(i, file_count, samplesLocation, i, starttime, endtime))
chunk_file_path = "{}chunk{}.wav".format(samplesLocation, str(i))
normalized_chunk.export(
chunk_file_path,
bitrate = "192k",
format = "wav"
)
time.sleep(2)
print("Going to generete the dialogs of file {}".format(chunk_file_path))
dialogs = recognize(chunk_file_path, lang)
print("{} file dialog is: {}".format(chunk_file_path, dialogs))
start_time = get_timestamp(starttime)
end_time = get_timestamp(endtime)
sub = pysrt.SubRipItem((i+1), start=start_time, end=end_time, text="{} {}".format(str(i+1), dialogs))
srt_file.append(sub)
srt_file.save(srtFile)
if __name__ == '__main__':
outputLoc = 'output/'
inputWaveFile = 'current.wav'
vocals_file = 'current/vocals.wav'
samples_location = 'samples/'
srt_file = '.srt'
start = time.time()
args = get_arguments()
outputLoc = args.output
shutil.rmtree(outputLoc)
time.sleep(2)
os.makedirs(outputLoc, exist_ok=True)
inputWaveFile = outputLoc + inputWaveFile
vocals_file = outputLoc + vocals_file
samples_location = outputLoc + samples_location
os.makedirs(samples_location, exist_ok=True)
srt_file = os.path.splitext(args.video)[0] + srt_file
print('srt file will be {}'.format(srt_file))
time.sleep(2)
get_audio(args.video, inputWaveFile)
separate_music_voice(inputWaveFile, outputLoc)
gen_subtitle(vocals_file, samples_location, srt_file, args.lang)
end = time.time()
print('elapsed time: {}'.format(end - start))
# shutil.rmtree(outputLoc)
| 33.352941 | 134 | 0.66362 | [
"Apache-2.0"
] | whilemind/subtitle | src/subtitle.py | 6,237 | Python |
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param: root: The root of the binary search tree.
@param: A: A TreeNode in a Binary.
@param: B: A TreeNode in a Binary.
@return: Return the least common ancestor(LCA) of the two nodes.
"""
def lowestCommonAncestor(self, root, A, B):
if root is None or root is A or root is B:
return root
left = self.lowestCommonAncestor(root.left, A, B)
right = self.lowestCommonAncestor(root.right, A, B)
if left and right:
return root
if left:
return left
return right | 25.310345 | 68 | 0.594005 | [
"MIT"
] | ctc316/algorithm-python | Lintcode/Ladder_11_15_A/88. Lowest Common Ancestor of a Binary Tree.py | 734 | Python |
import numpy as np
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.funcs import \
mylog
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import \
BaseIOHandler
def _grid_dname(grid_id):
return "/data/grid_%010i" % grid_id
def _field_dname(grid_id, field_name):
return "%s/%s" % (_grid_dname(grid_id), field_name)
# TODO all particle bits were removed
class IOHandlerGDFHDF5(BaseIOHandler):
_dataset_type = "grid_data_format"
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
def _read_fluid_selection(self, chunks, selector, fields, size):
from sys import version
rv = {}
chunks = list(chunks)
if isinstance(selector, GridSelector):
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
grid = chunks[0].objs[0]
h5f = h5py.File(grid.filename, mode='r')
gds = h5f.get(_grid_dname(grid.id))
for ftype, fname in fields:
if self.ds.field_ordering == 1:
rv[(ftype, fname)] = gds.get(fname)[()].swapaxes(0, 2)
else:
rv[(ftype, fname)] = gds.get(fname)[()]
h5f.close()
return rv
if size is None:
size = sum((grid.count(selector) for chunk in chunks
for grid in chunk.objs))
if any((ftype != "gdf" for ftype, fname in fields)):
raise NotImplementedError
for field in fields:
ftype, fname = field
fsize = size
# check the dtype instead
rv[field] = np.empty(fsize, dtype="float64")
ngrids = sum(len(chunk.objs) for chunk in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [fn for ft, fn in fields], ngrids)
ind = 0
for chunk in chunks:
fid = None
for grid in chunk.objs:
if grid.filename is None:
continue
if fid is None:
if version < '3':
fid = h5py.h5f.open(grid.filename,h5py.h5f.ACC_RDONLY)
else:
fid = h5py.h5f.open(bytes(grid.filename,'utf-8'),h5py.h5f.ACC_RDONLY)
if self.ds.field_ordering == 1:
# check the dtype instead
data = np.empty(grid.ActiveDimensions[::-1],
dtype="float64")
data_view = data.swapaxes(0, 2)
else:
# check the dtype instead
data_view = data = np.empty(grid.ActiveDimensions,
dtype="float64")
for field in fields:
ftype, fname = field
if version < '3':
dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))
else:
dg = h5py.h5d.open(fid, bytes(_field_dname(grid.id, fname),'utf-8'))
dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
# caches
nd = grid.select(selector, data_view, rv[field], ind)
ind += nd # I don't get that part, only last nd is added
if fid is not None:
fid.close()
return rv
| 38.355556 | 93 | 0.514195 | [
"BSD-3-Clause-Clear"
] | aemerick/yt | yt/frontends/gdf/io.py | 3,452 | Python |
#######################
# Dennis MUD #
# telnet.py #
# Copyright 2018-2021 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
# Parts of codebase borrowed from https://github.com/TKeesh/WebSocketChat
import traceback
from lib.logger import Logger
from twisted.internet import protocol
from twisted.protocols.basic import LineReceiver
# Read the motd file.
try:
with open("motd.telnet.txt") as f:
motd = f.read()
except:
motd = None
class ServerProtocol(LineReceiver):
def __init__(self, factory):
self.factory = factory
self.peer = None
self._log = Logger("telnet")
def connectionMade(self):
p = self.transport.getPeer()
self.peer = p.host + ':' + str(p.port)
self.factory.register(self)
self._log.info("Client connected: {peer}", peer=self.peer)
if motd:
self.factory.communicate(self.peer, motd.encode('utf-8'))
def connectionLost(self, reason):
self.factory.unregister(self)
self._log.info("Client disconnected: {peer}", peer=self.peer)
def lineReceived(self, line):
# Don't log passwords.
passcheck = line.split(b' ')
if passcheck[0] == b'login' and len(passcheck) > 2:
passcheck = b' '.join(passcheck[:2] + [b'********'])
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=passcheck)
elif passcheck[0] == b'register' and len(passcheck) > 2:
passcheck = b' '.join(passcheck[:2] + [b'********'])
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=passcheck)
elif passcheck[0] == b'password' and len(passcheck) > 1:
passcheck = b' '.join(passcheck[:1] + [b'********'])
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=passcheck)
else:
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=line)
# Try to decode the line.
try:
line = line.decode('utf-8')
except:
self._log.info("Discarded garbage line from {peer}", peer=self.peer)
return
# Did we receive the quit pseudo-command?
if line == "quit":
self.transport.loseConnection()
return
# Run the command while handling errors.
try:
self.factory.router.shell.command(self.factory.router[self.peer]["console"], line)
except:
self.factory.communicate(self.peer, traceback.format_exc().encode('utf-8'))
self._log.error(traceback.format_exc())
class ServerFactory(protocol.Factory):
def __init__(self, router, *args, **kwargs):
self.router = router
self.router.telnet_factory = self
super(ServerFactory, self).__init__(*args)
self.clients = []
def buildProtocol(self, addr):
return ServerProtocol(self)
def register(self, client):
self.clients.append({'client-peer': client.peer, 'client': client})
self.router.register(client.peer, "telnet")
def unregister(self, client):
self.router.unregister(client.peer)
for c in self.clients:
if c['client-peer'] == client.peer:
self.clients.remove(c)
def communicate(self, peer, payload):
client = None
for c in self.clients:
if c['client-peer'] == peer:
client = c['client']
if client:
# Telnet wants a CRLF instead of just an LF. Some clients require this to display properly.
client.sendLine(payload.decode('utf-8').replace('\n', '\r\n').encode('utf-8'))
| 37.653543 | 103 | 0.628816 | [
"MIT"
] | seisatsu/Dennis | lib/telnet.py | 4,782 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
# Data sets
IRIS_TRAINING = os.path.join(os.path.dirname(__file__), "iris_training.csv")
IRIS_TEST = os.path.join(os.path.dirname(__file__), "iris_test.csv")
def main(unused_argv):
# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
test_set.data,
test_set.target,
every_n_steps=50)
# Build 3 layer DNN with 10, 20, 10 units respectively.
# classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
# hidden_units=[10, 20, 10],
# n_classes=3,
# model_dir="/tmp/iris_model")
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10],
n_classes=3,
model_dir="/tmp/iris_model",
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
# Fit model.
# classifier.fit(x=training_set.data,
# y=training_set.target,
# steps=2000)
classifier.fit(x=training_set.data,
y=training_set.target,
steps=2000,
monitors=[validation_monitor])
# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
y=test_set.target)["accuracy"]
print('Accuracy: {0:f}'.format(accuracy_score))
# Classify two new flower samples.
new_samples = np.array(
[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
y = list(classifier.predict(new_samples, as_iterable=True))
print('Predictions: {}'.format(str(y)))
if __name__ == "__main__":
tf.app.run()
| 35.716418 | 82 | 0.633514 | [
"MIT"
] | yamamototakas/fxtrading | agents/tensorflow_iris.py | 2,393 | Python |
# encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
import glob
import re
import os.path as osp
from .bases import BaseImageDataset
class Market1501(BaseImageDataset):
"""
Market1501
Reference:
Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
URL: http://www.liangzheng.org/Project/project_reid.html
Dataset statistics:
# identities: 1501 (+1 for background)
# images: 12936 (train) + 3368 (query) + 15913 (gallery)
"""
dataset_dir = 'market1501'
def __init__(self, root='/raid/home/zhihui/reid_strong_baseline/data/Market-1501-fixed/', verbose=True, **kwargs):
super(Market1501, self).__init__()
self.dataset_dir = root
self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'query')
self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
self._check_before_run()
train = self._process_dir(self.train_dir, relabel=True)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> Market1501 loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= camid <= 6
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
dataset.append((img_path, pid, camid))
return dataset
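if __name__ == '__main__':
    # Minimal usage sketch; the root path below is an assumption, not a project default.
    dataset = Market1501(root='/path/to/Market-1501', verbose=True)
    print(dataset.num_train_pids, dataset.num_query_pids, dataset.num_gallery_pids)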
| 35.44 | 118 | 0.64936 | [
"Apache-2.0"
] | moranxiachong/PersonReID-VAAL | data/datasets/market1501.py | 2,658 | Python |
"""
Django settings for pets_forum project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f9um$ll0_p4=p&p(iwkeu1hk+-en9c%q#@aul(n!7ecb^%z8x8'
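# Sketch: in a real deployment the key would typically be read from the environment
# rather than committed to source, e.g.:
# SECRET_KEY = os.environ['DJANGO_SECRET_KEY']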
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pets_forum.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pets_forum.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.719008 | 91 | 0.696979 | [
"MIT"
] | catnipoo/pets1 | pets_forum/pets_forum/settings.py | 3,112 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 30 03:08:17 2017
@author: aditya
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 26 12:46:25 2017
@author: aditya
"""
import math
import os
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
FLAGS = None
MNIST_IMAGE_SIZE = 28
MNIST_IMAGE_PIXELS =28*28
OUTPUT_CLASSES = 10
Batch_Size = 100
LEARNING_RATE = 0.01
hiddenlayer_units =16
expno = "1"
def deepnnwithrelu(images):
  # Code borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist.py
with tf.name_scope('hiddenlayer1'):
weights = tf.Variable(
tf.truncated_normal([MNIST_IMAGE_PIXELS, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(MNIST_IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hiddenlayer2'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
with tf.name_scope('hiddenlayer3'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden3 = tf.nn.relu(tf.matmul(hidden2, weights) + biases)
with tf.name_scope('hiddenlayer4'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden4 = tf.nn.relu(tf.matmul(hidden3, weights) + biases)
with tf.name_scope('hiddenlayer5'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden5 = tf.nn.relu(tf.matmul(hidden4, weights) + biases)
with tf.name_scope('hiddenlayer6'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden6 = tf.nn.relu(tf.matmul(hidden5, weights) + biases)
  # Map the final hidden layer's units to the 10 output classes, one per digit
with tf.name_scope('finallayer'):
W_fc2 = weight_variable([hiddenlayer_units, 10])
b_fc2 = bias_variable([10])
y_output = tf.matmul(hidden6, W_fc2) + b_fc2
return y_output
def deepnnwithsigmoid(images):
with tf.name_scope('hiddenlayer1'):
weights = tf.Variable(
tf.truncated_normal([MNIST_IMAGE_PIXELS, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(MNIST_IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden1 = tf.nn.sigmoid(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hiddenlayer2'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden2 = tf.nn.sigmoid(tf.matmul(hidden1, weights) + biases)
with tf.name_scope('hiddenlayer3'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden3 = tf.nn.sigmoid(tf.matmul(hidden2, weights) + biases)
with tf.name_scope('hiddenlayer4'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden4 = tf.nn.sigmoid(tf.matmul(hidden3, weights) + biases)
with tf.name_scope('hiddenlayer5'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden5 = tf.nn.sigmoid(tf.matmul(hidden4, weights) + biases)
with tf.name_scope('hiddenlayer6'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden6 = tf.nn.sigmoid(tf.matmul(hidden5, weights) + biases)
with tf.name_scope('finallayer'):
W_fc2 = weight_variable([hiddenlayer_units, 10])
b_fc2 = bias_variable([10])
y_output = tf.matmul(hidden6, W_fc2) + b_fc2
return y_output
def deepnnwithelu(images):
with tf.name_scope('hiddenlayer1'):
weights = tf.Variable(
tf.truncated_normal([MNIST_IMAGE_PIXELS, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(MNIST_IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden1 = tf.nn.elu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hiddenlayer2'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden2 = tf.nn.elu(tf.matmul(hidden1, weights) + biases)
with tf.name_scope('hiddenlayer3'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden3 = tf.nn.elu(tf.matmul(hidden2, weights) + biases)
with tf.name_scope('hiddenlayer4'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden4 = tf.nn.elu(tf.matmul(hidden3, weights) + biases)
with tf.name_scope('hiddenlayer5'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden5 = tf.nn.elu(tf.matmul(hidden4, weights) + biases)
with tf.name_scope('hiddenlayer6'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden6 = tf.nn.elu(tf.matmul(hidden5, weights) + biases)
with tf.name_scope('finallayer'):
W_fc2 = weight_variable([hiddenlayer_units, 10])
b_fc2 = bias_variable([10])
y_output = tf.matmul(hidden6, W_fc2) + b_fc2
return y_output
# Code Borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1/math.sqrt(float(hiddenlayer_units)))
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def appstart(stri):
# Import data
mnist = input_data.read_data_sets("../Data/MNIST_data", one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the graph for the deep net
if(stri=="relu"):
y_output = deepnnwithrelu(x)
elif(stri=="elu"):
y_output = deepnnwithelu(x)
else:
y_output = deepnnwithsigmoid(x)
#Code Borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py
with tf.name_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y_output)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_output, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = "tfgraphs/"+expno
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
resultarray =[]
iterarray=[]
accarray=[]
testaccarray = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1]})
testaccuracy = accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels})
#print('step %d, training accuracy %g' % (i, train_accuracy))
#print('test accuracy %g' %testaccuracy)
iterarray.append(i)
accarray.append(train_accuracy)
testaccarray.append(testaccuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
resultarray.append(iterarray)
resultarray.append(accarray)
resultarray.append(testaccarray)
return resultarray
def progstart():
rarray =[]
rarray.append(appstart("sigmoid"))
rarray.append(appstart("relu"))
rarray.append(appstart("elu"))
if not os.path.exists('figures'):
os.makedirs('figures')
fig1 = plt.figure()
axes1 = fig1.add_axes([0.1,0.1,0.8,0.8])
axes1.plot(rarray[0][0],rarray[0][1],'r')
axes1.plot(rarray[0][0],rarray[1][1],'b')
axes1.plot(rarray[0][0],rarray[2][1],'g')
axes1.set_xlabel('Train Iterations')
axes1.set_ylabel('Train accuracy')
fig1.savefig('figures/'+expno+'_trainAccuracy.png')
fig2 = plt.figure()
axes2 = fig2.add_axes([0.1,0.1,0.8,0.8])
axes2.plot(rarray[0][0],rarray[0][2],'r')
axes2.plot(rarray[0][0],rarray[1][2],'b')
axes2.plot(rarray[0][0],rarray[2][2],'g')
axes2.set_xlabel('Train Iterations')
axes2.set_ylabel('Test accuracy')
fig2.savefig('figures/'+expno+'_testAccuracy.png')
plt.plot()
progstart()
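# Note: in progstart() the red/blue/green curves correspond to sigmoid, relu and elu
# respectively; a legend could be attached before saving each figure, e.g. (sketch):
# axes1.legend(['sigmoid', 'relu', 'elu'])
# axes2.legend(['sigmoid', 'relu', 'elu'])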
| 37.509146 | 121 | 0.636024 | [
"MIT"
] | AdityaPrasadMishra/TensorflowPractice | FSL - Entire Project + Report/Final Project/Code/Exp1.py | 12,303 | Python |
from os.path import join, dirname, abspath
here = lambda *paths: join(dirname(abspath(__file__)), *paths)
PROJECT_ROOT = here('..')
root = lambda *paths: join(PROJECT_ROOT, *paths)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = here('tmp', 'app-mails')
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ':memory:', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^1mo-5o90rn7nzy4fm94_=rtg-l9^x&tez8^9#1ktl4r6s_w^l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tests.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tests.wsgi.application'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
    },
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_outbox',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 33.319527 | 127 | 0.694193 | [
"MIT"
] | mberingen/django-outbox | tests/settings.py | 5,631 | Python |
from unittest.mock import MagicMock, patch, call
from tagtrain import data
from . import fake
from tagtrain.tagtrain.tt_remove import Remove
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_group(remove_user_from_group):
remove_user_from_group.side_effect = data.Group.DoesNotExist()
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('Group `GroupName` does not exist. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_member(remove_user_from_group):
remove_user_from_group.side_effect = data.Member.DoesNotExist()
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` is not a Member of Group `GroupName`. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good(remove_user_from_group):
remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=99)
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 99 total Members.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good_no_members(remove_user_from_group):
remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=0)
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 0 total Members.')
| 38.622642 | 106 | 0.778701 | [
"MIT"
] | c17r/TagTrain | tests/tagtrain/test_remove.py | 2,047 | Python |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import constants
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.common import utils
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import exceptions
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.common import constants as n_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import ovs_bridge
from neutron.tests import base
TESTING_VLAN_TAG = 1
def create_ofport(port_dict):
ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00', ofport=1,
port_name="port-name")
return ovsfw.OFPort(port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
class TestCreateRegNumbers(base.BaseTestCase):
def test_no_registers_defined(self):
flow = {'foo': 'bar'}
ovsfw.create_reg_numbers(flow)
self.assertEqual({'foo': 'bar'}, flow)
def test_all_registers_defined(self):
flow = {'foo': 'bar', 'reg_port': 1, 'reg_net': 2,
'reg_remote_group': 3}
expected_flow = {'foo': 'bar',
'reg{:d}'.format(ovsfw_consts.REG_PORT): 1,
'reg{:d}'.format(ovsfw_consts.REG_NET): 2,
'reg{:d}'.format(ovsfw_consts.REG_REMOTE_GROUP): 3}
ovsfw.create_reg_numbers(flow)
self.assertEqual(expected_flow, flow)
class TestSecurityGroup(base.BaseTestCase):
def setUp(self):
super(TestSecurityGroup, self).setUp()
self.sg = ovsfw.SecurityGroup('123')
self.sg.members = {'type': [1, 2, 3, 4]}
def test_update_rules_split(self):
rules = [
{'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'},
{'remote_group_id': '123456', 'foo': 'bar'}]
expected_raw_rules = [{'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'}]
expected_remote_rules = [{'remote_group_id': '123456', 'foo': 'bar'}]
self.sg.update_rules(rules)
self.assertEqual(expected_raw_rules, self.sg.raw_rules)
self.assertEqual(expected_remote_rules, self.sg.remote_rules)
def test_update_rules_protocols(self):
rules = [
{'foo': 'bar', 'protocol': constants.PROTO_NAME_ICMP,
'ethertype': constants.IPv4},
{'foo': 'bar', 'protocol': constants.PROTO_NAME_ICMP,
'ethertype': constants.IPv6},
{'foo': 'bar', 'protocol': constants.PROTO_NAME_IPV6_ICMP_LEGACY,
'ethertype': constants.IPv6},
{'foo': 'bar', 'protocol': constants.PROTO_NAME_TCP},
{'foo': 'bar', 'protocol': '94'},
{'foo': 'bar', 'protocol': 'baz'},
{'foo': 'no_proto'}]
self.sg.update_rules(rules)
self.assertEqual({'foo': 'no_proto'}, self.sg.raw_rules.pop())
protos = [rule['protocol'] for rule in self.sg.raw_rules]
self.assertEqual([constants.PROTO_NUM_ICMP,
constants.PROTO_NUM_IPV6_ICMP,
constants.PROTO_NUM_IPV6_ICMP,
constants.PROTO_NUM_TCP,
94,
'baz'], protos)
def test_get_ethertype_filtered_addresses(self):
addresses = self.sg.get_ethertype_filtered_addresses('type')
expected_addresses = [1, 2, 3, 4]
self.assertEqual(expected_addresses, addresses)
class TestOFPort(base.BaseTestCase):
def setUp(self):
super(TestOFPort, self).setUp()
self.ipv4_addresses = ['10.0.0.1', '192.168.0.1']
self.ipv6_addresses = ['fe80::f816:3eff:fe2e:1']
port_dict = {'device': 1,
'fixed_ips': self.ipv4_addresses + self.ipv6_addresses}
self.port = create_ofport(port_dict)
def test_ipv4_address(self):
ipv4_addresses = self.port.ipv4_addresses
self.assertEqual(self.ipv4_addresses, ipv4_addresses)
def test_ipv6_address(self):
ipv6_addresses = self.port.ipv6_addresses
self.assertEqual(self.ipv6_addresses, ipv6_addresses)
def test__get_allowed_pairs(self):
port = {
'allowed_address_pairs': [
{'mac_address': 'foo', 'ip_address': '10.0.0.1'},
{'mac_address': 'bar', 'ip_address': '192.168.0.1'},
{'mac_address': 'qux', 'ip_address': '169.254.0.0/16'},
{'mac_address': 'baz', 'ip_address': '2003::f'},
]}
allowed_pairs_v4 = ovsfw.OFPort._get_allowed_pairs(port, version=4)
allowed_pairs_v6 = ovsfw.OFPort._get_allowed_pairs(port, version=6)
expected_aap_v4 = {('foo', '10.0.0.1'), ('bar', '192.168.0.1'),
('qux', '169.254.0.0/16')}
expected_aap_v6 = {('baz', '2003::f')}
self.assertEqual(expected_aap_v4, allowed_pairs_v4)
self.assertEqual(expected_aap_v6, allowed_pairs_v6)
def test__get_allowed_pairs_empty(self):
port = {}
allowed_pairs = ovsfw.OFPort._get_allowed_pairs(port, version=4)
self.assertFalse(allowed_pairs)
def test_update(self):
old_port_dict = self.port.neutron_port_dict
new_port_dict = old_port_dict.copy()
added_ips = [1, 2, 3]
new_port_dict.update({
'fixed_ips': added_ips,
'allowed_address_pairs': [
{'mac_address': '00:00:00:00:00:01',
'ip_address': '192.168.0.1'},
{'mac_address': '00:00:00:00:00:01',
'ip_address': '2003::f'}],
})
self.port.update(new_port_dict)
self.assertEqual(new_port_dict, self.port.neutron_port_dict)
self.assertIsNot(new_port_dict, self.port.neutron_port_dict)
self.assertEqual(added_ips, self.port.fixed_ips)
self.assertEqual({('00:00:00:00:00:01', '192.168.0.1')},
self.port.allowed_pairs_v4)
self.assertIn(('00:00:00:00:00:01', '2003::f'),
self.port.allowed_pairs_v6)
class TestSGPortMap(base.BaseTestCase):
def setUp(self):
super(TestSGPortMap, self).setUp()
self.map = ovsfw.SGPortMap()
def test_get_or_create_sg_existing_sg(self):
self.map.sec_groups['id'] = mock.sentinel
sg = self.map.get_or_create_sg('id')
self.assertIs(mock.sentinel, sg)
def test_get_or_create_sg_nonexisting_sg(self):
with mock.patch.object(ovsfw, 'SecurityGroup') as sg_mock:
sg = self.map.get_or_create_sg('id')
self.assertEqual(sg_mock.return_value, sg)
def _check_port(self, port_id, expected_sg_ids):
port = self.map.ports[port_id]
expected_sgs = [self.map.sec_groups[sg_id]
for sg_id in expected_sg_ids]
self.assertEqual(port.sec_groups, expected_sgs)
def _check_sg(self, sg_id, expected_port_ids):
sg = self.map.sec_groups[sg_id]
expected_ports = {self.map.ports[port_id]
for port_id in expected_port_ids}
self.assertEqual(sg.ports, expected_ports)
def _create_ports_and_sgroups(self):
sg_1 = ovsfw.SecurityGroup(1)
sg_2 = ovsfw.SecurityGroup(2)
sg_3 = ovsfw.SecurityGroup(3)
port_a = create_ofport({'device': 'a'})
port_b = create_ofport({'device': 'b'})
self.map.ports = {'a': port_a, 'b': port_b}
self.map.sec_groups = {1: sg_1, 2: sg_2, 3: sg_3}
port_a.sec_groups = [sg_1, sg_2]
port_b.sec_groups = [sg_2, sg_3]
sg_1.ports = {port_a}
sg_2.ports = {port_a, port_b}
sg_3.ports = {port_b}
def test_create_port(self):
port = create_ofport({'device': 'a'})
sec_groups = ['1', '2']
port_dict = {'security_groups': sec_groups}
self.map.create_port(port, port_dict)
self._check_port('a', sec_groups)
self._check_sg('1', ['a'])
self._check_sg('2', ['a'])
def test_update_port_sg_added(self):
self._create_ports_and_sgroups()
port_dict = {'security_groups': [1, 2, 3]}
self.map.update_port(self.map.ports['b'], port_dict)
self._check_port('a', [1, 2])
self._check_port('b', [1, 2, 3])
self._check_sg(1, ['a', 'b'])
self._check_sg(2, ['a', 'b'])
self._check_sg(3, ['b'])
def test_update_port_sg_removed(self):
self._create_ports_and_sgroups()
port_dict = {'security_groups': [1]}
self.map.update_port(self.map.ports['b'], port_dict)
self._check_port('a', [1, 2])
self._check_port('b', [1])
self._check_sg(1, ['a', 'b'])
self._check_sg(2, ['a'])
self._check_sg(3, [])
def test_remove_port(self):
self._create_ports_and_sgroups()
self.map.remove_port(self.map.ports['a'])
self._check_port('b', [2, 3])
self._check_sg(1, [])
self._check_sg(2, ['b'])
self._check_sg(3, ['b'])
self.assertNotIn('a', self.map.ports)
def test_update_rules(self):
"""Just make sure it doesn't crash"""
self.map.update_rules(1, [])
def test_update_members(self):
"""Just make sure we doesn't crash"""
self.map.update_members(1, [])
class TestConjIdMap(base.BaseTestCase):
def setUp(self):
super(TestConjIdMap, self).setUp()
self.conj_id_map = ovsfw.ConjIdMap()
def test_get_conj_id(self):
allocated = []
for direction in [firewall.EGRESS_DIRECTION,
firewall.INGRESS_DIRECTION]:
id_ = self.conj_id_map.get_conj_id(
'sg', 'remote', direction, constants.IPv4)
allocated.append(id_)
self.assertEqual(len(set(allocated)), 2)
self.assertEqual(len(self.conj_id_map.id_map), 2)
self.assertEqual(self.conj_id_map.get_conj_id(
'sg', 'remote', firewall.EGRESS_DIRECTION, constants.IPv4),
allocated[0])
def test_get_conj_id_invalid(self):
self.assertRaises(ValueError, self.conj_id_map.get_conj_id,
'sg', 'remote', 'invalid-direction',
constants.IPv6)
def test_delete_sg(self):
test_data = [('sg1', 'sg1'), ('sg1', 'sg2')]
ids = []
for sg_id, remote_sg_id in test_data:
ids.append(self.conj_id_map.get_conj_id(
sg_id, remote_sg_id,
firewall.INGRESS_DIRECTION, constants.IPv6))
result = self.conj_id_map.delete_sg('sg1')
self.assertIn(('sg1', ids[0]), result)
self.assertIn(('sg2', ids[1]), result)
self.assertFalse(self.conj_id_map.id_map)
reallocated = self.conj_id_map.get_conj_id(
'sg-foo', 'sg-foo', firewall.INGRESS_DIRECTION,
constants.IPv6)
self.assertIn(reallocated, ids)
class TestConjIPFlowManager(base.BaseTestCase):
def setUp(self):
super(TestConjIPFlowManager, self).setUp()
self.driver = mock.Mock()
self.manager = ovsfw.ConjIPFlowManager(self.driver)
self.vlan_tag = 100
self.conj_id = 16
def test_update_flows_for_vlan(self):
remote_group = self.driver.sg_port_map.get_sg.return_value
remote_group.get_ethertype_filtered_addresses.return_value = [
'10.22.3.4']
with mock.patch.object(self.manager.conj_id_map,
'get_conj_id') as get_conj_id_mock:
get_conj_id_mock.return_value = self.conj_id
self.manager.add(self.vlan_tag, 'sg', 'remote_id',
firewall.INGRESS_DIRECTION, constants.IPv4, 0)
self.manager.add(self.vlan_tag, 'sg', 'remote_id',
firewall.INGRESS_DIRECTION, constants.IPv4, 3)
self.manager.update_flows_for_vlan(self.vlan_tag)
self.assertEqual(self.driver._add_flow.call_args_list,
[mock.call(actions='conjunction(16,1/2)', ct_state='+est-rel-rpl',
dl_type=2048, nw_src='10.22.3.4/32', priority=70,
reg_net=self.vlan_tag, table=82),
mock.call(actions='conjunction(17,1/2)', ct_state='+new-est',
dl_type=2048, nw_src='10.22.3.4/32', priority=70,
reg_net=self.vlan_tag, table=82),
mock.call(actions='conjunction(22,1/2)', ct_state='+est-rel-rpl',
dl_type=2048, nw_src='10.22.3.4/32', priority=73,
reg_net=self.vlan_tag, table=82),
mock.call(actions='conjunction(23,1/2)', ct_state='+new-est',
dl_type=2048, nw_src='10.22.3.4/32', priority=73,
reg_net=self.vlan_tag, table=82)])
def test_sg_removed(self):
with mock.patch.object(self.manager.conj_id_map,
'get_conj_id') as get_id_mock, \
mock.patch.object(self.manager.conj_id_map,
'delete_sg') as delete_sg_mock:
get_id_mock.return_value = self.conj_id
delete_sg_mock.return_value = [('remote_id', self.conj_id)]
self.manager.add(self.vlan_tag, 'sg', 'remote_id',
firewall.INGRESS_DIRECTION, constants.IPv4, 0)
self.manager.flow_state[self.vlan_tag][(
firewall.INGRESS_DIRECTION, constants.IPv4)] = {
'10.22.3.4': [self.conj_id]}
self.manager.sg_removed('sg')
self.driver._add_flow.assert_not_called()
self.driver.delete_flows_for_ip_addresses.assert_called_once_with(
{'10.22.3.4'}, firewall.INGRESS_DIRECTION, constants.IPv4,
self.vlan_tag)
class FakeOVSPort(object):
def __init__(self, name, port, mac):
self.port_name = name
self.ofport = port
self.vif_mac = mac
class TestOVSFirewallDriver(base.BaseTestCase):
def setUp(self):
super(TestOVSFirewallDriver, self).setUp()
mock_bridge = mock.patch.object(
ovs_lib, 'OVSBridge', autospec=True).start()
self.firewall = ovsfw.OVSFirewallDriver(mock_bridge)
self.mock_bridge = self.firewall.int_br
self.mock_bridge.reset_mock()
self.fake_ovs_port = FakeOVSPort('port', 1, '00:00:00:00:00:00')
self.mock_bridge.br.get_vif_port_by_id.return_value = \
self.fake_ovs_port
def _prepare_security_group(self):
security_group_rules = [
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_TCP,
'direction': firewall.INGRESS_DIRECTION,
'port_range_min': 123,
'port_range_max': 123}]
self.firewall.update_security_group_rules(1, security_group_rules)
security_group_rules = [
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_UDP,
'direction': firewall.EGRESS_DIRECTION},
{'ethertype': constants.IPv6,
'protocol': constants.PROTO_NAME_TCP,
'remote_group_id': 2,
'direction': firewall.EGRESS_DIRECTION}]
self.firewall.update_security_group_rules(2, security_group_rules)
@property
def port_ofport(self):
return self.mock_bridge.br.get_vif_port_by_id.return_value.ofport
@property
def port_mac(self):
return self.mock_bridge.br.get_vif_port_by_id.return_value.vif_mac
def test_callbacks_registered(self):
with mock.patch.object(callbacks_registry, "subscribe") as subscribe:
firewall = ovsfw.OVSFirewallDriver(mock.MagicMock())
subscribe.assert_called_once_with(
firewall._init_firewall_callback,
callbacks_resources.AGENT,
callbacks_events.OVS_RESTARTED)
def test_initialize_bridge(self):
br = self.firewall.initialize_bridge(self.mock_bridge)
self.assertEqual(br, self.mock_bridge.deferred.return_value)
def test__add_flow_dl_type_formatted_to_string(self):
dl_type = 0x0800
self.firewall._add_flow(dl_type=dl_type)
def test__add_flow_registers_are_replaced(self):
self.firewall._add_flow(in_port=1, reg_port=1, reg_net=2)
expected_calls = {'in_port': 1,
'reg{:d}'.format(ovsfw_consts.REG_PORT): 1,
'reg{:d}'.format(ovsfw_consts.REG_NET): 2}
self.mock_bridge.br.add_flow.assert_called_once_with(
**expected_calls)
def test__drop_all_unmatched_flows(self):
self.firewall._drop_all_unmatched_flows()
expected_calls = [
mock.call(actions='drop', priority=0,
table=ovs_consts.BASE_EGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.RULES_EGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.BASE_INGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.RULES_INGRESS_TABLE)]
actual_calls = self.firewall.int_br.br.add_flow.call_args_list
self.assertEqual(expected_calls, actual_calls)
def test_get_or_create_ofport_non_existing(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
port = self.firewall.get_or_create_ofport(port_dict)
sg1, sg2 = sorted(
self.firewall.sg_port_map.sec_groups.values(),
key=lambda x: x.id)
self.assertIn(port, self.firewall.sg_port_map.ports.values())
self.assertEqual(
sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2])
self.assertIn(port, sg1.ports)
self.assertIn(port, sg2.ports)
def test_get_or_create_ofport_existing(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
of_port = create_ofport(port_dict)
self.firewall.sg_port_map.ports[of_port.id] = of_port
port = self.firewall.get_or_create_ofport(port_dict)
sg1, sg2 = sorted(
self.firewall.sg_port_map.sec_groups.values(),
key=lambda x: x.id)
self.assertIs(of_port, port)
self.assertIn(port, self.firewall.sg_port_map.ports.values())
self.assertEqual(
sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2])
self.assertIn(port, sg1.ports)
self.assertIn(port, sg2.ports)
def test_get_or_create_ofport_changed(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
of_port = create_ofport(port_dict)
self.firewall.sg_port_map.ports[of_port.id] = of_port
fake_ovs_port = FakeOVSPort('port', 2, '00:00:00:00:00:00')
self.mock_bridge.br.get_vif_port_by_id.return_value = \
fake_ovs_port
port = self.firewall.get_or_create_ofport(port_dict)
self.assertEqual(port.ofport, 2)
def test_get_or_create_ofport_missing(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
self.mock_bridge.br.get_vif_port_by_id.return_value = None
with testtools.ExpectedException(exceptions.OVSFWPortNotFound):
self.firewall.get_or_create_ofport(port_dict)
def test_get_or_create_ofport_missing_nocreate(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
self.mock_bridge.br.get_vif_port_by_id.return_value = None
self.assertIsNone(self.firewall.get_ofport(port_dict))
self.assertFalse(self.mock_bridge.br.get_vif_port_by_id.called)
def test_is_port_managed_managed_port(self):
port_dict = {'device': 'port-id'}
self.firewall.sg_port_map.ports[port_dict['device']] = object()
is_managed = self.firewall.is_port_managed(port_dict)
self.assertTrue(is_managed)
def test_is_port_managed_not_managed_port(self):
port_dict = {'device': 'port-id'}
is_managed = self.firewall.is_port_managed(port_dict)
self.assertFalse(is_managed)
def test_prepare_port_filter(self):
port_dict = {'device': 'port-id',
'security_groups': [1],
'fixed_ips': ["10.0.0.1"]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
exp_egress_classifier = mock.call(
actions='set_field:{:d}->reg5,set_field:{:d}->reg6,'
'resubmit(,{:d})'.format(
self.port_ofport, TESTING_VLAN_TAG,
ovs_consts.BASE_EGRESS_TABLE),
in_port=self.port_ofport,
priority=100,
table=ovs_consts.TRANSIENT_TABLE)
exp_ingress_classifier = mock.call(
actions='set_field:{:d}->reg5,set_field:{:d}->reg6,'
'strip_vlan,resubmit(,{:d})'.format(
self.port_ofport, TESTING_VLAN_TAG,
ovs_consts.BASE_INGRESS_TABLE),
dl_dst=self.port_mac,
dl_vlan='0x%x' % TESTING_VLAN_TAG,
priority=90,
table=ovs_consts.TRANSIENT_TABLE)
filter_rule = mock.call(
actions='ct(commit,zone=NXM_NX_REG6[0..15]),'
'output:{:d},resubmit(,{:d})'.format(
self.port_ofport,
ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE),
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_TCP,
priority=77,
reg5=self.port_ofport,
ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
table=ovs_consts.RULES_INGRESS_TABLE,
tcp_dst='0x007b')
calls = self.mock_bridge.br.add_flow.call_args_list
for call in exp_ingress_classifier, exp_egress_classifier, filter_rule:
self.assertIn(call, calls)
def test_prepare_port_filter_port_security_disabled(self):
port_dict = {'device': 'port-id',
'security_groups': [1],
'port_security_enabled': False}
self._prepare_security_group()
with mock.patch.object(
self.firewall, 'initialize_port_flows') as m_init_flows:
self.firewall.prepare_port_filter(port_dict)
self.assertFalse(m_init_flows.called)
def test_prepare_port_filter_initialized_port(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
self.assertFalse(self.mock_bridge.br.delete_flows.called)
self.firewall.prepare_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
def test_update_port_filter(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
port_dict['security_groups'] = [2]
self.mock_bridge.reset_mock()
self.firewall.update_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
conj_id = self.firewall.conj_ip_manager.conj_id_map.get_conj_id(
2, 2, firewall.EGRESS_DIRECTION, constants.IPv6)
filter_rules = [mock.call(
actions='resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_UDP,
priority=77,
ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
reg5=self.port_ofport,
table=ovs_consts.RULES_EGRESS_TABLE),
mock.call(
actions='conjunction({:d},2/2)'.format(conj_id + 6),
ct_state=ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
dl_type=mock.ANY,
nw_proto=6,
priority=73, reg5=self.port_ofport,
table=ovs_consts.RULES_EGRESS_TABLE)]
self.mock_bridge.br.add_flow.assert_has_calls(
filter_rules, any_order=True)
def test_update_port_filter_create_new_port_if_not_present(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
with mock.patch.object(
self.firewall, 'prepare_port_filter'
) as prepare_mock, mock.patch.object(
self.firewall, 'initialize_port_flows'
) as initialize_port_flows_mock, mock.patch.object(
self.firewall, 'add_flows_from_rules'
) as add_flows_from_rules_mock:
self.firewall.update_port_filter(port_dict)
self.assertFalse(prepare_mock.called)
self.assertFalse(self.mock_bridge.br.delete_flows.called)
self.assertTrue(initialize_port_flows_mock.called)
self.assertTrue(add_flows_from_rules_mock.called)
def test_update_port_filter_port_security_disabled(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
port_dict['port_security_enabled'] = False
self.firewall.update_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
def test_update_port_filter_applies_added_flows(self):
"""Check flows are applied right after _set_flows is called."""
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
with self.firewall.defer_apply():
self.firewall.update_port_filter(port_dict)
self.assertEqual(2, self.mock_bridge.apply_flows.call_count)
def test_remove_port_filter(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
self.firewall.remove_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
self.assertIn(1, self.firewall.sg_to_delete)
def test_remove_port_filter_port_security_disabled(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self.firewall.remove_port_filter(port_dict)
self.assertFalse(self.mock_bridge.br.delete_flows.called)
def test_update_security_group_rules(self):
"""Just make sure it doesn't crash"""
new_rules = [
{'ethertype': constants.IPv4,
'direction': firewall.INGRESS_DIRECTION,
'protocol': constants.PROTO_NAME_ICMP},
{'ethertype': constants.IPv4,
'direction': firewall.EGRESS_DIRECTION,
'remote_group_id': 2}]
self.firewall.update_security_group_rules(1, new_rules)
def test_update_security_group_members(self):
"""Just make sure it doesn't crash"""
new_members = {constants.IPv4: [1, 2, 3, 4]}
self.firewall.update_security_group_members(2, new_members)
def test__cleanup_stale_sg(self):
self._prepare_security_group()
self.firewall.sg_to_delete = {1}
with mock.patch.object(self.firewall.conj_ip_manager,
'sg_removed') as sg_removed_mock,\
mock.patch.object(self.firewall.sg_port_map,
'delete_sg') as delete_sg_mock:
self.firewall._cleanup_stale_sg()
sg_removed_mock.assert_called_once_with(1)
delete_sg_mock.assert_called_once_with(1)
def test_get_ovs_port(self):
ovs_port = self.firewall.get_ovs_port('port_id')
self.assertEqual(self.fake_ovs_port, ovs_port)
def test_get_ovs_port_non_existent(self):
self.mock_bridge.br.get_vif_port_by_id.return_value = None
with testtools.ExpectedException(exceptions.OVSFWPortNotFound):
self.firewall.get_ovs_port('port_id')
def test__initialize_egress_no_port_security_sends_to_egress(self):
self.mock_bridge.br.db_get_val.return_value = {'tag': TESTING_VLAN_TAG}
self.firewall._initialize_egress_no_port_security('port_id')
expected_call = mock.call(
table=ovs_consts.TRANSIENT_TABLE,
priority=100,
in_port=self.fake_ovs_port.ofport,
actions='set_field:%d->reg%d,'
'set_field:%d->reg%d,'
'resubmit(,%d)' % (
self.fake_ovs_port.ofport,
ovsfw_consts.REG_PORT,
TESTING_VLAN_TAG,
ovsfw_consts.REG_NET,
ovs_consts.ACCEPT_OR_INGRESS_TABLE)
)
calls = self.mock_bridge.br.add_flow.call_args_list
self.assertIn(expected_call, calls)
def test__initialize_egress_no_port_security_no_tag(self):
self.mock_bridge.br.db_get_val.return_value = {}
self.firewall._initialize_egress_no_port_security('port_id')
self.assertFalse(self.mock_bridge.br.add_flow.called)
def test__remove_egress_no_port_security_deletes_flow(self):
self.mock_bridge.br.db_get_val.return_value = {'tag': TESTING_VLAN_TAG}
self.firewall.sg_port_map.unfiltered['port_id'] = 1
self.firewall._remove_egress_no_port_security('port_id')
expected_call = mock.call(
table=ovs_consts.TRANSIENT_TABLE,
in_port=self.fake_ovs_port.ofport,
)
calls = self.mock_bridge.br.delete_flows.call_args_list
self.assertIn(expected_call, calls)
def test__remove_egress_no_port_security_non_existing_port(self):
with testtools.ExpectedException(exceptions.OVSFWPortNotHandled):
self.firewall._remove_egress_no_port_security('foo')
def test_process_trusted_ports_caches_port_id(self):
self.firewall.process_trusted_ports(['port_id'])
self.assertIn('port_id', self.firewall.sg_port_map.unfiltered)
def test_process_trusted_ports_port_not_found(self):
"""Check that exception is not propagated outside."""
self.mock_bridge.br.get_vif_port_by_id.return_value = None
self.firewall.process_trusted_ports(['port_id'])
# Processing should have failed so port is not cached
self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered)
def test_remove_trusted_ports_clears_cached_port_id(self):
self.firewall.sg_port_map.unfiltered['port_id'] = 1
self.firewall.remove_trusted_ports(['port_id'])
self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered)
def test_remove_trusted_ports_not_managed_port(self):
"""Check that exception is not propagated outside."""
self.firewall.remove_trusted_ports(['port_id'])
class TestCookieContext(base.BaseTestCase):
def setUp(self):
super(TestCookieContext, self).setUp()
# Don't attempt to connect to ovsdb
mock.patch('neutron.agent.ovsdb.api.from_config').start()
# Don't trigger iptables -> ovsfw migration
mock.patch(
'neutron.agent.linux.openvswitch_firewall.iptables.Helper').start()
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
bridge = ovs_bridge.OVSAgentBridge('foo')
mock.patch.object(
ovsfw.OVSFirewallDriver, 'initialize_bridge',
return_value=bridge.deferred(
full_ordered=True, use_bundle=True)).start()
self.firewall = ovsfw.OVSFirewallDriver(bridge)
# Remove calls from firewall initialization
self.execute.reset_mock()
def test_cookie_is_different_in_context(self):
default_cookie = self.firewall.int_br.br.default_cookie
with self.firewall.update_cookie_context():
self.firewall._add_flow(actions='drop')
update_cookie = self.firewall._update_cookie
self.firewall._add_flow(actions='drop')
expected_calls = [
mock.call(
mock.ANY,
process_input='hard_timeout=0,idle_timeout=0,priority=1,'
'cookie=%d,actions=drop' % cookie,
run_as_root=mock.ANY,
) for cookie in (update_cookie, default_cookie)
]
self.execute.assert_has_calls(expected_calls)
def test_context_cookie_is_not_left_as_used(self):
with self.firewall.update_cookie_context():
update_cookie = self.firewall._update_cookie
self.assertNotIn(
update_cookie,
self.firewall.int_br.br._reserved_cookies)
| 42.929024 | 79 | 0.630953 | [
"Apache-2.0"
] | mmidolesov2/neutron | neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py | 33,871 | Python |
"""
Test Sermin config module
"""
from sermin.config.module import Registry, Namespace, Setting, settings
from sermin.config.utils import parse_args
from .utils import SafeTestCase
class SettingsTest(SafeTestCase):
def setUp(self):
self.old_settings = settings._namespaces
settings._clear()
def tearDown(self):
settings.__dict__['_namespaces'] = self.old_settings
def test_settings_exists(self):
self.assertIsInstance(settings, Registry)
def test_create_namespace(self):
settings.test = 'Test settings'
self.assertIsInstance(settings.test, Namespace)
self.assertEqual(settings.test._label, 'Test settings')
def test_create_setting(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test setting')
self.assertIsInstance(settings.test._settings['setting'], Setting)
self.assertEqual(
settings.test._settings['setting'].label, 'Test setting',
)
self.assertEqual(settings.test.setting, None)
def test_set_setting(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test setting')
settings.test.setting = 'Testing'
self.assertEqual(settings.test.setting, 'Testing')
def test_cannot_redefine_namespace(self):
settings.test = 'Test settings'
with self.assertRaisesRegexp(
ValueError, r'^Namespaces cannot be redefined$',
):
settings.test = 'Second assignment'
def test_cannot_redefine_setting(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test setting')
with self.assertRaisesRegexp(
ValueError, r'^Settings cannot be redefined$',
):
settings.test.setting = Setting('Second assignment')
def test_setting_evaluates_bool(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test')
settings.test.setting = False
self.assertTrue(type(settings.test.setting), bool)
self.assertFalse(settings.test.setting)
class ParseArgsTest(SafeTestCase):
def test_empty(self):
unnamed, named = parse_args('')
self.assertIsInstance(unnamed, list)
self.assertIsInstance(named, dict)
self.assertEqual(len(unnamed), 0)
self.assertEqual(len(named), 0)
| 33.676056 | 74 | 0.670849 | [
"BSD-3-Clause"
] | radiac/sermin | tests/test_config.py | 2,391 | Python |
import numpy as np
from rafiki.constants import TaskType
def ensemble_predictions(predictions_list, predict_label_mappings, task):
# TODO: Better ensembling of predictions based on `predict_label_mapping` & `task` of models
if len(predictions_list) == 0 or len(predictions_list[0]) == 0:
return []
# By default, just return some trial's predictions
index = 0
predictions = predictions_list[index]
predict_label_mapping = predict_label_mappings[index]
if task == TaskType.IMAGE_CLASSIFICATION:
# Map probabilities to most probable label
pred_indices = np.argmax(predictions, axis=1)
predictions = [predict_label_mapping[str(i)] for i in pred_indices]
return predictions
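# Sketch of one way to address the TODO above for IMAGE_CLASSIFICATION: average the
# per-trial probability matrices before taking the argmax. This assumes all trials
# share the same predict_label_mapping and return aligned probability arrays; the
# helper name is illustrative only.
def average_probabilities(predictions_list, predict_label_mapping):
    # Stack to shape (num_trials, num_samples, num_classes) and average over trials
    stacked = np.stack([np.asarray(p) for p in predictions_list])
    mean_probs = stacked.mean(axis=0)
    pred_indices = np.argmax(mean_probs, axis=1)
    return [predict_label_mapping[str(i)] for i in pred_indices]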
| 33.772727 | 96 | 0.725437 | [
"Apache-2.0"
] | zlheui/rafiki | rafiki/predictor/ensemble.py | 743 | Python |
"""Auto-generated file, do not edit by hand. EC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_EC = PhoneMetadata(id='EC', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[19]\\d{2}', possible_number_pattern='\\d{3}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:0[12]|12)|911', possible_number_pattern='\\d{3}', example_number='911'),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[12]|12)|911', possible_number_pattern='\\d{3}', example_number='911'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
| 76.384615 | 132 | 0.784491 | [
"Apache-2.0"
] | CygnusNetworks/python-phonenumbers | python/phonenumbers/shortdata/region_EC.py | 993 | Python |
import django_filters
from django_filters import DateFilter
from .models import Pet
class PetFilter(django_filters.FilterSet):
# name = django_filters.CharFilter(lookup_expr='iexact')
start_date = DateFilter(field_name = "age", lookup_expr='gte') #greater or equal to
end_date = DateFilter(field_name = "age", lookup_expr='lte') #less or equal to
class Meta:
model = Pet
fields = ['age',
'pet_type','breed','size', 'sex' ,'vaccinated',
'castrated','dewormed','vulnerable', ]
exclude = ['age']
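# Typical usage sketch; the function name and querydict argument are illustrative,
# not part of the project:
def filter_pets(querydict):
    """Apply PetFilter to a GET-style querydict (e.g. request.GET) and return the queryset."""
    filtered = PetFilter(querydict, queryset=Pet.objects.all())
    return filtered.qs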
| 38.4 | 87 | 0.640625 | [
"MIT"
] | Me-Adota/website | pets/filters.py | 576 | Python |
import pytest
from app import crud
from app.schemas import EpisodeCreate
from app.schemas.episode import EpisodeSearch
from app.tests.utils import random_segment
def test_get_episode(db):
ep_in = EpisodeCreate(name="ep1", air_date="2022-03-04", segment=random_segment())
ep = crud.episode.create(db, ep_in)
assert crud.episode.get(db, ep.id)
def test_get_multi_episode(db):
ep_in = EpisodeCreate(name="ep11", air_date="2022-03-04", segment=random_segment())
crud.episode.create(db, ep_in)
ep_in = EpisodeCreate(name="ep12", air_date="2022-03-04", segment=random_segment())
crud.episode.create(db, ep_in)
assert len(crud.episode.get_multi(db)) == 2
@pytest.mark.parametrize(
"test_input,expected",
[
({"air_date__lte": "2022-03-01"}, 1),
({"air_date__gte": "2022-04-01"}, 4),
({"air_date__gte": "2022-03-01", "air_date__lte": "2022-05-01"}, 3),
],
)
def test_date_search_episode(db, setup, test_input, expected):
obj_in = EpisodeSearch(**test_input)
assert len(crud.episode.search(db, obj_in)) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
({"name__icontains": "Nibelheim"}, 0),
({"name__icontains": "Midgar"}, 1),
({"name__icontains": "mId"}, 1),
({"name__icontains": "GaR"}, 1),
],
)
def test_name_search_episode(db, setup, test_input, expected):
obj_in = EpisodeSearch(**test_input)
assert len(crud.episode.search(db, obj_in)) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
({"segment": "s01e15"}, 0),
({"segment": "S01e01"}, 1),
],
)
def test_segment_search_episode(db, setup, test_input, expected):
obj_in = EpisodeSearch(**test_input)
assert len(crud.episode.search(db, obj_in)) == expected
| 29.129032 | 87 | 0.662237 | [
"MIT"
] | flsworld/comment-rick-n-morty | backend/app/tests/unit/crud/test_episode.py | 1,806 | Python |
# coding:utf-8
import sys
import codecs
from pathlib import Path
from collections import defaultdict
MAIN_PATH = Path(__file__).absolute().parent.parent.parent
sys.path.insert(0, str(MAIN_PATH))
from log import log_info as _info
from log import log_error as _error
from log import print_process as _process
class Vertex(object):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.value == other.value
def __str__(self):
return str(self.value)
def __lt__(self, other):
return self.value < other.value
def __hash__(self):
return hash(self.value)
class Graph(object):
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
# for saving the nodes which have no outgoing arc
if v not in self.graph.keys():
self.graph[v] = []
def DFSSearchInner(self, u, explored_list):
explored_list[u] = True
self.cache.append(u)
for v in self.graph[u]:
if not explored_list[v]:
self.DFSSearchInner(v, explored_list)
def DFSSearch(self, u):
explored_list = {}
for v in self.graph.keys():
explored_list[v] = False
self.cache = []
self.DFSSearchInner(u, explored_list)
return self.cache
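  # Kosaraju-style pass: iterate vertices in the supplied order, run a DFS from each
  # unexplored vertex, record every vertex's finishing time and group the vertices
  # reached from each starting vertex under that starting vertex (its "leader").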
def SCCSearch(self, v_sorted):
self.t = 0
self.finish_time = {}
explored_list = {}
for v in self.graph.keys():
explored_list[v] = False
leaders = {}
for v in v_sorted:
if not explored_list[v]:
leaders[v] = []
self.SCCSearch_DFS(v, explored_list, leaders[v])
return self.finish_time, leaders
def SCCSearch_DFS(self, v, explored_list, leaders):
explored_list[v] = True
for u in self.graph[v]:
if not explored_list[u]:
leaders.append(u)
self.SCCSearch_DFS(u, explored_list, leaders)
self.t += 1
self.finish_time[v] = self.t
def readFile(path):
_info('Start building graph...')
graph = Graph()
with codecs.open(path, 'r', 'utf-8') as file:
data = file.read().split('\n')
for line in data:
line_split = line.split(' ')
u, other = line_split[0], line_split[1:]
u_obj = Vertex(u)
for v in other:
v_obj = Vertex(v)
graph.addEdge(u_obj, v_obj)
_info('Finish building graph...')
return graph
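# Expected layout of the input file (inferred from readFile above; the file itself is not
# part of this snippet): one whitespace-separated adjacency list per line, e.g.
#   a b c    -> adds edges a->b and a->c
#   c a      -> adds edge  c->a
# Vertex labels are arbitrary whitespace-free strings.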
def reverseGraph(graph):
v_unsorted = list(graph.graph.keys())
v_sorted = sorted(v_unsorted, reverse=True)
# reverse the graph
graph_rev = Graph()
for v in v_unsorted:
for u in graph.graph[v]:
graph_rev.addEdge(u, v)
return graph_rev, v_sorted
if __name__ == '__main__':
graph = readFile('test_scc.txt')
# sanity check
_info('Check the graph:')
cache = graph.DFSSearch(Vertex('b'))
for v in cache:
print(v, end=' ')
_info('Finish checking!', head='\n INFO')
# reverse the graph
_info('Reverse the graph...')
graph_rev, v_sorted = reverseGraph(graph)
# sanity check
_info('Check the graph:')
cache = graph_rev.DFSSearch(Vertex('a'))
for v in cache:
print(v, end=' ')
_info('Finish checking!', head='\n INFO')
# find SCCs
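  # Kosaraju's two-pass scheme as implemented here: the first DFS pass (on the reversed
  # graph) records finishing times; the second pass runs DFS on the original graph in
  # decreasing finishing-time order, and each DFS tree it grows is one strongly
  # connected component.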
finish_time, _ = graph_rev.SCCSearch(v_sorted)
v_2nd_pass = reversed([v for v, _ in finish_time.items()])
_info('Start finding SCCs...')
_, leaders = graph.SCCSearch(v_2nd_pass)
for k in leaders.keys():
print(k)
_info('Result:')
for key, value in leaders.items():
print(key)
for v in value:
print(v)
print() | 23.466216 | 60 | 0.644976 | [
"Apache-2.0"
] | KnightZhang625/Stanford_Algorithm | Course_2/Week_01/3_SCC.py | 3,473 | Python |
d = set()  # statements proven so far
for i in range(int(input())):
    parts = input().split('-> ')
    # A line with no '-> ' is an axiom and can be asserted directly.
    if len(parts) == 1:
        d.add(parts[0])
        continue
    premises, conclusion = parts
    # Every premise must already be proven; otherwise line i + 1 is the first invalid one.
    for premise in premises.split():
        if premise not in d:
            print(i + 1)
            exit()
    d.add(conclusion)
print('correct')
| 15.266667 | 29 | 0.475983 | [
"CC0-1.0"
] | terror/CompetitiveProgramming | kattis/proofs.py | 229 | Python |
#!/usr/bin/env python
from multipledispatch import dispatch as Override
import rospy
import threading
from std_msgs.msg import Float64
from araig_msgs.msg import BoolStamped
from base_classes.base_calculator import BaseCalculator
"""Compare data from one topic with one param
pub_list = {"out_bool": "BoolStamped"}
sub_list = {"in_float": "Float64"}
rosparam
inherit Base, only modify compare function"""
class compParam(BaseCalculator):
_pub_topic = "/out_bool"
_sub_topic = "/in_float"
def __init__(self,
sub_dict = {_sub_topic: Float64},
pub_dict = {_pub_topic: BoolStamped},
rosparam = None,
tolerance = 0,
rate = None):
if rosparam == None:
rospy.logerr(rospy.get_name() + ": Please provide rosparam")
else:
self.compare_param = rosparam
self.tolerance = tolerance
super(compParam, self).__init__(
sub_dict = sub_dict,
pub_dict = pub_dict,
rate = rate)
@Override()
def calculate(self):
with BaseCalculator.LOCK[self._sub_topic]:
current_vel = BaseCalculator.MSG[self._sub_topic]
flag_test_ready = True
        if current_vel is None:
flag_test_ready = False
if flag_test_ready == True:
msg = self.PubDict[self._pub_topic]()
msg.header.stamp = rospy.Time.now()
if abs(self.compare_param - current_vel.data) <= self.tolerance:
msg.data = True
else:
msg.data = False
self.PubDiag[self._pub_topic].publish(msg) | 29.553571 | 76 | 0.610876 | [
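# A minimal launch sketch (illustrative only; the node name, parameter name and rate value
# are assumptions, and BaseCalculator is assumed to drive the subscribe/publish loop):
#
#   if __name__ == "__main__":
#       rospy.init_node("comp_param_node")
#       comparator = compParam(rosparam=rospy.get_param("~expected_value", 1.0),
#                              tolerance=0.05,
#                              rate=10)
#       rospy.spin()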
"Apache-2.0"
] | ipa-kut/araig_test_stack | araig_calculators/src/comparators/comp_param.py | 1,655 | Python |
from deadfroglib import *
import Image
import math
# set up the colors
BLACK = 0xff000000
WHITE = 0xffffffff
im = Image.open("willow.bmp")
imOut = Image.new(im.mode, im.size)
graph3d = CreateGraph3d()
minA = 1000
maxA = -1000
minB = 1000
maxB = -1000
minC = 1000
maxC = -1000
err = 0.0
for y in range(im.size[1]):
for x in range(im.size[0]):
(r, g, b) = im.getpixel((x, y))
# ya = round(0.299*r + 0.587*g + 0.114*b)
# cb = round(128 - 0.168736*r - 0.331264*g + 0.5*b)
# cr = round(128 + 0.5*r - 0.418688*g - 0.081312*b)
# ya = round( r + g + b)
# cb = round( r - g)
# cr = round( r + g - 2*b)
ya = round((+0.61333*r + 0.58095*g + 0.53509*b) * 0.575)
cb = round((-0.32762*r + 0.80357*g - 0.49693*b) * 0.575) + 128
cr = round((+0.71868*r - 0.12948*g - 0.68318*b) * 0.575) + 128
# {{0.61333, 0.58095, 0.53509}, {-0.32762, 0.80357, -0.49693}, {0.71868, -0.12948, -0.68318}}
if ya < minA: minA = ya
if ya > maxA: maxA = ya
if cr < minB: minB = cr
if cr > maxB: maxB = cr
if cb < minC: minC = cb
if cb > maxC: maxC = cb
# Invert Y Cr Cb
r2 = ya + 1.402 * (cr - 128)
g2 = ya - 0.34414 * (cb - 128) - 0.71414 * (cr - 128)
b2 = ya + 1.772 * (cb - 128)
# # Invert custom colour space
# cr -= 128
# cb -= 128
# ya /= 0.575
# cr /= 0.575
# cb /= 0.575
# r2 = 0.61333 * ya - 0.32762 * cb + 0.71868 * cr
# g2 = 0.58095 * ya + 0.80357 * cb - 0.12948 * cr
# b2 = 0.53509 * ya - 0.49693 * cb - 0.68318 * cr
# if r != r2 or g != g2 or b != b2:
# print "%5.2f %5.2f %5.2f" % (r,g,b)
# print "%5.2f %5.2f %5.2f" % (r2,g2,b2)
# print
# Calc RMS error for this pixel
err += math.sqrt((r - round(r2)) ** 2 +
(g - round(g2)) ** 2 +
(b - round(b2)) ** 2)
col = (r << 16) + (g << 8) + b
# Graph3dAddPoint(graph3d, ya-128, cr, cb, col)
# print "%6.2f %6.2f %6.2f" % (ya, cr, cb)
Graph3dAddPoint(graph3d, r-128, g-128, b-128, col)
imOut.putpixel((x,y), (int(ya), int(cr), int(cb)))
imOut.save("foo.bmp")
print "Min:", minA, minB, minC
print "Max:", maxA, maxB, maxC
#Graph3dAddPoint(graph3d, -128, -128, -128, WHITE)
#Graph3dAddPoint(graph3d, 128, 128, 128, WHITE)
print "err", err / (im.size[0] * im.size[1])
# set up the window
screenw = 600
screenh = 600
win = CreateWin(500, 50, screenw, screenh, True, '3d plot')
input = win.contents.inputManager.contents
font = CreateTextRenderer("Fixedsys", 8, True)
dist = 730.0
zoom = 800.0
rotX = 0.0
rotZ = 0.0
cx = screenw / 2
cy = screenh / 2
while not win.contents.windowClosed and input.keys[KEY_ESC] == 0:
bmp = AdvanceWin(win)
ClearBitmap(bmp, WHITE)
if input.lmb:
if input.keys[KEY_SHIFT]:
cx += input.mouseVelX
cy += input.mouseVelY
else:
rotX -= float(input.mouseVelY) * 0.01
rotZ += float(input.mouseVelX) * 0.01
#rotZ += 0.03
zoom *= 1.0 + (input.mouseVelZ * 0.05)
Graph3dRender(graph3d, bmp, cx, cy, dist, zoom, BLACK, rotX, rotZ)
DrawTextSimple(font, BLACK, bmp, screenw - 100, 5, str(win.contents.fps))
RectFill(bmp, 0, screenh - 40, 230, 40, WHITE)
DrawTextSimple(font, BLACK, bmp, 10, screenh - 35, "Hold left mouse to rotate")
DrawTextSimple(font, BLACK, bmp, 10, screenh - 20, "Mouse wheel to zoom")
| 29.87395 | 100 | 0.52602 | [
"MIT"
] | abainbridge/deadfrog-lib | python/graph3d.py | 3,555 | Python |
# Copyright (c) OpenMMLab. All rights reserved.
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
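# This is a standard OpenMMLab-style config dict; with an MMPose checkout it would
# typically be consumed by the training entry point, e.g. (paths are illustrative):
#   python tools/train.py demo/hrnet_w32_coco_256x192.py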
| 28.810345 | 79 | 0.591063 | [
"Apache-2.0"
] | CCODING04/mmaction2 | demo/hrnet_w32_coco_256x192.py | 5,013 | Python |
import codecs
from xml.sax.saxutils import quoteattr, escape
__all__ = ['XMLWriter']
ESCAPE_ENTITIES = {
'\r': ' '
}
class XMLWriter(object):
def __init__(self, stream, namespace_manager, encoding=None,
decl=1, extra_ns=None):
encoding = encoding or 'utf-8'
encoder, decoder, stream_reader, stream_writer = \
codecs.lookup(encoding)
self.stream = stream = stream_writer(stream)
if decl:
stream.write('<?xml version="1.0" encoding="%s"?>' % encoding)
self.element_stack = []
self.nm = namespace_manager
self.extra_ns = extra_ns or {}
self.closed = True
def __get_indent(self):
return " " * len(self.element_stack)
indent = property(__get_indent)
def __close_start_tag(self):
if not self.closed: # TODO:
self.closed = True
self.stream.write(">")
def push(self, uri):
self.__close_start_tag()
write = self.stream.write
write("\n")
write(self.indent)
write("<%s" % self.qname(uri))
self.element_stack.append(uri)
self.closed = False
self.parent = False
def pop(self, uri=None):
top = self.element_stack.pop()
if uri:
assert uri == top
write = self.stream.write
if not self.closed:
self.closed = True
write("/>")
else:
if self.parent:
write("\n")
write(self.indent)
write("</%s>" % self.qname(top))
self.parent = True
def element(self, uri, content, attributes={}):
"""Utility method for adding a complete simple element"""
self.push(uri)
for k, v in attributes.iteritems():
self.attribute(k, v)
self.text(content)
self.pop()
def namespaces(self, namespaces=None):
if not namespaces:
namespaces = self.nm.namespaces()
write = self.stream.write
write("\n")
for prefix, namespace in namespaces:
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
# Allow user-provided namespace bindings to prevail
elif prefix not in self.extra_ns:
write(' xmlns="%s"\n' % namespace)
for prefix, namespace in self.extra_ns.items():
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
def attribute(self, uri, value):
write = self.stream.write
write(" %s=%s" % (self.qname(uri), quoteattr(value)))
def text(self, text):
self.__close_start_tag()
if "<" in text and ">" in text and not "]]>" in text:
self.stream.write("<![CDATA[")
self.stream.write(text)
self.stream.write("]]>")
else:
self.stream.write(escape(text, ESCAPE_ENTITIES))
def qname(self, uri):
"""Compute qname for a uri using our extra namespaces,
or the given namespace manager"""
for pre, ns in self.extra_ns.items():
if uri.startswith(ns):
if pre != "":
return ":".join(pre, uri[len(ns):])
else:
return uri[len(ns):]
return self.nm.qname(uri)
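# Typical call pattern (sketch; `nm` would normally be an rdflib NamespaceManager and
# `stream` a binary file-like object, both assumptions here):
#   w = XMLWriter(stream, nm, extra_ns={"ex": "http://example.org/"})
#   w.push("http://example.org/root")
#   w.namespaces()
#   w.element("http://example.org/child", "text", {"http://example.org/attr": "1"})
#   w.pop()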
| 30.594595 | 74 | 0.535925 | [
"Apache-2.0"
] | 27theworldinurhand/schemaorg | lib/rdflib/plugins/serializers/xmlwriter.py | 3,396 | Python |
from tests.conftest import log_in
def test_logout_auth_user(test_client):
"""
GIVEN a flask app
WHEN an authorized user logs out
THEN check that the user was logged out successfully
"""
log_in(test_client)
response = test_client.get("auth/logout", follow_redirects=True)
assert response.status_code == 200
# assert b"<!-- index.html -->" in response.data # Removed -- COVID
assert b"You have been logged out." in response.data
def test_logout_anon_user(test_client):
"""
GIVEN a flask app
    WHEN an anon user attempts to log out
THEN check that a message flashes informing them that they are already logged out.
"""
response = test_client.get("auth/logout", follow_redirects=True)
assert response.status_code == 200
# assert b"<!-- index.html -->" in response.data # Removed -- COVID
assert b"You were not, and still are not, logged in." in response.data
| 34.518519 | 86 | 0.697425 | [
"MIT"
] | KGB33/Wedding-Website | tests/test_auth/test_logout.py | 932 | Python |
def demo():
"""Output:
---------⌝
----------
----?????-
----------
----------
--!!!-----
--!!!-----
----------
----------
⌞---------
"""
n = 10
# Construction is easy:
grid = {}
# Assignment is easy:
grid[(0, 0)] = "⌞"
grid[(n - 1, n - 1)] = "⌝"
# Helper functions that just work on the dictionary:
fill(grid, "!", start=(2, 3), stop=(5, 5))
fill(grid, "?", start=(4, 7), stop=(9, 8))
print(stringify(grid, n))
def fill(grid: dict, value: str, start=(0, 0), stop=(0, 0)):
"""Using product allows for flatter loops."""
from itertools import product
for coord in product(range(start[0], stop[0]), range(start[1], stop[1])):
grid[coord] = value
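# For comparison, the same fill written with explicit nested loops:
#   for x in range(start[0], stop[0]):
#       for y in range(start[1], stop[1]):
#           grid[(x, y)] = value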
def stringify(grid: dict, n: int) -> str:
"""Stringify with (0, 0) in the lower-left corner."""
rows = []
for y in reversed(range(n)):
row = []
for x in range(n):
value = grid.get((x, y), "-")
row.append(value)
rows.append(row)
return "\n".join("".join(row) for row in rows)
if __name__ == "__main__":
demo()
| 20.745455 | 77 | 0.45837 | [
"MIT"
] | ssangervasi/examples | examples/grids/python/grid.py | 1,149 | Python |
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Selection(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ancestor_counts': 'bool',
'record_set': 'RecordSet',
'rule': 'Rule',
'rfv': 'RFV',
'n_per': 'NPer',
'top_n': 'TopN',
'limits': 'Limits',
'table_name': 'str',
'name': 'str'
}
attribute_map = {
'ancestor_counts': 'ancestorCounts',
'record_set': 'recordSet',
'rule': 'rule',
'rfv': 'rfv',
'n_per': 'nPer',
'top_n': 'topN',
'limits': 'limits',
'table_name': 'tableName',
'name': 'name'
}
def __init__(self, ancestor_counts=None, record_set=None, rule=None, rfv=None, n_per=None, top_n=None, limits=None, table_name=None, name=None): # noqa: E501
"""Selection - a model defined in OpenAPI""" # noqa: E501
self._ancestor_counts = None
self._record_set = None
self._rule = None
self._rfv = None
self._n_per = None
self._top_n = None
self._limits = None
self._table_name = None
self._name = None
self.discriminator = None
if ancestor_counts is not None:
self.ancestor_counts = ancestor_counts
if record_set is not None:
self.record_set = record_set
if rule is not None:
self.rule = rule
if rfv is not None:
self.rfv = rfv
if n_per is not None:
self.n_per = n_per
if top_n is not None:
self.top_n = top_n
if limits is not None:
self.limits = limits
self.table_name = table_name
if name is not None:
self.name = name
@property
def ancestor_counts(self):
"""Gets the ancestor_counts of this Selection. # noqa: E501
:return: The ancestor_counts of this Selection. # noqa: E501
:rtype: bool
"""
return self._ancestor_counts
@ancestor_counts.setter
def ancestor_counts(self, ancestor_counts):
"""Sets the ancestor_counts of this Selection.
:param ancestor_counts: The ancestor_counts of this Selection. # noqa: E501
:type: bool
"""
self._ancestor_counts = ancestor_counts
@property
def record_set(self):
"""Gets the record_set of this Selection. # noqa: E501
:return: The record_set of this Selection. # noqa: E501
:rtype: RecordSet
"""
return self._record_set
@record_set.setter
def record_set(self, record_set):
"""Sets the record_set of this Selection.
:param record_set: The record_set of this Selection. # noqa: E501
:type: RecordSet
"""
self._record_set = record_set
@property
def rule(self):
"""Gets the rule of this Selection. # noqa: E501
:return: The rule of this Selection. # noqa: E501
:rtype: Rule
"""
return self._rule
@rule.setter
def rule(self, rule):
"""Sets the rule of this Selection.
:param rule: The rule of this Selection. # noqa: E501
:type: Rule
"""
self._rule = rule
@property
def rfv(self):
"""Gets the rfv of this Selection. # noqa: E501
:return: The rfv of this Selection. # noqa: E501
:rtype: RFV
"""
return self._rfv
@rfv.setter
def rfv(self, rfv):
"""Sets the rfv of this Selection.
:param rfv: The rfv of this Selection. # noqa: E501
:type: RFV
"""
self._rfv = rfv
@property
def n_per(self):
"""Gets the n_per of this Selection. # noqa: E501
:return: The n_per of this Selection. # noqa: E501
:rtype: NPer
"""
return self._n_per
@n_per.setter
def n_per(self, n_per):
"""Sets the n_per of this Selection.
:param n_per: The n_per of this Selection. # noqa: E501
:type: NPer
"""
self._n_per = n_per
@property
def top_n(self):
"""Gets the top_n of this Selection. # noqa: E501
:return: The top_n of this Selection. # noqa: E501
:rtype: TopN
"""
return self._top_n
@top_n.setter
def top_n(self, top_n):
"""Sets the top_n of this Selection.
:param top_n: The top_n of this Selection. # noqa: E501
:type: TopN
"""
self._top_n = top_n
@property
def limits(self):
"""Gets the limits of this Selection. # noqa: E501
:return: The limits of this Selection. # noqa: E501
:rtype: Limits
"""
return self._limits
@limits.setter
def limits(self, limits):
"""Sets the limits of this Selection.
:param limits: The limits of this Selection. # noqa: E501
:type: Limits
"""
self._limits = limits
@property
def table_name(self):
"""Gets the table_name of this Selection. # noqa: E501
:return: The table_name of this Selection. # noqa: E501
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""Sets the table_name of this Selection.
:param table_name: The table_name of this Selection. # noqa: E501
:type: str
"""
if table_name is None:
raise ValueError("Invalid value for `table_name`, must not be `None`") # noqa: E501
self._table_name = table_name
@property
def name(self):
"""Gets the name of this Selection. # noqa: E501
:return: The name of this Selection. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Selection.
:param name: The name of this Selection. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Selection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.885449 | 162 | 0.553496 | [
"Apache-2.0"
] | Apteco/apteco-api | apteco_api/models/selection.py | 8,038 | Python |
"""
mavDynamics
- this file implements the dynamic equations of motion for MAV
- use unit quaternion for the attitude state
part of mavPySim
- Beard & McLain, PUP, 2012
- Update history:
12/20/2018 - RWB
2/24/2020
"""
import sys
sys.path.append('..')
import numpy as np
# load message types
from message_types.msg_state import msgState
import parameters.aerosonde_parameters as MAV
from tools.rotations import Quaternion2Rotation, Quaternion2Euler, skew, quat_prod
import mavsim_python_chap5_model_coef as chap5
class mavDynamics:
def __init__(self, Ts):
self._ts_simulation = Ts
# set initial states based on parameter file
# _state is the 13x1 internal state of the aircraft that is being propagated:
# _state = [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r]
# We will also need a variety of other elements that are functions of the _state and the wind.
# self.true_state is a 19x1 vector that is estimated and used by the autopilot to control the aircraft:
# true_state = [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz]
self._state = np.array([[MAV.pn0], # (0)
[MAV.pe0], # (1)
[MAV.pd0], # (2)
[MAV.u0], # (3)
[MAV.v0], # (4)
[MAV.w0], # (5)
[MAV.e0], # (6)
[MAV.e1], # (7)
[MAV.e2], # (8)
[MAV.e3], # (9)
[MAV.p0], # (10)
[MAV.q0], # (11)
[MAV.r0]]) # (12)
# store wind data for fast recall since it is used at various points in simulation
self._wind = np.array([[0.], [0.], [0.]]) # wind in NED frame in meters/sec
self._update_velocity_data()
# store forces to avoid recalculation in the sensors function
        self._forces = np.zeros((3, 1))
ur = self._state.item(3)
vr = self._state.item(4)
wr = self._state.item(5)
self._Va = np.sqrt(ur**2 + vr**2 + wr**2)
self._alpha = np.arctan2(wr,ur)
self._beta = np.arcsin(vr/self._Va)
# initialize true_state message
self.true_state = msgState()
###################################
# public functions
def update(self, delta, wind):
"""
Integrate the differential equations defining dynamics, update sensors
delta = (delta_a, delta_e, delta_r, delta_t) are the control inputs
wind is the wind vector in inertial coordinates
Ts is the time step between function calls.
"""
        # get forces and moments acting on the rigid body
forces_moments = self._forces_moments(delta)
# Integrate ODE using Runge-Kutta RK4 algorithm
time_step = self._ts_simulation
k1 = self._derivatives(self._state, forces_moments)
k2 = self._derivatives(self._state + time_step/2.*k1, forces_moments)
k3 = self._derivatives(self._state + time_step/2.*k2, forces_moments)
k4 = self._derivatives(self._state + time_step*k3, forces_moments)
self._state += time_step/6 * (k1 + 2*k2 + 2*k3 + k4)
# normalize the quaternion
e0 = self._state.item(6)
e1 = self._state.item(7)
e2 = self._state.item(8)
e3 = self._state.item(9)
normE = np.sqrt(e0**2+e1**2+e2**2+e3**2)
self._state[6][0] = self._state.item(6)/normE
self._state[7][0] = self._state.item(7)/normE
self._state[8][0] = self._state.item(8)/normE
self._state[9][0] = self._state.item(9)/normE
# update the airspeed, angle of attack, and side slip angles using new state
self._update_velocity_data(wind)
# update the message class for the true state
self._update_true_state()
def external_set_state(self, new_state):
self._state = new_state
###################################
# private functions
def _derivatives(self, x, u):
"""
        for the dynamics xdot = f(x, u), returns the state derivative f(x, u)
"""
# Get force, moment (torque)
f_b = u[:3]
m_b = u[3:]
# Get position, velocity, quaternion (rotation), angular velocity
r_i = x[:3] # wrt to i-frame
v_b = x[3:6] # wrt to i-frame
q_ib = x[6:10] # for rotation b to i-frame
w_b = x[10:] # wrt to b-frame
# Normalize quat. -> rotation
q_ib = q_ib/np.linalg.norm(q_ib) # normalize
R_ib = Quaternion2Rotation(q_ib)
# Compute equations of motion
# d/dt(r_i)
rdot_i = R_ib @ v_b
# d/dt(v_b)
vdot_b = (1/MAV.mass)*f_b-skew(w_b) @ v_b
# d/dt(q_ib)
wq_ib = np.zeros((4,1))
wq_ib[1:] = w_b
qdot_ib = 0.5 * quat_prod(wq_ib, q_ib)
wt_b = skew(w_b)
# d/dt(w_b)
wdot_b = np.linalg.inv(MAV.J) @ (m_b - (wt_b @ (MAV.J @ w_b)))
x_out = np.concatenate([rdot_i,vdot_b,qdot_ib,np.array(wdot_b)],axis = 0)
return x_out
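    # Summary of the rigid-body model integrated above (standard Newton-Euler form):
    #   position:  p_dot     = R_ib @ v_b                      (body velocity rotated to inertial axes)
    #   velocity:  v_dot     = f_b / m - omega x v_b           (Coriolis term from the rotating frame)
    #   attitude:  q_dot     = 0.5 * quat_prod([0, omega], q)
    #   rates:     omega_dot = J^-1 (m_b - omega x (J @ omega))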
def _update_velocity_data(self, wind=np.zeros((6,1))):
steady_state = wind[0:3]
gust = wind[3:6]
ur = self._state.item(3) - steady_state[0] - gust[0]
vr = self._state.item(4) - steady_state[1] - gust[1]
wr = self._state.item(5) - steady_state[2] - gust[2]
self._Va = np.sqrt(ur**2 + vr**2 + wr**2)
self._alpha = np.arctan2(wr,ur)
self._beta = np.arcsin(vr/self._Va)
def thrust_from_prop(self, delta_t):
# compute thrust and torque due to propeller (See addendum by McLain)
# map delta_t throttle command (0 to 1) into motor input voltage
V_in = MAV.V_max * delta_t
KQ = MAV.KQ
# Quadratic formula to solve for motor speed
a = MAV.C_Q0 * MAV.rho * np.power(MAV.D_prop, 5) / ((2. * np.pi )**2 )
b = (MAV.C_Q1 * MAV.rho * np.power(MAV.D_prop, 4) / (2.*np.pi)) * self._Va + KQ**2/MAV.R_motor
c = MAV.C_Q2 * MAV.rho * np.power(MAV.D_prop, 3) * self._Va**2 - (KQ / MAV.R_motor ) * V_in + KQ * MAV.i0
# Consider only positive root
Omega_op = (-b + np.sqrt(b**2 - 4*a* c)) / (2. * a )
# compute advance ratio
J_op = 2 * np.pi * self._Va / (Omega_op * MAV.D_prop)
# compute nondimensionalized coefficients of thrust and torque
C_T = MAV.C_T2 * J_op **2 + MAV.C_T1 * J_op + MAV.C_T0
C_Q = MAV.C_Q2 * J_op **2 + MAV.C_Q1 * J_op + MAV.C_Q0
# add thrust and torque due to propeller
n = Omega_op / (2 * np.pi )
fx = MAV.rho * n**2 * np.power(MAV.D_prop, 4) * C_T
Mx = -MAV.rho * n**2 * np.power(MAV.D_prop, 5) * C_Q
return fx,Mx
def sigma(self,alpha):
# pseudo sigmoid functions with cutoff +- alpha_0, returns coef btw 0 and 1
a1 = -MAV.M * (alpha - MAV.alpha0)
a2 = MAV.M * (alpha + MAV.alpha0)
sigma_alpha = (1 + np.exp(a1)+np.exp(a2)) / ((1+np.exp(a1))*(1+np.exp(a2)))
return sigma_alpha
def CL(self,alpha):
CL0 = MAV.C_L_0
CLA = MAV.C_L_alpha
sigma_alpha = self.sigma(alpha)
# returns lift coefficient using eq 4.9
CL_alpha = (1-sigma_alpha)*(CL0 + CLA*alpha) + sigma_alpha*(2*np.sign(alpha)*np.sin(alpha)**2 * np.cos(alpha))
return CL_alpha
def CD(self,alpha):
# returns drag coefficient using eq 4.11
CD_alpha = MAV.C_D_p + (MAV.C_L_0 + MAV.C_L_alpha*alpha)**2/(np.pi*MAV.e*MAV.AR)
return CD_alpha
def Cx(self,alpha):
return -self.CD(alpha)*np.cos(alpha) + self.CL(alpha)*np.sin(alpha)
def Cx_q(self,alpha):
return -MAV.C_D_q*np.cos(alpha) + MAV.C_L_q*np.sin(alpha)
def Cx_deltae(self,alpha):
return -MAV.C_D_delta_e*np.cos(alpha) + MAV.C_L_delta_e*np.sin(alpha)
def Cz(self,alpha):
return -self.CD(alpha)*np.sin(alpha)-self.CL(alpha)*np.cos(alpha)
def Cz_q(self,alpha):
return -MAV.C_D_q*np.sin(alpha)-MAV.C_L_q*np.cos(alpha)
def Cz_deltae(self,alpha):
return -MAV.C_D_delta_e*np.sin(alpha)-MAV.C_L_delta_e*np.cos(alpha)
def _forces_moments(self, delta):
"""
return the forces on the UAV based on the state, wind, and control surfaces
:param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)
:return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)
"""
phi, theta, psi = Quaternion2Euler(self._state[6:10])
p = self._state.item(10)
q = self._state.item(11)
r = self._state.item(12)
delta_e = delta.item(0)
delta_a = delta.item(1)
delta_r = delta.item(2)
delta_t = delta.item(3)
# Gravitational Components of Force, Moments = 0
mg = MAV.mass*MAV.gravity
fx_grav = -mg*np.sin(theta)
fy_grav = mg* np.cos(theta) * np.sin(phi)
fz_grav = mg* np.cos(theta) * np.cos(phi)
# Thrust Components of Force and Moments
fx_thrust,Mx_thrust = self.thrust_from_prop(delta_t)
fy_thrust = 0
fz_thrust = 0
My_thrust = 0
Mz_thrust = 0
# Aerodynamic Components of Forces and Moments
b = MAV.b
cyp = MAV.C_Y_p
cyr = MAV.C_Y_r
cydeltaa = MAV.C_Y_delta_a
cydeltar = MAV.C_Y_delta_r
aero_coef = 0.5*MAV.rho*self._Va**2*MAV.S_wing
fx_aero = aero_coef * (self.Cx(self._alpha) + self.Cx_q(self._alpha)*MAV.c/(2*self._Va)*q + self.Cx_deltae(self._alpha)*delta_e)
fy_aero = aero_coef * (MAV.C_Y_0 + MAV.C_Y_beta*self._beta + MAV.C_Y_p*b/(2*self._Va)*p + cyr * b/(2*self._Va)*r + cydeltaa * delta_a + cydeltar* delta_r)
fz_aero = aero_coef * (self.Cz(self._alpha) + self.Cz_q(self._alpha)*MAV.c/(2*self._Va)*q + self.Cz_deltae(self._alpha)*delta_e)
Mx_aero = aero_coef * MAV.b * (MAV.C_ell_0 + MAV.C_ell_beta*self._beta + MAV.C_ell_p*b/(2*self._Va)*p + MAV.C_ell_r*b/(2*self._Va)*r + MAV.C_ell_delta_a*delta_a + MAV.C_ell_delta_r*delta_r)
My_aero = aero_coef * MAV.c * (MAV.C_m_0 + MAV.C_m_alpha*self._alpha + MAV.C_m_q*MAV.c/(2*self._Va)*q + MAV.C_m_delta_e*delta_e)
Mz_aero = aero_coef * MAV.b * (MAV.C_n_0 + MAV.C_n_beta*self._beta + MAV.C_n_p*MAV.b/(2*self._Va)*p + MAV.C_n_r*MAV.b/(2*self._Va)*r + MAV.C_n_delta_a*delta_a + MAV.C_n_delta_r*delta_r)
fx = fx_grav + fx_aero + fx_thrust
fy = fy_grav + fy_aero + fy_thrust
fz = fz_grav + fz_aero + fz_thrust
# print('fx = ',fx)
# print('fy = ',fy)
# print('fz = ',fz)
Mx = Mx_aero + Mx_thrust
My = My_aero + My_thrust
Mz = Mz_aero + Mz_thrust
# print('Mx = ',Mx)
# print('My = ',My)
# print('Mz = ',Mz)
self._forces[0] = fx
self._forces[1] = fy
self._forces[2] = fz
fm = np.reshape(np.array([fx, fy, fz, Mx, My, Mz]),[6,1])
return fm
def _update_true_state(self):
# update the class structure for the true state:
# [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz]
phi, theta, psi = Quaternion2Euler(self._state[6:10])
pdot = Quaternion2Rotation(self._state[6:10]) @ self._state[3:6]
self.true_state.pn = self._state.item(0)
self.true_state.pe = self._state.item(1)
self.true_state.h = -self._state.item(2)
self.true_state.Va = self._Va
self.true_state.alpha = self._alpha
self.true_state.beta = self._beta
self.true_state.phi = phi
self.true_state.theta = theta
self.true_state.psi = psi
self.true_state.Vg = np.linalg.norm(pdot)
self.true_state.gamma = np.arcsin(pdot.item(2) / self.true_state.Vg)
self.true_state.chi = np.arctan2(pdot.item(1), pdot.item(0))
self.true_state.p = self._state.item(10)
self.true_state.q = self._state.item(11)
self.true_state.r = self._state.item(12)
self.true_state.wn = self._wind.item(0)
self.true_state.we = self._wind.item(1)
| 41.55298 | 197 | 0.563073 | [
"MIT"
] | donnel2-cooper/drone_control | Lectures/MAV_Dynamics/mav_dynamics.py | 12,549 | Python |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from .kernel import NativeKernel
from ....variables import Variable
from ....variables import PositiveTransformation
from .....util.customop import broadcast_to_w_samples
class Bias(NativeKernel):
"""
Bias kernel, which produces a constant value for every entries of the covariance matrix.
.. math::
k(x,y) = \\sigma^2
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, variance=1., name='bias', active_dims=None,
dtype=None, ctx=None):
super(Bias, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None,
this computes a square covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if X2 is None:
X2 = X
return broadcast_to_w_samples(F, variance, X.shape[:-1] +
(X2.shape[-2],))
def _compute_Kdiag(self, F, X, variance):
"""
The internal interface for the actual computation for the diagonal.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
return broadcast_to_w_samples(F, variance, X.shape[:-1])
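# Usage sketch (illustrative; the input dimensionality of 3 is arbitrary):
#   bias_kern = Bias(3, variance=2.0)
# Every entry of the covariance computed via _compute_K above is simply the broadcast
# variance parameter, matching k(x, y) = sigma^2 in the class docstring.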
class White(NativeKernel):
"""
White kernel, which produces a constant value for the diagonal of the covariance matrix.
.. math::
K = \\sigma^2 I
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, variance=1., name='white', active_dims=None,
dtype=None, ctx=None):
super(White, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if X2 is None:
Imat = F.eye(N=X.shape[-2:-1][0],
ctx=self.ctx,
dtype=self.dtype)
Imat = broadcast_to_w_samples(F, Imat, X.shape[:-1] +
X.shape[-2:-1], False)
return Imat * broadcast_to_w_samples(F, variance, X.shape[:-1] +
X.shape[-2:-1])
else:
return F.zeros(shape=X.shape[:-1] + X2.shape[-2:-1], ctx=self.ctx,
dtype=self.dtype)
def _compute_Kdiag(self, F, X, variance):
"""
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
return broadcast_to_w_samples(F, variance, X.shape[:-1])
| 43.878788 | 110 | 0.618785 | [
"Apache-2.0"
] | DerrickGXD/MXFusion | mxfusion/components/distributions/gp/kernels/static.py | 7,240 | Python |
from typing import Any
import tensorflow as tf
from .tf_util import scope_name as get_scope_name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
base = get_scope_name()
if len(base) > 0:
base += '/'
return base + relative_scope_name
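# e.g. while the current TF scope is "model/layer1", absolute_scope_name("conv")
# returns "model/layer1/conv"; at the top level it returns "conv" unchanged.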
def _infer_scope_name(self, scope_name):
return scope_name if scope_name is not None else type(self).__name__
infer_rel_scope_name = _infer_scope_name
def infer_abs_scope_name(self, scope_name: str = None):
scope_name = infer_rel_scope_name(self, scope_name)
return absolute_scope_name(scope_name)
class Scope(object):
def __init__(self, scope_name: str, obj: Any = None):
self.rel = self.abs = None
self.setup(scope_name, obj)
def setup(self, scope_name: str, obj: Any = None):
if scope_name is None:
assert obj is not None, 'Must provide either scope_name or a reference object to infer scope_name'
scope_name = type(obj).__name__
self.rel = scope_name
self.abs = absolute_scope_name(self.rel)
def make_unique(self, graph=None):
if graph is None:
graph = tf.get_default_graph()
self.rel = graph.unique_name(self.rel)
self.setup(self.rel)
@property
def exact_rel_pattern(self) -> str:
return self.abs + '/'
@property
def exact_abs_pattern(self) -> str:
return '^' + self.abs + '/'
class UninitializedScope(Scope):
# noinspection PyMissingConstructor
def __init__(self):
pass
def __getattribute__(self, item):
raise AttributeError('The scope is only available after you call super constructor __init__.\n'
'Alternatively, manually setup the scope with self.setup_scope(scope_name)')
| 26.190476 | 101 | 0.747879 | [
"MIT"
] | SandBlox/sandblox | sandblox/util/scope.py | 1,650 | Python |
n = input("Enter your name: ")
l = len(n)
print("The name enther is ", n, "and its length is ", l)
| 24.75 | 56 | 0.606061 | [
"Unlicense"
] | GalliWare/UNISA-studies | INF1511/Chapter3/string1.py | 99 | Python |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class TextBotFlowLaunchResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
TextBotFlowLaunchResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str'
}
self.attribute_map = {
'id': 'id'
}
self._id = None
@property
def id(self):
"""
Gets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:return: The id of this TextBotFlowLaunchResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:param id: The id of this TextBotFlowLaunchResponse.
:type: str
"""
self._id = id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 27.816794 | 80 | 0.568332 | [
"MIT"
] | MyPureCloud/platform-client-sdk-python | build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py | 3,644 | Python |
# Generated by Django 3.1.2 on 2020-10-29 20:54
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('author', models.CharField(max_length=512)),
],
),
migrations.CreateModel(
name='ScrapyItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_id', models.CharField(max_length=100, null=True)),
('data', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
| 30.060606 | 114 | 0.561492 | [
"Apache-2.0"
] | alinbal/enterspeedcrawler | crawler/migrations/0001_initial.py | 992 | Python |
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.apps.offer.abstract_models import AbstractConditionalOffer, AbstractBenefit
from oscar.core.loading import get_class
class ConditionalOffer(AbstractConditionalOffer):
SITE, FLASH_SALE, VOUCHER, USER, SESSION = "Site", "Flash Sale", "Voucher", "User", "Session"
TYPE_CHOICES = (
(SITE, _("Site offer - available to all users")),
(FLASH_SALE, _("Flash Sale offer - short-term discount for the specific product")),
(VOUCHER, _("Voucher offer - only available after entering the appropriate voucher code")),
(USER, _("User offer - available to certain types of user")),
(SESSION, _("Session offer - temporary offer, available for a user for the duration of their session")),
)
offer_type = models.CharField(_("Type"), choices=TYPE_CHOICES, default=SITE, max_length=128)
class Benefit(AbstractBenefit):
PERCENTAGE, FIXED, MULTIBUY, FIXED_PRICE, FIXED_PER_PRODUCT = (
"Percentage", "Absolute", "Multibuy", "Fixed price", "Fixed per product")
SHIPPING_PERCENTAGE, SHIPPING_ABSOLUTE, SHIPPING_FIXED_PRICE = (
'Shipping percentage', 'Shipping absolute', 'Shipping fixed price')
TYPE_CHOICES = (
(PERCENTAGE, _("Discount is a percentage off of the product's value")),
(FIXED, _("Discount is a fixed amount off of the product's value")),
(FIXED_PER_PRODUCT, _("Discount is a fixed amount off of each product's value that match condition")),
(MULTIBUY, _("Discount is to give the cheapest product for free")),
(FIXED_PRICE,
_("Get the products that meet the condition for a fixed price")),
(SHIPPING_ABSOLUTE,
_("Discount is a fixed amount of the shipping cost")),
(SHIPPING_FIXED_PRICE, _("Get shipping for a fixed price")),
(SHIPPING_PERCENTAGE, _("Discount is a percentage off of the shipping"
" cost")),
)
type = models.CharField(_("Type"), max_length=128, choices=TYPE_CHOICES, blank=True)
def apply_to_product(self, price):
if self.type in [self.PERCENTAGE, self.FIXED_PRICE, self.FIXED_PER_PRODUCT]:
return self.proxy().apply_to_product(price)
@property
def proxy_map(self):
custom_proxy_map = super().proxy_map
custom_proxy_map[self.PERCENTAGE] = get_class('offer.benefits', 'CustomPercentageDiscountBenefit')
custom_proxy_map[self.FIXED_PRICE] = get_class('offer.benefits', 'CustomFixedPriceBenefit')
custom_proxy_map[self.FIXED_PER_PRODUCT] = get_class(
'offer.benefits', 'CustomAbsoluteDiscountPerProductBenefit'
)
return custom_proxy_map
from oscar.apps.offer.models import * # noqa isort:skip
from .benefits import * # noqa isort:skip
| 48.982759 | 112 | 0.693066 | [
"BSD-3-Clause"
] | Bastilla123/shop2 | sandbox/offer/models.py | 2,841 | Python |
"""Metadata State Manager."""
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple, Type
from pydantic import ValidationError
from astoria.common.components import StateManager
from astoria.common.disks import DiskInfo, DiskType, DiskUUID
from astoria.common.ipc import (
MetadataManagerMessage,
MetadataSetManagerRequest,
RequestResponse,
)
from astoria.common.metadata import Metadata
from astoria.common.mixins.disk_handler import DiskHandlerMixin
from .metadata_cache import MetadataCache
from .metadata_disk_lifecycle import (
AbstractMetadataDiskLifecycle,
MetadataDiskLifecycle,
UsercodeDiskLifecycle,
)
LOGGER = logging.getLogger(__name__)
class MetadataManager(DiskHandlerMixin, StateManager[MetadataManagerMessage]):
"""Astoria Metadata State Manager."""
name = "astmetad"
dependencies = ["astdiskd"]
DISK_TYPE_LIFECYCLE_MAP: Dict[DiskType, Type[AbstractMetadataDiskLifecycle]] = {
DiskType.USERCODE: UsercodeDiskLifecycle,
DiskType.METADATA: MetadataDiskLifecycle,
}
DISK_TYPE_OVERRIDE_MAP: Dict[DiskType, Set[str]] = {
DiskType.USERCODE: {
"usercode_entrypoint", "wifi_ssid",
"wifi_psk", "wifi_region", "wifi_enabled",
},
DiskType.METADATA: {
"arena", "zone", "mode", "marker_offset", "game_timeout", "wifi_enabled",
},
}
MUTABLE_ATTRS_BY_REQUEST: Set[str] = {"arena", "zone", "mode"}
CACHED_ATTRS: Set[str] = {"wifi_ssid", "wifi_psk", "wifi_region"}
def _init(self) -> None:
self._lifecycles: Dict[DiskType, Optional[AbstractMetadataDiskLifecycle]] = {
disk_type: None
for disk_type in self.DISK_TYPE_LIFECYCLE_MAP
}
self._cache = MetadataCache(
self.CACHED_ATTRS,
cache_path=self.config.system.cache_dir / "astmetad-metadata.json",
)
self._cur_disks: Dict[DiskUUID, DiskInfo] = {}
self._mqtt.subscribe("astdiskd", self.handle_astdiskd_disk_info_message)
self._requested_data: Dict[str, str] = {}
self._register_request(
"mutate",
MetadataSetManagerRequest,
self.handle_mutation_request,
)
@property
def offline_status(self) -> MetadataManagerMessage:
"""
Status to publish when the manager goes offline.
This status should ensure that any other components relying
on this data go into a safe state.
"""
return MetadataManagerMessage(
status=MetadataManagerMessage.Status.STOPPED,
metadata=Metadata.init(self.config),
)
async def main(self) -> None:
"""Main routine for astmetad."""
self.update_status()
# Wait whilst the program is running.
await self.wait_loop()
for uuid, info in self._cur_disks.items():
asyncio.ensure_future(self.handle_disk_removal(uuid, info))
async def handle_disk_insertion(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
"""Handle a disk insertion."""
LOGGER.debug(f"Disk inserted: {uuid} ({disk_info.disk_type})")
for disk_type, lifecycle_class in self.DISK_TYPE_LIFECYCLE_MAP.items():
if disk_info.disk_type is disk_type:
LOGGER.info(
f"{disk_type.name} disk {uuid} is mounted"
f" at {disk_info.mount_path}",
)
if self._lifecycles[disk_type] is None:
LOGGER.debug(f"Starting lifecycle for {uuid}")
self._lifecycles[disk_type] = lifecycle_class(
uuid,
disk_info,
self.config,
)
self.update_status()
else:
LOGGER.warn(
"Cannot use metadata, there is already a lifecycle present.",
)
async def handle_disk_removal(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
"""Handle a disk removal."""
LOGGER.debug(f"Disk removed: {uuid} ({disk_info.disk_type})")
for disk_type, lifecycle_class in self.DISK_TYPE_LIFECYCLE_MAP.items():
if disk_info.disk_type is disk_type:
LOGGER.info(f"Metadata disk {uuid} removed ({disk_info.mount_path})")
lifecycle = self._lifecycles[disk_type]
if lifecycle is not None and lifecycle._uuid == disk_info.uuid:
self._lifecycles[disk_type] = None
self.update_status()
async def handle_mutation_request(
self,
request: MetadataSetManagerRequest,
) -> RequestResponse:
"""Handle a request to mutate metadata."""
if request.attr not in self.MUTABLE_ATTRS_BY_REQUEST:
return RequestResponse(
uuid=request.uuid,
success=False,
reason=f"{request.attr} is not a mutable attribute",
)
if len(request.value) == 0:
# Stop mutating the attr if it is empty.
try:
del self._requested_data[request.attr]
LOGGER.info(f"{request.attr} override has been removed by request")
self.update_status()
except KeyError:
pass
else:
# Store the old value, just in case we need to set it back.
if request.attr in self._requested_data:
old_value = self._requested_data[request.attr]
else:
old_value = None
# Attempt to update the data, reset it if it is invalid.
try:
self._requested_data[request.attr] = request.value
self.update_status()
LOGGER.info(
f"{request.attr} has been overridden to {request.value} by request",
)
except ValidationError as e:
# Set the requested data back to the old value
if old_value is not None:
self._requested_data[request.attr] = old_value
LOGGER.warning(f"Unable to set {request.attr} to {request.value}.")
LOGGER.warning(str(e))
return RequestResponse(
uuid=request.uuid,
success=False,
reason=f"{request.value} is not a valid value for {request.attr}",
)
return RequestResponse(
uuid=request.uuid,
success=True,
)
def get_current_metadata(self) -> Metadata:
"""
Calculate the current metadata.
Takes the default, static metadata based on the config and system
information. It then overlays data from other sources in a priority order,
whereby each source has a set of permitted attributes in the metadata that
can be overridden.
"""
# Metadata sources in priority order.
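        # Applied in list order, so later sources override earlier ones:
        # cached values < request mutations < disk overlays appended below.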
metadata_sources: List[Tuple[Set[str], Dict[str, str]]] = [
(self.CACHED_ATTRS, self._cache.data),
(self.MUTABLE_ATTRS_BY_REQUEST, self._requested_data),
]
for disk_type, val in self._lifecycles.items():
if val is not None:
# Add disk-based metadata source if it is present.
metadata_sources.append(
(
self.DISK_TYPE_OVERRIDE_MAP[disk_type],
val.diff_data,
),
)
metadata = Metadata.init(self.config)
for permitted_attrs, diff_data in metadata_sources:
for k, v in diff_data.items():
if k in permitted_attrs:
metadata.__setattr__(k, v)
else:
LOGGER.warning(
f"There was an attempt to mutate {k}, but it was not permitted.",
)
# Update the cache with the new values.
for key in self.CACHED_ATTRS:
self._cache.update_cached_attr(key, metadata.__getattribute__(key))
return metadata
def update_status(self) -> None:
"""Update the status of the manager."""
self.status = MetadataManagerMessage(
status=MetadataManagerMessage.Status.RUNNING,
metadata=self.get_current_metadata(),
)
| 37.070175 | 89 | 0.591103 | [
"MIT"
] | trickeydan/astoria | astoria/astmetad/metadata_manager.py | 8,452 | Python |
from temboo.Library.Utilities.Authentication.OAuth2.FinalizeOAuth import FinalizeOAuth, FinalizeOAuthInputSet, FinalizeOAuthResultSet, FinalizeOAuthChoreographyExecution
from temboo.Library.Utilities.Authentication.OAuth2.InitializeOAuth import InitializeOAuth, InitializeOAuthInputSet, InitializeOAuthResultSet, InitializeOAuthChoreographyExecution
from temboo.Library.Utilities.Authentication.OAuth2.RefreshToken import RefreshToken, RefreshTokenInputSet, RefreshTokenResultSet, RefreshTokenChoreographyExecution
| 128.75 | 179 | 0.912621 | [
"Apache-2.0"
] | lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Utilities/Authentication/OAuth2/__init__.py | 515 | Python |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from logging import Handler
from .Qt import QtCore
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class LogHandler(Handler):
# Class Emitter is added to keep compatibility with PySide2
# 1. Signal needs to be class attribute of a QObject subclass
# 2. logging Handler emit method clashes with QObject emit method
# 3. As a consequence, the LogHandler cannot inherit both from
# Handler and QObject
# 4. A new utility class Emitter subclass of QObject is
# introduced to handle record Signal and workaround the problem
class Emitter(QtCore.QObject):
record = QtCore.QSignal(object)
def __init__(self):
super().__init__()
self.emitter = self.Emitter()
def connect(self, *args, **kwargs):
return self.emitter.record.connect(*args, **kwargs)
def emit(self, record):
self.emitter.record.emit(self.format(record))
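# Usage sketch (assumes a Qt text widget; `log_widget.appendPlainText` stands in for any
# slot that accepts a single formatted string):
#   handler = LogHandler()
#   handler.setFormatter(logging.Formatter("%(asctime)s : %(message)s"))
#   handler.connect(log_widget.appendPlainText)
#   log.addHandler(handler)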
| 38.127273 | 79 | 0.745351 | [
"MIT"
] | Bruyant/pymeasure | pymeasure/display/log.py | 2,097 | Python |
from django import forms
from sme_uniforme_apps.proponentes.models import Anexo
class AnexoForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AnexoForm, self).__init__(*args, **kwargs)
self.fields['tipo_documento'].required = True
class Meta:
model = Anexo
fields = '__all__' | 22.4 | 56 | 0.675595 | [
"MIT"
] | prefeiturasp/SME-PortalUniforme-BackEnd | sme_uniforme_apps/proponentes/models/forms.py | 336 | Python |
import re
import setuptools
import setuptools.command.develop
import setuptools.command.install
import subprocess
import sys
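# Bootstrap: make sure a recent pkg_utils (>= 0.0.5) is importable before it is used
# below to read the package metadata; install/upgrade it on the fly if necessary.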
try:
result = subprocess.run(
[sys.executable, "-m", "pip", "show", "pkg_utils"],
check=True, capture_output=True)
match = re.search(r'\nVersion: (.*?)\n', result.stdout.decode(), re.DOTALL)
assert match and tuple(match.group(1).split('.')) >= ('0', '0', '5')
except (subprocess.CalledProcessError, AssertionError):
subprocess.run(
[sys.executable, "-m", "pip", "install", "-U", "pkg_utils"],
check=True)
import os
import pkg_utils
name = 'biosimulators_ginsim'
dirname = os.path.dirname(__file__)
# get package metadata
md = pkg_utils.get_package_metadata(dirname, name)
# install package
setuptools.setup(
name=name,
version=md.version,
description=("BioSimulators-compliant command-line interface to "
"the GINsim simulation program."),
long_description=md.long_description,
url="https://github.com/biosimulators/Biosimulators_GINsim",
download_url="https://github.com/biosimulators/Biosimulators_GINsim",
author='BioSimulators Team',
author_email="[email protected]",
license="MIT",
keywords=[
'systems biology',
'computational biology',
'logical model',
'numerical simulation',
'BioSimulators',
'SBML',
'SED-ML',
'COMBINE',
'OMEX',
'GINsim',
],
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=md.install_requires,
extras_require=md.extras_require,
tests_require=md.tests_require,
dependency_links=md.dependency_links,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
entry_points={
'console_scripts': [
'biosimulators-ginsim = biosimulators_ginsim.__main__:main',
],
},
)
| 30.441176 | 79 | 0.653623 | [
"MIT"
] | biosimulators/Biosimulators_GINsim | setup.py | 2,070 | Python |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
# DO SOME OS-WISE PATCHES
import datetime as _datetime
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import types as _types
from google.protobuf.internal import api_implementation as _api_implementation
if _api_implementation._default_implementation_type != 'cpp':
import warnings as _warnings
_warnings.warn(
'''
    You are using the Python protobuf backend, not the C++ version, which is much faster.
    This is often due to the C++ implementation failing to compile while installing Protobuf
    - You are using Python 3.9 (https://github.com/jina-ai/jina/issues/1801)
    - You are running on an architecture other than x86_64/armv6/armv7
    - Your installation is broken, try `pip install --force protobuf`
    - You have the C++ backend but it is switched off, try `export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`
''',
RuntimeWarning,
)
if _sys.version_info < (3, 7, 0) or _sys.version_info >= (3, 10, 0):
raise OSError(f'Jina requires Python 3.7/3.8/3.9, but yours is {_sys.version_info}')
if _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
    # temporary fix for python 3.8 on macOS where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
    # fix fork error on macOS (though this seems to have no effect?); the EXPORT must still be done manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# Underscore variables shared globally
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '1.0.10'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.78'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS
# 1. clean this tuple,
# 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = (
'JINA_ARRAY_QUANT',
'JINA_BINARY_DELIMITER',
'JINA_CONTRIB_MODULE',
'JINA_CONTRIB_MODULE_IS_LOADING',
'JINA_CONTROL_PORT',
'JINA_DEFAULT_HOST',
'JINA_DISABLE_UVLOOP',
'JINA_EXECUTOR_WORKDIR',
'JINA_FULL_CLI',
'JINA_IPC_SOCK_TMP',
'JINA_LOG_CONFIG',
'JINA_LOG_ID',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_LOG_WORKSPACE',
'JINA_POD_NAME',
'JINA_RAISE_ERROR_EARLY',
'JINA_RANDOM_PORTS',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_SOCKET_HWM',
'JINA_VCS_VERSION',
'JINA_WARN_UNNAMED',
'JINA_WORKSPACE',
)
__default_host__ = _os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__binary_delimiter__ = _os.environ.get(
'JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57'
).encode()
__root_dir__ = _os.path.dirname(_os.path.abspath(__file__))
_names_with_underscore = [
'__version__',
'__copyright__',
'__license__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__binary_delimiter__',
'__jina_env__',
'__uptime__',
'__root_dir__',
]
# Primitive data type,
# note, they must be loaded BEFORE all executors/drivers/... to avoid cyclic imports
from jina.types.ndarray.generic import NdArray
from jina.types.request import Request, Response
from jina.types.message import Message
from jina.types.querylang import QueryLang
from jina.types.document import Document
from jina.types.document.multimodal import MultimodalDocument
from jina.types.sets import DocumentSet, QueryLangSet
# ADD GLOBAL NAMESPACE VARIABLES
JINA_GLOBAL = _types.SimpleNamespace()
import jina.importer as _ji
# driver first, as executor may contain driver
_ji.import_classes('jina.drivers', show_import_table=False, import_once=True)
_ji.import_classes('jina.executors', show_import_table=False, import_once=True)
_ji.import_classes('jina.hub', show_import_table=False, import_once=True)
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
def _set_nofile(nofile_atleast=4096):
"""
    Sets the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn
    with many plot generators executing in parallel vs. the Ubuntu default of ulimit -n 1024 or
    the OS X El Capitan default of 256. This temporary setting expires with the Python session.
"""
try:
import resource as res
except ImportError: # Windows
res = None
from .logging import default_logger
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(
f'trouble with max limit, retrying with soft,hard {soft},{hard}'
)
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return soft, hard
_set_nofile()
# Flow
from jina.flow import Flow
from jina.flow.asyncio import AsyncFlow
# Client
from jina.clients import Client
from jina.clients.asyncio import AsyncClient
# Executor
from jina.executors import BaseExecutor as Executor
from jina.executors.classifiers import BaseClassifier as Classifier
from jina.executors.crafters import BaseCrafter as Crafter
from jina.executors.encoders import BaseEncoder as Encoder
from jina.executors.evaluators import BaseEvaluator as Evaluator
from jina.executors.indexers import BaseIndexer as Indexer
from jina.executors.rankers import BaseRanker as Ranker
from jina.executors.segmenters import BaseSegmenter as Segmenter
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend([_s for _s in _names_with_underscore])
| 32.363636 | 122 | 0.724571 | [
"Apache-2.0"
] | bsherifi/jina | jina/__init__.py | 6,764 | Python |
import unittest
from CsvReader import CsvReader
from Calculator import MyCalculator
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
self.calculator = MyCalculator()
def test_instantiate_calculator(self):
self.assertIsInstance(self.calculator, MyCalculator)
def test_addition(self):
test_data = CsvReader('src/csv/TestAddition.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.add(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_subtraction(self):
test_data = CsvReader('src/csv/TestSubtraction.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.subtract(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_multiplication(self):
test_data = CsvReader('src/csv/TestMultiplication.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.multiply(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_division(self):
test_data = CsvReader('src/csv/TestDivision.csv').data
for row in test_data:
result = float(row['Result'])
self.assertAlmostEqual(self.calculator.divide(row['Value 1'], row['Value 2']), result)
self.assertAlmostEqual(self.calculator.result, float(row['Result']))
def test_square(self):
test_data = CsvReader('src/csv/TestSquare.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.square(row['Value 1']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_square_root(self):
test_data = CsvReader('src/csv/TestSquareRoot.csv').data
for row in test_data:
result = float(row['Result'])
self.assertAlmostEqual(self.calculator.square_root(row['Value 1']), result)
self.assertAlmostEqual(self.calculator.result, float(row['Result']))
if __name__ == '__main__':
unittest.main() | 40.586207 | 98 | 0.649108 | [
"MIT"
] | jimishapatel/Calculator | src/CalculatorTest.py | 2,354 | Python |
from niaaml.classifiers.classifier import Classifier
from niaaml.utilities import MinMax
from niaaml.utilities import ParameterDefinition
from sklearn.ensemble import RandomForestClassifier as RF
import numpy as np
import warnings
from sklearn.exceptions import ChangedBehaviorWarning, ConvergenceWarning, DataConversionWarning, DataDimensionalityWarning, EfficiencyWarning, FitFailedWarning, NonBLASDotWarning, UndefinedMetricWarning
__all__ = ['RandomForest']
class RandomForest(Classifier):
r"""Implementation of random forest classifier.
Date:
2020
Author:
Luka Pečnik
License:
MIT
Reference:
Breiman, “Random Forests”, Machine Learning, 45(1), 5-32, 2001.
Documentation:
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
See Also:
* :class:`niaaml.classifiers.Classifier`
"""
Name = 'Random Forest Classifier'
def __init__(self, **kwargs):
r"""Initialize RandomForestClassifier instance.
"""
warnings.filterwarnings(action='ignore', category=ChangedBehaviorWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=DataDimensionalityWarning)
warnings.filterwarnings(action='ignore', category=EfficiencyWarning)
warnings.filterwarnings(action='ignore', category=FitFailedWarning)
warnings.filterwarnings(action='ignore', category=NonBLASDotWarning)
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
self._params = dict(
n_estimators = ParameterDefinition(MinMax(min=10, max=111), np.uint)
)
self.__random_forest_classifier = RF()
def set_parameters(self, **kwargs):
r"""Set the parameters/arguments of the algorithm.
"""
self.__random_forest_classifier.set_params(**kwargs)
def fit(self, x, y, **kwargs):
r"""Fit RandomForestClassifier.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
y (pandas.core.series.Series): n classes of the samples in the x array.
Returns:
None
"""
self.__random_forest_classifier.fit(x, y)
def predict(self, x, **kwargs):
r"""Predict class for each sample (row) in x.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
Returns:
pandas.core.series.Series: n predicted classes.
"""
return self.__random_forest_classifier.predict(x)
def to_string(self):
r"""User friendly representation of the object.
Returns:
str: User friendly representation of the object.
"""
return Classifier.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__random_forest_classifier.get_params())) | 35.290698 | 203 | 0.691269 | [
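# --- Added usage sketch (illustrative only; not part of the original file) ---
# Shows the fit/predict flow this wrapper exposes; the toy column names and
# values below are made up for the example and assume pandas is installed.
if __name__ == '__main__':
    import pandas as pd
    toy_x = pd.DataFrame({'f1': [0, 1, 0, 1], 'f2': [0.2, 0.9, 0.1, 0.8]})
    toy_y = pd.Series([0, 1, 0, 1])
    clf = RandomForest()
    clf.set_parameters(n_estimators=10)
    clf.fit(toy_x, toy_y)
    print(clf.predict(toy_x))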
"MIT"
] | adi3/NiaAML | niaaml/classifiers/random_forest.py | 3,040 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.PrivateEndpointConnection"]
"""List private endpoint connections.
List private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of PrivateEndpointConnection, or the result of cls(response)
:rtype: list[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[PrivateEndpointConnection]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections'} # type: ignore
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpointConnection"
"""Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
private_endpoint_connection, # type: "_models.PrivateEndpointConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpointConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
private_endpoint_connection, # type: "_models.PrivateEndpointConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
:type private_endpoint_connection: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.PrivateEndpointConnection"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
"""Delete private endpoint connection.
Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
| 51.14094 | 234 | 0.684821 | [
"MIT"
] | 4thel00z/microsoft-crap-that-doesnt-work | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_private_endpoint_connections_operations.py | 22,860 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/25 22:42
# @Author : Tom.lee
# @Site :
# @File : mysql_lock.py
# @Software: PyCharm
"""
通过MySQL实现分布式锁服务
"""
import MySQLdb
import logging
import time
FORMAT_STR = '%(asctime)s -%(module)s:%(filename)s-L%(lineno)d-%(levelname)s: %(message)s'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(FORMAT_STR)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.info("Current log level is : %s", logging.getLevelName(logger.getEffectiveLevel()))
class MySqlLock(object):
LOCK_SQL = "SELECT get_lock('{key}', {timeout}) FROM dual"
UNLOCK_SQL = "SELECT release_lock('{key}') FROM dual"
def __init__(self, lock_key=None, *args, **kwargs):
"""
:param lock_key:
:param args: 参数与MySQLdb初始化参数一致.
:param kwargs: 参数与MySQLdb初始化参数一致.
host='localhost'
user='test'
passwd='test'
db='test'
"""
self.__db = MySQLdb.connect(*args, **kwargs)
self.lock_key = lock_key or '7ab18906739e4662ac01e69f5ebb7352'
def _execute(self, sql):
"""
MySQL数据库操作
:param sql:
:return: (1L,) --> tuple
"""
res = (-1,)
cursor = self.__db.cursor()
try:
cursor.execute(sql)
if cursor.rowcount != 1:
logging.error("Multiple rows returned in mysql lock function.")
else:
res = cursor.fetchone()
except Exception, ex:
logging.error("执行SQL\"%s\" 失败! 异常信息: %s", sql, str(ex))
finally:
cursor.close()
return res
def lock(self, timeout):
"""
MySQL数据库加锁
:param timeout: 超时时间
:return:
"""
# 加锁操作
lk = self._execute(self.LOCK_SQL.format(key=self.lock_key, timeout=timeout))
if lk[0] == 0:
logging.debug("锁'%s'已经被创建.", self.lock_key)
return False
elif lk[0] == 1:
logging.debug("创建锁'%s'." % self.lock_key)
return True
else:
logging.error("获取锁失败!")
return None
def unlock(self):
"""
释放MySQL锁.
:return:
"""
# 释放操作
uk = self._execute(self.UNLOCK_SQL.format(key=self.lock_key))
if uk[0] == 0:
logging.debug("释放锁'%s'失败(该锁被其他进程持有)" % self.lock_key)
return False
elif uk[0] == 1:
logging.debug("释放锁'%s'." % self.lock_key)
return True
else:
logging.error("锁'%s'不存在." % self.lock_key)
return None
if __name__ == "__main__":
l = MySqlLock(host='localhost', user='root', passwd='root', db='iaasms')
ret = l.lock(15)
if not ret:
logging.error("获取锁失败,退出!")
quit()
    time.sleep(15)  # simulate a cross-process synchronized operation!
    # raise Exception('Simulated operation error; MySQL automatically releases locks held by this process.')
# TODO something
print 'hello ok!'
l.unlock()
| 26.808696 | 91 | 0.543951 | [
"MIT"
] | 2581676612/python | contributed_modules/mysql/mysqldb_/mysql_lock.py | 3,349 | Python |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..arithmetic import AddScalarVolumes
def test_AddScalarVolumes_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume1=dict(argstr='%s',
position=-3,
),
inputVolume2=dict(argstr='%s',
position=-2,
),
order=dict(argstr='--order %s',
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
terminal_output=dict(nohash=True,
),
)
inputs = AddScalarVolumes.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_AddScalarVolumes_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = AddScalarVolumes.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 25.695652 | 67 | 0.6489 | [
"Apache-2.0"
] | lighthall-lab/nipype-legacy | nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py | 1,182 | Python |
import dfstools as dt
import sys
if __name__ == "__main__":
print(sys.version)
print(sys.executable)
print("---pecanCookies Demo---")
print("Loading into dataframe from csv...", '\n')
data_list = dt.load_csv_to_df()
print("identifing relationships by column content....", '\n')
relationship_list = dt.find_related_cols_by_content(data_list)
print("printing relationships....", '\n')
print(relationship_list) | 26.352941 | 66 | 0.683036 | [
"Apache-2.0"
] | Jsostmann/comp410_summer2020 | peacn_cookies_sprint_one.py | 448 | Python |
#coding:utf-8
# Chainer version 3.2 (use version 3.x)
#
# This is based on <https://raw.githubusercontent.com/chainer/chainer/v3/examples/mnist/train_mnist.py>
#
# This used mean_absolute_error as loss function.
# Check version
# Python 3.6.4 on win32 (Windows 10)
# Chainer 3.2.0
# numpy 1.14.0
# matplotlib 2.1.1
from __future__ import print_function
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training, cuda
from chainer.training import extensions
from chainer.functions.loss.mean_squared_error import mean_squared_error
from chainer.functions.loss.mean_absolute_error import mean_absolute_error
from TM_dataset import *
from plot_report_logscale import *
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_units) # n_units -> n_units
self.l4 = L.Linear(None, n_units) # n_units -> n_units
self.l5 = L.Linear(None, n_out) # n_units -> n_out
        # set the random seed to a fixed value to avoid getting different results on every run
np.random.seed(100)
def __call__(self, x):
h1 = F.relu(self.l1(x)) #F.sigmoid(self.l1(x))
h2 = F.relu(self.l2(h1)) #F.sigmoid(self.l2(h1))
h3 = F.relu(self.l3(h2)) #F.sigmoid(self.l3(h2))
h4 = F.relu(self.l4(h3)) #F.sigmoid(self.l4(h3))
return self.l5(h4)
IN_CHANNELS =1 # input MONOCOLOR
OUT_CHANNELS= 100 # Middle layer channels
class CNN(chainer.Chain):
# INPUT -> ((CONV -> RELU) -> POOL) ->((CONV -> RELU) -> POOL) -> ->((CONV -> RELU) -> POOL) -> FC
def __init__(self, n_units, n_out, in_channels=IN_CHANNELS, out_channels=OUT_CHANNELS):
super(CNN, self).__init__()
with self.init_scope():
self.conv1=L.Convolution2D(in_channels, out_channels, (3,1) , pad=0)
self.conv2=L.Convolution2D(out_channels, out_channels, (3,1) , pad=0)
self.conv3=L.Convolution2D(out_channels, out_channels, (3,1) , pad=0)
self.l1=L.Linear( None, n_out)
        # set the random seed to a fixed value to avoid getting different results on every run
np.random.seed(100)
def __call__(self, x):
h1 = F.relu(self.conv1(x))
h2 = F.max_pooling_2d( h1, (2,1) )
h3 = F.relu(self.conv2(h2))
h4 = F.max_pooling_2d( h3, (2,1) )
h5 = F.relu(self.conv3(h4))
h6 = F.max_pooling_2d( h5, (2,1) )
y = self.l1(h6)
return y
def main():
parser = argparse.ArgumentParser(description='estimation from formant to vocal tube model parameter')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=650,
help='Number of sweeps over the dataset to train')
parser.add_argument('--frequency', '-f', type=int, default=-1,
help='Frequency of taking a snapshot')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Prefix Directory Name to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
parser.add_argument('--noplot', dest='plot', action='store_false',
help='Disable PlotReport extension')
parser.add_argument('--delta', '-d', type=float, default=0.5,
help='delta for length and area: for train')
parser.add_argument('--delta_for_test', '-t', type=float, default=1.5,
help='delta for length and area: for test')
parser.add_argument('--model_type', '-m', default='MLP',
                        help='choose MLP or CNN')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
# check model type
if args.model_type == 'CNN':
CNN_flag=True
print('# CNN')
else:
CNN_flag=False
print('# MLP')
# Load dataset
train = TM_DatsSet(args.delta, args.delta, CNN_flag)
test = TM_DatsSet(args.delta_for_test, args.delta_for_test, CNN_flag)
n_out= train.n_out
out_dir= args.out + train.suffix_list
print('# result directory: ', out_dir)
# Set up a neural network to train
    # Classifier reports mean_absolute/squared_error loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    # Since the targets are not independent events, a distance-based loss seems more appropriate than softmax.
#
if CNN_flag:
model = L.Classifier(CNN(args.unit, n_out), lossfun=mean_absolute_error) #mean_squared_error)
else:
model = L.Classifier(MLP(args.unit, n_out), lossfun=mean_absolute_error) #mean_squared_error)
    model.compute_accuracy = False  # no need to compute accuracy
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.Adam(alpha=0.001) #alpha=0.0001)
optimizer.setup(model)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out_dir)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))
# Take a snapshot for each specified epoch
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
if args.plot and extensions.PlotReport.available():
trainer.extend(
PlotReport2(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png', LogScale=True))
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
# Accuracy rate
    print('checking accuracy rate... ')  # transferring data one sample at a time takes a long time; this needs improvement.
c0=0
for loop in range(train.__len__()):
x1=train.get_example(loop)
if args.gpu >= 0: # gpu
x_batch = cuda.cupy.asarray([x1[0]])
y_gpu=model.predictor(x_batch)
y=cuda.to_cpu(y_gpu.data)
else: # cpu
x_batch = np.asarray([x1[0]])
y_cpu=model.predictor(x_batch)
y=y_cpu.data[0]
#print ('input ', x1[0] )
#print (' predicted ', y, '(', np.round(y) , ')', 'correct ', x1[1] )
        # Accuracy: the target is assumed to be an integer frequency index, so a prediction counts as correct if it matches after rounding.
if np.round(y) == np.round(x1[1]):
c0+= 1
print ('Accuracy rate (index is equal, ratio[%]) ', (c0 * 100.0 / train.__len__() ) )
if __name__ == '__main__':
main()
| 38.562771 | 113 | 0.610126 | [
"MIT"
] | shun60s/chainer-peak-detect | train.py | 9,050 | Python |
"""
Forward Chaining, K-Fold and Group K-Fold algorithms to split a given training dataset into train (X, y) and validation (Xcv, ycv) sets
"""
import numpy as np
def split_train_val_forwardChaining(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using forward chaining technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training and validation
numOutputs (int) : Number of outputs y and ycv used at each training and validation
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
j=2; # Tracks index of CV set at each train/val split
# Iterate through all train/val splits
while 1:
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
# Iterate until index of individual training set is smaller than index of cv set
while (i < j):
## TRAINING DATA
start_ix = numJumps*i;
end_ix = start_ix + numInputs;
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
i+=1;
# Once val data crosses time series length return
if (((end_ix+numInputs)+numOutputs) > len(sequence)):
break
## CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
## Add another train/val split
X[j-2] = np.array(X_it)
y[j-2] = np.array(y_it)
Xcv[j-2] = np.array(Xcv_it)
ycv[j-2] = np.array(ycv_it)
j+=1;
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
def split_train_val_kFold(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
j=2; # Tracks index of CV set at each train/val split
theEnd = 0; # Flag to terminate function
# Iterate until val set falls outside time series length
while 1:
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
n=0; # Number of numJumps
# Iterate through all train/val splits
while 1:
if (i != j):
## TRAINING DATA
start_ix = endCv_ix + numJumps*n;
end_ix = start_ix + numInputs;
n +=1;
# Leave train/val split loop once training data crosses time series length
if end_ix+numOutputs > len(sequence):
break;
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
else:
## CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
n = 0;
# Once val data crosses time series length exit tran/val split loop and return
if endCv_ix+numOutputs > len(sequence):
theEnd = 1;
break;
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
i+=1;
# Only add a train/val split if the time series length has not been crossed
if (theEnd == 1):
break
## Add another train/val split
X[j-2] = np.array(X_it)
y[j-2] = np.array(y_it)
Xcv[j-2] = np.array(Xcv_it)
ycv[j-2] = np.array(ycv_it)
j+=1;
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
def split_train_val_groupKFold(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using group K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
# Iterate through 5 train/val splits
for j in np.arange(5):
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
n=0; # Number of numJumps
while 1:
if ((i+1+j)%(5) != 0):
# TRAINING DATA
start_ix = endCv_ix + numJumps*n;
end_ix = start_ix + numInputs;
n+=1;
# Leave train/val split loop once training data crosses time series length
if end_ix+numOutputs > len(sequence)-1:
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
else:
# CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
n=0;
# Once val data crosses time series length return
if ((endCv_ix+numOutputs) > len(sequence)):
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
i+=1;
## Add another train/val split
X[j] = np.array(X_it)
y[j] = np.array(y_it)
Xcv[j] = np.array(Xcv_it)
ycv[j] = np.array(ycv_it)
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
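# --- Added usage sketch (illustrative only; not part of the original module) ---
# Demonstrates the forward chaining splitter on a toy 1-D series; the sizes below
# are arbitrary. The K-Fold and group K-Fold variants are called the same way.
if __name__ == '__main__':
    toy_series = np.arange(20)
    X, y, Xcv, ycv = split_train_val_forwardChaining(toy_series, numInputs=4, numOutputs=2, numJumps=2)
    for split in X:
        print('split', split, 'train X/y:', X[split].shape, y[split].shape, 'val X/y:', Xcv[split].shape, ycv[split].shape)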
| 37.681818 | 135 | 0.563088 | [
"MIT"
] | DidierRLopes/TimeSeriesCrossValidation | tsxv/splitTrainVal.py | 8,290 | Python |
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="linewidth", parent_name="carpet.aaxis", **kwargs):
super(LinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
| 34.307692 | 86 | 0.656951 | [
"MIT"
] | labaran1/plotly.py | packages/python/plotly/plotly/validators/carpet/aaxis/_linewidth.py | 446 | Python |
from django.urls import path
from . import views
urlpatterns = [
path('list', views.list_view),
path('add', views.add_view),
] | 19.285714 | 34 | 0.681481 | [
"MIT"
] | StevenYwch/CloudNote | note/urls.py | 135 | Python |
# pass test
import numpy as np
def prepare_input(input_size):
return [np.random.rand(input_size), np.random.rand(input_size)]
def test_function(input_data):
return np.convolve(input_data[0], input_data[1])
| 24 | 67 | 0.75463 | [
"Apache-2.0"
] | Ashymad/praca.inz | tests/python/tests/conv/test.py | 216 | Python |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of SpineNet model.
X. Du, T-Y. Lin, P. Jin, G. Ghiasi, M. Tan, Y. Cui, Q. V. Le, X. Song
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization
https://arxiv.org/abs/1912.05027
"""
import math
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.beta.modeling.layers import nn_blocks
from official.vision.beta.ops import spatial_transform_ops
layers = tf.keras.layers
FILTER_SIZE_MAP = {
1: 32,
2: 64,
3: 128,
4: 256,
5: 256,
6: 256,
7: 256,
}
# The fixed SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
(2, 'bottleneck', (0, 1), False),
(4, 'residual', (0, 1), False),
(3, 'bottleneck', (2, 3), False),
(4, 'bottleneck', (2, 4), False),
(6, 'residual', (3, 5), False),
(4, 'bottleneck', (3, 5), False),
(5, 'residual', (6, 7), False),
(7, 'residual', (6, 8), False),
(5, 'bottleneck', (8, 9), False),
(5, 'bottleneck', (8, 10), False),
(4, 'bottleneck', (5, 10), True),
(3, 'bottleneck', (4, 10), True),
(5, 'bottleneck', (7, 12), True),
(7, 'bottleneck', (5, 14), True),
(6, 'bottleneck', (12, 14), True),
]
SCALING_MAP = {
'49S': {
'endpoints_num_filters': 128,
'filter_size_scale': 0.65,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'49': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'96': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 2,
},
'143': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 1.0,
'block_repeats': 3,
},
'190': {
'endpoints_num_filters': 512,
'filter_size_scale': 1.3,
'resample_alpha': 1.0,
'block_repeats': 4,
},
}
class BlockSpec(object):
"""A container class that specifies the block configuration for SpineNet."""
def __init__(self, level, block_fn, input_offsets, is_output):
self.level = level
self.block_fn = block_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(block_specs=None):
"""Builds the list of BlockSpec objects for SpineNet."""
if not block_specs:
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpineNet(tf.keras.Model):
"""Class to build SpineNet models."""
def __init__(self,
input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),
min_level=3,
max_level=7,
block_specs=build_block_specs(),
endpoints_num_filters=256,
resample_alpha=0.5,
block_repeats=1,
filter_size_scale=1.0,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""SpineNet model."""
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._endpoints_num_filters = endpoints_num_filters
self._resample_alpha = resample_alpha
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if activation == 'relu':
self._activation_fn = tf.nn.relu
elif activation == 'swish':
self._activation_fn = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
self._init_block_fn = 'bottleneck'
self._num_init_blocks = 2
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build SpineNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
net = self._build_scale_permuted_network(
net=net, input_width=input_specs.shape[1])
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints)
def _block_group(self,
inputs,
filters,
strides,
block_fn_cand,
block_repeats=1,
name='block_group'):
"""Creates one group of blocks for the SpineNet model."""
block_fn_candidates = {
'bottleneck': nn_blocks.BottleneckBlock,
'residual': nn_blocks.ResidualBlock,
}
block_fn = block_fn_candidates[block_fn_cand]
_, _, _, num_filters = inputs.get_shape().as_list()
if block_fn_cand == 'bottleneck':
use_projection = not (num_filters == (filters * 4) and strides == 1)
else:
use_projection = not (num_filters == filters and strides == 1)
x = block_fn(
filters=filters,
strides=strides,
use_projection=use_projection,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
use_projection=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def _build_stem(self, inputs):
"""Build SpineNet stem."""
x = layers.Conv2D(
filters=64,
kernel_size=7,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
net = []
# Build the initial level 2 blocks.
for i in range(self._num_init_blocks):
x = self._block_group(
inputs=x,
filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
strides=1,
block_fn_cand=self._init_block_fn,
block_repeats=self._block_repeats,
name='stem_block_{}'.format(i + 1))
net.append(x)
return net
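  # Worked example of the stem geometry (comment added for clarity): with a
  # 640x640 input, the stride-2 7x7 conv and the stride-2 max pool each halve
  # the spatial size, so both initial level-2 feature maps are 160x160, i.e.
  # input_width / 2**2, which is exactly how net_sizes is seeded in
  # _build_scale_permuted_network below.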
def _build_scale_permuted_network(self,
net,
input_width,
weighted_fusion=False):
"""Build scale-permuted network."""
net_sizes = [int(math.ceil(input_width / 2**2))] * len(net)
net_block_fns = [self._init_block_fn] * len(net)
num_outgoing_connections = [0] * len(net)
endpoints = {}
for i, block_spec in enumerate(self._block_specs):
# Find out specs for the target block.
target_width = int(math.ceil(input_width / 2**block_spec.level))
target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
self._filter_size_scale)
target_block_fn = block_spec.block_fn
# Resample then merge input0 and input1.
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_alpha(
inputs=net[input0],
input_width=net_sizes[input0],
input_block_fn=net_block_fns[input0],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_alpha(
inputs=net[input1],
input_width=net_sizes[input1],
input_block_fn=net_block_fns[input1],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x1)
num_outgoing_connections[input1] += 1
# Merge 0 outdegree blocks to the output block.
if block_spec.is_output:
for j, (j_feat,
j_connections) in enumerate(zip(net, num_outgoing_connections)):
if j_connections == 0 and (j_feat.shape[2] == target_width and
j_feat.shape[3] == x0.shape[3]):
parents.append(j_feat)
num_outgoing_connections[j] += 1
# pylint: disable=g-direct-tensorflow-import
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [
tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(
i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = tf.add_n(parent_weights)
parents = [
parents[i] * parent_weights[i] / (weights_sum + 0.0001)
for i in range(len(parents))
]
# Fuse all parent nodes then build a new block.
x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents))
x = self._block_group(
inputs=x,
filters=target_num_filters,
strides=1,
block_fn_cand=target_block_fn,
block_repeats=self._block_repeats,
name='scale_permuted_block_{}'.format(i + 1))
net.append(x)
net_sizes.append(target_width)
net_block_fns.append(target_block_fn)
num_outgoing_connections.append(0)
# Save output feats.
if block_spec.is_output:
if block_spec.level in endpoints:
raise ValueError('Duplicate feats found for output level {}.'.format(
block_spec.level))
if (block_spec.level < self._min_level or
block_spec.level > self._max_level):
raise ValueError('Output level is out of range [{}, {}]'.format(
self._min_level, self._max_level))
endpoints[str(block_spec.level)] = x
return endpoints
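  # Worked example of the per-block bookkeeping (comment added for clarity):
  # for a block spec at level 5 with a 640-wide input, target_width is
  # ceil(640 / 2**5) = 20 and target_num_filters is FILTER_SIZE_MAP[5] scaled
  # by filter_size_scale; both parents are resampled to those dimensions
  # before being fused with tf.add_n.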
def _build_endpoints(self, net):
"""Match filter size for endpoints before sharing conv layers."""
endpoints = {}
for level in range(self._min_level, self._max_level + 1):
x = layers.Conv2D(
filters=self._endpoints_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
net[str(level)])
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
endpoints[str(level)] = x
return endpoints
def _resample_with_alpha(self,
inputs,
input_width,
input_block_fn,
target_width,
target_num_filters,
target_block_fn,
alpha=0.5):
"""Match resolution and feature dimension."""
_, _, _, input_num_filters = inputs.get_shape().as_list()
if input_block_fn == 'bottleneck':
input_num_filters /= 4
new_num_filters = int(input_num_filters * alpha)
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
# Spatial resampling.
if input_width > target_width:
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=3,
strides=2,
padding='SAME',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
input_width /= 2
while input_width > target_width:
x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)
input_width /= 2
elif input_width < target_width:
scale = target_width // input_width
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
# Last 1x1 conv to match filter size.
if target_block_fn == 'bottleneck':
target_num_filters *= 4
x = layers.Conv2D(
filters=target_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
return x
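  # Worked example of the filter arithmetic above (comment added for clarity):
  # for a bottleneck input with 256 channels and alpha=0.5, the first 1x1 conv
  # narrows to (256 / 4) * 0.5 = 32 channels; if the target block is also a
  # bottleneck with target_num_filters=64, the last 1x1 conv widens back out
  # to 64 * 4 = 256 channels.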
def get_config(self):
config_dict = {
'min_level': self._min_level,
'max_level': self._max_level,
'endpoints_num_filters': self._endpoints_num_filters,
'resample_alpha': self._resample_alpha,
'block_repeats': self._block_repeats,
'filter_size_scale': self._filter_size_scale,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
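# A minimal usage sketch (not part of the original module): build the backbone
# with its default block specs and inspect the endpoint shape per output level.
# Running it needs the same TensorFlow / model-garden dependencies the module
# itself imports.
if __name__ == '__main__':
  backbone = SpineNet(
      input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),
      min_level=3,
      max_level=7)
  for level, shape in backbone.output_specs.items():
    print('level', level, 'shape', shape)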
| 34.17119 | 80 | 0.625367 | [
"Apache-2.0"
] | GPhilo/models | official/vision/beta/modeling/backbones/spinenet.py | 16,368 | Python |
import logging
import requests
import moment
import utils
import time
import json
class weather(utils.utils):
def message_callback(self,ch, method, properties, body):
        logging.info('weather message received')
        time.sleep(1)
        self.__get_weather(body)
        ch.basic_ack(delivery_tag=method.delivery_tag)  # ack only after processing is done
        logging.info('weather message is acked on: ' + str(moment.now()))
def __get_weather(self,body):
params = self.get_params(body)
req = requests.get(params.request_url)
data = json.loads(req.text)
if(data['status'] == 200):
wea = data['data']['forecast'][0]
params.content='天气:'+wea['type']+' '+wea['high']+','+wea['low']+' '+wea['notice']
else:
params.content='错误'
self.notification(params)
self.notification_to_zhouyu(params)
def notification_to_zhouyu(self,params):
url='http://push.devzhou.t.cn/Push/'+params.title+'/'+params.content+'?url='+params.red_url
requests.get(url)
        logging.info('message sent successfully')
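# A minimal wiring sketch (not part of the original file). It assumes the
# callback above is fed from a RabbitMQ queue consumed with pika -- the
# (ch, method, properties, body) signature and the basic_ack call point that
# way -- that the queue is named 'weather', and that the utils.utils base
# class can be constructed without arguments; all of these are assumptions.
if __name__ == '__main__':
    import pika

    logging.basicConfig(level=logging.INFO)
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='weather', durable=True)
    channel.basic_consume(queue='weather',
                          on_message_callback=weather().message_callback)
    channel.start_consuming()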
| 31.342857 | 99 | 0.632634 | [
"MIT"
] | hqs666666/python | Reptile/weather.py | 1,119 | Python |
"""This module implements row model of Amazon.co.jp CSV."""
from dataclasses import dataclass
from datetime import datetime
from typing import ClassVar, Optional
from zaimcsvconverter import CONFIG
from zaimcsvconverter.file_csv_convert import FileCsvConvert
from zaimcsvconverter.inputcsvformats import InputItemRow, InputItemRowData, InputRowFactory
from zaimcsvconverter.models import FileCsvConvertId, Store, StoreRowData
@dataclass
class Amazon201911RowData(InputItemRowData):
"""This class implements data class for wrapping list of Amazon.co.jp CSV row model."""
# Reason: This implement depends on design of CSV. pylint: disable=too-many-instance-attributes
ITEM_NAME_ENTIRE_ORDER: ClassVar[str] = "(注文全体)"
ITEM_NAME_BILLING_TO_CREDIT_CARD: ClassVar[str] = "(クレジットカードへの請求)"
ITEM_NAME_SHIPPING_HANDLING: ClassVar[str] = "(配送料・手数料)"
_ordered_date: str
order_id: str
_item_name: str
note: str
_price: str
_number: str
_subtotal_price_item: str
_total_order: str
destination: str
status: str
billing_address: str
billing_amount: str
credit_card_billing_date: str
credit_card_billing_amount: str
credit_card_identity: str
url_order_summary: str
url_receipt: str
url_item: str
@property
def date(self) -> datetime:
return datetime.strptime(self._ordered_date, "%Y/%m/%d")
@property
def item_name(self) -> str:
return self._item_name
@property
def price(self) -> Optional[int]:
return None if self._price == "" else int(self._price)
@property
def number(self) -> Optional[int]:
return None if self._number == "" else int(self._number)
@property
def total_order(self) -> Optional[int]:
return None if self._total_order == "" else int(self._total_order)
@property
def subtotal_price_item(self) -> Optional[int]:
return None if self._subtotal_price_item == "" else int(self._subtotal_price_item)
@property
def validate(self) -> bool:
self.stock_error(lambda: self.date, f"Invalid ordered date. Ordered date = {self._ordered_date}")
self.stock_error(lambda: self.price, f"Invalid price. Price = {self._price}")
self.stock_error(lambda: self.number, f"Invalid number. Number = {self._number}")
self.stock_error(lambda: self.total_order, f"Invalid total order. Total order = {self._total_order}")
return super().validate
@property
def is_entire_order(self) -> bool:
return (
self._item_name == self.ITEM_NAME_ENTIRE_ORDER
and self.price is None
and self.number is None
and self.subtotal_price_item is None
and self.total_order is not None
and self.total_order > 0
and self.credit_card_billing_date == ""
and self.credit_card_billing_amount == ""
)
@property
def is_billing_to_credit_card(self) -> bool:
return (
self._item_name == self.ITEM_NAME_BILLING_TO_CREDIT_CARD
and self.price is None
and self.number is None
and self.subtotal_price_item is None
and self.total_order is None
and self.credit_card_billing_date != ""
and self.credit_card_billing_amount != ""
)
@property
def is_shipping_handling(self) -> bool:
return (
self._item_name == self.ITEM_NAME_SHIPPING_HANDLING
and self.price is None
and self.number is None
and self.subtotal_price_item is not None
and self.total_order is None
and self.credit_card_billing_date == ""
and self.credit_card_billing_amount == ""
)
@property
def is_discount(self) -> bool:
# Includes Amazon point
return (
not self.is_entire_order
and not self.is_billing_to_credit_card
and not self.is_shipping_handling
and self.total_order is not None
and self.total_order < 0
)
@property
def is_payment(self) -> bool:
return (
not self.is_entire_order
and not self.is_billing_to_credit_card
and not self.is_shipping_handling
and not self.is_discount
and self.price is not None
and self.price > 0
and self.number is not None
and self.number > 0
)
@property
def is_free_kindle(self) -> bool:
return (
self.price == 0
and self.total_order == 0
and self.subtotal_price_item == 0
and self.destination == ""
and self.status.startswith("デジタル注文:")
and self.billing_amount == "0"
and self.credit_card_billing_date == ""
and self.credit_card_billing_amount == ""
and self.credit_card_identity == ""
)
class Amazon201911Row(InputItemRow):
"""This class implements row model of Amazon.co.jp CSV."""
def __init__(self, row_data: Amazon201911RowData):
super().__init__(FileCsvConvert.AMAZON.value, row_data)
self._store: Store = Store(FileCsvConvertId.AMAZON, StoreRowData("Amazon.co.jp", CONFIG.amazon.store_name_zaim))
@property
def store(self) -> Store:
return self._store
class Amazon201911RowToSkip(Amazon201911Row):
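    """Row model for Amazon.co.jp CSV rows that are excluded from conversion."""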
@property
def is_row_to_skip(self) -> bool:
return True
class Amazon201911DiscountRow(Amazon201911Row):
"""This class implements row model of Amazon.co.jp CSV."""
def __init__(self, row_data: Amazon201911RowData):
super().__init__(row_data)
self._total_order: Optional[int] = row_data.total_order
@property
def total_order(self) -> int:
if self._total_order is None:
raise ValueError("Total order on discount row is not allowed empty.")
return self._total_order
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.total_order, f"Total order in discount row is required. Total order = {self._total_order}"
)
return super().validate
class Amazon201911ShippingHandlingRow(Amazon201911Row):
"""Row model of shipping / handling of Amazon.co.jp CSV."""
def __init__(self, row_data):
super().__init__(row_data)
self._subtotal_price_item: Optional[int] = row_data.subtotal_price_item
@property
def subtotal_price_item(self):
if self._subtotal_price_item is None:
raise ValueError("Subtotal price item on shipping handling row is not allowed empty.")
return self._subtotal_price_item
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.subtotal_price_item,
"Subtotal price item in Shipping handling row is required. "
f"Subtotal price item = {self.subtotal_price_item}",
)
return super().validate
class Amazon201911PaymentRow(Amazon201911Row):
"""This class implements row model of Amazon.co.jp CSV."""
def __init__(self, row_data: Amazon201911RowData):
super().__init__(row_data)
self._price: Optional[int] = row_data.price
self._number: Optional[int] = row_data.number
@property
def price(self) -> int:
if self._price is None:
raise ValueError("Price on payment row is not allowed empty.")
return self._price
@property
def number(self) -> int:
if self._number is None:
raise ValueError("Number on payment row is not allowed empty.")
return self._number
@property
def validate(self) -> bool:
self.stock_error(lambda: self.price, f"Price in payment row is required. Price = {self._price}")
self.stock_error(lambda: self.number, f"Number in payment row is required. Number = {self._number}")
return super().validate
class Amazon201911RowFactory(InputRowFactory[Amazon201911RowData, Amazon201911Row]):
"""This class implements factory to create Amazon.co.jp CSV row instance."""
def create(self, input_row_data: Amazon201911RowData) -> Amazon201911Row:
# @see https://github.com/furyutei/amzOrderHistoryFilter/issues/3#issuecomment-543645937
if input_row_data.is_billing_to_credit_card or input_row_data.is_free_kindle:
return Amazon201911RowToSkip(input_row_data)
if input_row_data.is_discount:
return Amazon201911DiscountRow(input_row_data)
if input_row_data.is_shipping_handling:
return Amazon201911ShippingHandlingRow(input_row_data)
if input_row_data.is_payment:
return Amazon201911PaymentRow(input_row_data)
raise ValueError(
'Cash flow kind is not supported. "'
f'Order date = {input_row_data.date}, "'
f'"item name = {input_row_data.item_name}'
) # pragma: no cover
# Reason: This line is insurance for future development so process must be not able to reach
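# A usage sketch in outline (added for illustration; constructing the row data
# is omitted here because the InputItemRowData base class may declare further
# required fields):
#
#   factory = Amazon201911RowFactory()
#   row = factory.create(row_data)    # row_data: an Amazon201911RowData
#   if not row.is_row_to_skip:        # assumes the base row exposes this flag,
#       ...                           # as the Amazon201911RowToSkip override suggests
#
# For example, a row whose item name is ITEM_NAME_BILLING_TO_CREDIT_CARD with
# empty price/number/total and a non-empty credit-card billing date maps to
# Amazon201911RowToSkip, while an ordinary item line with positive price and
# number maps to Amazon201911PaymentRow.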
| 35.410156 | 120 | 0.661335 | [
"MIT"
] | yukihiko-shinoda/zaim-csv-converter | zaimcsvconverter/inputcsvformats/amazon_201911.py | 9,135 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
#
# AppDaemon documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 11 14:36:18 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
autodoc_mock_imports = ["iso8601", "dateutil"]
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "AppDaemon"
copyright = "2021, Andrew Cockburn"
author = "Andrew Cockburn"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "4.0.7"
# The full version, including alpha/beta/rc tags.
release = "4.0.7"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "AppDaemondoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "AppDaemon.tex", "AppDaemon Documentation", "Andrew Cockburn", "manual",),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "appdaemon", "AppDaemon Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AppDaemon",
"AppDaemon Documentation",
author,
"AppDaemon",
"Sandboxed python Apps for automation",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# omit class name during the generation of the doc
add_module_names = False
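# A typical build invocation for this configuration (not part of the original
# file): run from the documentation directory, with sphinx and sphinx_rtd_theme
# installed, e.g.
#
#   sphinx-build -b html . _build/html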
| 32.043624 | 91 | 0.709708 | [
"Apache-2.0"
] | ReneTode/appdaemon | docs/conf.py | 9,549 | Python |
# -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotM14 import SlotM14
from numpy import pi, exp, sqrt, angle
from pyleecan.Methods.Slot.Slot.comp_height import comp_height
from pyleecan.Methods.Slot.Slot.comp_surface import comp_surface
from pyleecan.Methods.Slot.Slot.comp_angle_opening import comp_angle_opening
from pyleecan.Methods.Slot.Slot.comp_height_active import comp_height_active
from pyleecan.Methods.Slot.Slot.comp_surface_active import comp_surface_active
from pyleecan.Methods import ParentMissingError
Mag14_test = list()
# Internal Slot inset
lam = LamSlotMag(Rint=40e-3, Rext=90e-3, is_internal=True)
lam.slot = SlotM14(Zs=4, W0=0.628, H0=0.02, Hmag=0.02, Wmag=0.628, Rtopm=0.04)
Mag14_test.append(
{
"test_obj": lam,
"Rmec": 90e-3,
"S_exp": 0.0010048,
"SA_exp": 9.022e-4,
"Ao": 0.628,
"H_exp": 0.02,
"HA_exp": 0.02,
}
)
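# Worked check of S_exp above (added for clarity): the slot spans W0 = 0.628 rad
# between radii Rbo - H0 = 0.07 m and Rbo = Rext = 0.09 m, so its surface is
# W0 / 2 * (Rbo**2 - (Rbo - H0)**2) = 0.314 * (0.0081 - 0.0049) = 0.0010048 m**2.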
# Internal slot surface
lam = LamSlotMag(Rint=40e-3, Rext=90e-3, is_internal=True)
lam.slot = SlotM14(Zs=8, W0=0.628, H0=0, Hmag=0.02, Wmag=0.628, Rtopm=0.05)
Mag14_test.append(
{
"test_obj": lam,
"Rmec": 0.11,
"S_exp": 0,
"SA_exp": 1.1089e-3,
"Ao": 0.628,
"H_exp": 0,
"HA_exp": 0.02,
}
)
# For AlmostEqual
DELTA = 1e-4
@pytest.mark.METHODS
class Test_Magnet_Type_14_meth(object):
"""unittest for MagnetType14 methods"""
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_surface(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_surface_active(self, test_dict):
"""Check that the computation of the active surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface_active()
a = result
b = test_dict["SA_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_surface_active(test_obj.slot, Ndisc=1000)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_height(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_height_active(self, test_dict):
"""Check that the computation of the active height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height_active()
a = result
b = test_dict["HA_exp"]
msg = "Return " + str(a) + " expected " + str(b)
# assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_height_active(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_angle_opening(self, test_dict):
"""Check that the computation of the average opening angle is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_angle_opening()
assert a == pytest.approx(test_dict["Ao"], rel=DELTA)
# Check that the analytical method returns the same result as the numerical one
b = comp_angle_opening(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_width_opening(self, test_dict):
"""Check that the computation of the average opening width is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_width_opening()
point_dict = test_obj.slot._comp_point_coordinate()
assert a == pytest.approx(abs(point_dict["Z1"] - point_dict["Z4"]), rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_mec_radius(self, test_dict):
"""Check that the computation of the mechanical radius is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.comp_radius_mec()
assert a == pytest.approx(test_dict["Rmec"], rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_point_coordinate(self, test_dict):
"""Check that the point coordinates are correct"""
test_obj = test_dict["test_obj"]
point_dict = test_obj.slot._comp_point_coordinate()
Z1 = point_dict["Z1"]
Z2 = point_dict["Z2"]
Z3 = point_dict["Z3"]
Z4 = point_dict["Z4"]
ZM0 = point_dict["ZM0"]
ZM1 = point_dict["ZM1"]
ZM2 = point_dict["ZM2"]
ZM3 = point_dict["ZM3"]
ZM4 = point_dict["ZM4"]
W0 = test_obj.slot.W0
H0 = test_obj.slot.H0
Wmag = test_obj.slot.Wmag
Hmag = test_obj.slot.Hmag
Rbo = test_obj.get_Rbo()
assert abs(Z1) == pytest.approx(Rbo, rel=DELTA)
assert angle(Z1) == pytest.approx(-W0 / 2, rel=DELTA)
assert abs(Z4) == pytest.approx(Rbo, rel=DELTA)
assert angle(Z4) == pytest.approx(W0 / 2, rel=DELTA)
if test_obj.is_internal:
assert abs(Z2) == pytest.approx(Rbo - H0, rel=DELTA)
assert abs(Z3) == pytest.approx(Rbo - H0, rel=DELTA)
else:
assert abs(Z3) == pytest.approx(Rbo + H0, rel=DELTA)
assert abs(Z2) == pytest.approx(Rbo + H0, rel=DELTA)
assert angle(Z2) == pytest.approx(-W0 / 2, rel=DELTA)
assert angle(Z3) == pytest.approx(W0 / 2, rel=DELTA)
assert angle(ZM1) == pytest.approx(angle(ZM2), rel=DELTA)
assert angle(ZM1) == pytest.approx(-Wmag / 2, rel=DELTA)
assert angle(ZM3) == pytest.approx(angle(ZM4), rel=DELTA)
assert angle(ZM3) == pytest.approx(Wmag / 2, rel=DELTA)
if test_obj.is_internal:
assert ZM0 == pytest.approx(Rbo + Hmag - H0, rel=DELTA)
else:
assert ZM0 == pytest.approx(Rbo - Hmag + H0, rel=DELTA)
| 39.134409 | 87 | 0.629619 | [
"Apache-2.0"
] | ajpina/pyleecan | Tests/Methods/Slot/test_SlotM14_meth.py | 7,279 | Python |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.tests.v1_1 import fakes
from heat.engine.resources import instance as instances
from heat.engine.resources import nova_utils
from heat.common import template_format
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
nokey_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "NoKey Test",
"Parameters" : {},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"UserData" : "some data"
}
}
}
}
'''
class nokeyTest(HeatTestCase):
def setUp(self):
super(nokeyTest, self).setUp()
self.fc = fakes.FakeClient()
utils.setup_dummy_db()
def test_nokey_create(self):
stack_name = 'instance_create_test_nokey_stack'
t = template_format.parse(nokey_template)
stack = utils.parse_stack(t, stack_name=stack_name)
t['Resources']['WebServer']['Properties']['ImageId'] = 'CentOS 5.2'
t['Resources']['WebServer']['Properties']['InstanceType'] = \
'256 MB Server'
instance = instances.Instance('create_instance_name',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
instance.t = instance.stack.resolve_runtime_data(instance.t)
# need to resolve the template functions
server_userdata = nova_utils.build_userdata(
instance,
instance.t['Properties']['UserData'])
instance.mime_string = server_userdata
self.m.StubOutWithMock(self.fc.servers, 'create')
self.fc.servers.create(
image=1, flavor=1, key_name=None,
name=utils.PhysName(stack_name, instance.name),
security_groups=None,
userdata=server_userdata, scheduler_hints=None,
meta=None, nics=None, availability_zone=None).AndReturn(
self.fc.servers.list()[1])
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
| 34.121951 | 78 | 0.651894 | [
"Apache-2.0"
] | citrix-openstack-build/heat | heat/tests/test_nokey.py | 2,798 | Python |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from six.moves import range
import numpy as np
import utool
from ibeis.control import SQLDatabaseControl as sqldbc
from ibeis.control._sql_helpers import _results_gen
from os.path import join
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_SQL_NUMPY] ')
# list of 10,000 chips with 3,000 features apiece.
def grab_numpy_testdata(shape=(3000, 128), dtype=np.uint8):
ndata = utool.get_argval('--ndata', type_=int, default=2)
print('[TEST] build ndata=%d numpy arrays with shape=%r' % (ndata, shape))
print(' * expected_memory(table_list) = %s' % utool.byte_str2(ndata * np.product(shape)))
table_list = [np.empty(shape, dtype=dtype) for i in range(ndata)]
print(' * memory+overhead(table_list) = %s' % utool.byte_str2(utool.get_object_size(table_list)))
return table_list
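# Worked example of the sizes involved (added for clarity): with the default
# ndata=2 and shape (3000, 128) uint8 arrays, the "expected_memory" print works
# out to 2 * 3000 * 128 = 768,000 bytes (~750 KiB); the second print adds the
# Python object overhead measured by utool.get_object_size on top of that.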
def TEST_SQL_NUMPY():
sqldb_fname = 'temp_test_sql_numpy.sqlite3'
sqldb_dpath = utool.util_cplat.get_app_resource_dir('ibeis', 'testfiles')
utool.ensuredir(sqldb_dpath)
utool.util_path.remove_file(join(sqldb_dpath, sqldb_fname), dryrun=False)
db = sqldbc.SQLDatabaseController(sqldb_dpath=sqldb_dpath,
sqldb_fname=sqldb_fname)
db.add_table('temp', [
('temp_id', 'INTEGER PRIMARY KEY'),
('temp_hash', 'NUMPY'),
])
tt = utool.tic()
feats_list = grab_numpy_testdata(shape=(3e3, 128), dtype=np.uint8)
print(' * numpy.new time=%r sec' % utool.toc(tt))
print('[TEST] insert numpy arrays')
tt = utool.tic()
feats_iter = ((feats, ) for feats in feats_list)
db.executemany(operation='''
INSERT
INTO temp
(
temp_hash
)
VALUES (?)
''', params_iter=feats_iter)
print(' * execute insert time=%r sec' % utool.toc(tt))
print('[TEST] save sql database')
tt = utool.tic()
#db.cur.commit()
db.connection.commit()
print(' * commit time=%r sec' % utool.toc(tt))
print('[TEST] read from sql database')
tt = utool.tic()
db.cur.execute('SELECT temp_hash FROM temp', [])
print(' * execute select time=%r sec' % utool.toc(tt))
tt = utool.tic()
result_list = _results_gen(db.cur)
print(' * iter results time=%r sec' % utool.toc(tt))
print(' * memory(result_list) = %s' % utool.byte_str2(utool.get_object_size(result_list)))
del result_list
#print('[TEST] result_list=%r' % result_list)
print('[TEST] dump sql database')
tt = utool.tic()
db.dump('temp.dump.txt')
print(' * dump time=%r sec' % utool.toc(tt))
#with open('temp.dump.txt') as file_:
# print(file_.read())
return locals()
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support() # For win32
test_locals = utool.run_test(TEST_SQL_NUMPY)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
| 34.574713 | 101 | 0.655918 | [
"Apache-2.0"
] | SU-ECE-17-7/ibeis | _broken/test_sql_numpy.py | 3,008 | Python |
"""
Tests for Markov Autoregression models
Author: Chad Fulton
License: BSD-3
"""
import warnings
import os
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pandas as pd
import pytest
from statsmodels.tools import add_constant
from statsmodels.tsa.regime_switching import markov_autoregression
current_path = os.path.dirname(os.path.abspath(__file__))
rgnp = [2.59316421, 2.20217133, 0.45827562, 0.9687438,
-0.24130757, 0.89647478, 2.05393219, 1.73353648,
0.93871289, -0.46477833, -0.80983406, -1.39763689,
-0.39886093, 1.1918416, 1.45620048, 2.11808228,
1.08957863, 1.32390273, 0.87296367, -0.19773273,
0.45420215, 0.07221876, 1.1030364, 0.82097489,
-0.05795795, 0.58447772, -1.56192672, -2.05041027,
0.53637183, 2.33676839, 2.34014559, 1.2339263,
1.8869648, -0.45920792, 0.84940469, 1.70139849,
-0.28756312, 0.09594627, -0.86080289, 1.03447127,
1.23685944, 1.42004502, 2.22410631, 1.30210173,
1.03517699, 0.9253425, -0.16559951, 1.3444382,
1.37500131, 1.73222184, 0.71605635, 2.21032143,
0.85333031, 1.00238776, 0.42725441, 2.14368343,
1.43789184, 1.57959926, 2.27469826, 1.95962656,
0.25992399, 1.01946914, 0.49016398, 0.5636338,
0.5959546, 1.43082857, 0.56230122, 1.15388393,
1.68722844, 0.77438205, -0.09647045, 1.39600146,
0.13646798, 0.55223715, -0.39944872, -0.61671102,
-0.08722561, 1.2101835, -0.90729755, 2.64916158,
-0.0080694, 0.51111895, -0.00401437, 2.16821432,
1.92586732, 1.03504717, 1.85897219, 2.32004929,
0.25570789, -0.09855274, 0.89073682, -0.55896485,
0.28350255, -1.31155407, -0.88278776, -1.97454941,
1.01275265, 1.68264723, 1.38271284, 1.86073637,
0.4447377, 0.41449001, 0.99202275, 1.36283576,
1.59970522, 1.98845816, -0.25684232, 0.87786949,
3.1095655, 0.85324478, 1.23337317, 0.00314302,
-0.09433369, 0.89883322, -0.19036628, 0.99772376,
-2.39120054, 0.06649673, 1.26136017, 1.91637838,
-0.3348029, 0.44207108, -1.40664911, -1.52129889,
0.29919869, -0.80197448, 0.15204792, 0.98585027,
2.13034606, 1.34397924, 1.61550522, 2.70930099,
1.24461412, 0.50835466, 0.14802167]
rec = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
def test_predict():
# AR(1) without mean, k_regimes=2
endog = np.ones(10)
with pytest.warns(FutureWarning):
markov_autoregression.MarkovAutoregression(
endog,
k_regimes=2,
order=1,
trend='nc'
)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1, trend='n')
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.ones(9))
params = np.r_[0.5, 0.5, 1., 0.1, 0.5]
mod_resid = mod._resid(params)
resids = np.zeros((2, 2, mod.nobs))
# Resids when: S_{t} = 0
resids[0, :, :] = np.ones(9) - 0.1 * np.ones(9)
assert_allclose(mod_resid[0, :, :], resids[0, :, :])
# Resids when: S_{t} = 1
resids[1, :, :] = np.ones(9) - 0.5 * np.ones(9)
assert_allclose(mod_resid[1, :, :], resids[1, :, :])
# AR(1) with mean, k_regimes=2
endog = np.arange(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.arange(1, 10))
params = np.r_[0.5, 0.5, 2., 3., 1., 0.1, 0.5]
mod_resid = mod._resid(params)
resids = np.zeros((2, 2, mod.nobs))
# Resids when: S_t = 0, S_{t-1} = 0
resids[0, 0, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 2.)
assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])
# Resids when: S_t = 0, S_{t-1} = 1
resids[0, 1, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 3.)
assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])
# Resids when: S_t = 1, S_{t-1} = 0
resids[1, 0, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 2.)
assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])
# Resids when: S_t = 1, S_{t-1} = 1
resids[1, 1, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 3.)
assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])
# AR(2) with mean, k_regimes=3
endog = np.arange(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=3, order=2)
assert_equal(mod.nobs, 8)
assert_equal(mod.endog, np.arange(2, 10))
params = np.r_[[0.3] * 6, 2., 3., 4, 1., 0.1, 0.5, 0.8, -0.05, -0.25, -0.4]
mod_resid = mod._resid(params)
resids = np.zeros((3, 3, 3, mod.nobs))
# Resids when: S_t = 0, S_{t-1} = 0, S_{t-2} = 0
resids[0, 0, 0, :] = (
(np.arange(2, 10) - 2.) -
0.1 * (np.arange(1, 9) - 2.) -
(-0.05) * (np.arange(8) - 2.))
assert_allclose(mod_resid[0, 0, 0, :], resids[0, 0, 0, :])
# Resids when: S_t = 1, S_{t-1} = 0, S_{t-2} = 0
resids[1, 0, 0, :] = (
(np.arange(2, 10) - 3.) -
0.5 * (np.arange(1, 9) - 2.) -
(-0.25) * (np.arange(8) - 2.))
assert_allclose(mod_resid[1, 0, 0, :], resids[1, 0, 0, :])
# Resids when: S_t = 0, S_{t-1} = 2, S_{t-2} = 1
resids[0, 2, 1, :] = (
(np.arange(2, 10) - 2.) -
0.1 * (np.arange(1, 9) - 4.) -
(-0.05) * (np.arange(8) - 3.))
assert_allclose(mod_resid[0, 2, 1, :], resids[0, 2, 1, :])
# AR(1) with mean + non-switching exog
endog = np.arange(10)
exog = np.r_[0.4, 5, 0.2, 1.2, -0.3, 2.5, 0.2, -0.7, 2., -1.1]
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1, exog=exog)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.arange(1, 10))
params = np.r_[0.5, 0.5, 2., 3., 1.5, 1., 0.1, 0.5]
mod_resid = mod._resid(params)
resids = np.zeros((2, 2, mod.nobs))
# Resids when: S_t = 0, S_{t-1} = 0
resids[0, 0, :] = (
(np.arange(1, 10) - 2. - 1.5 * exog[1:]) -
0.1 * (np.arange(9) - 2. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])
# Resids when: S_t = 0, S_{t-1} = 1
resids[0, 1, :] = (
(np.arange(1, 10) - 2. - 1.5 * exog[1:]) -
0.1 * (np.arange(9) - 3. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])
# Resids when: S_t = 1, S_{t-1} = 0
resids[1, 0, :] = (
(np.arange(1, 10) - 3. - 1.5 * exog[1:]) -
0.5 * (np.arange(9) - 2. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])
# Resids when: S_t = 1, S_{t-1} = 1
resids[1, 1, :] = (
(np.arange(1, 10) - 3. - 1.5 * exog[1:]) -
0.5 * (np.arange(9) - 3. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])
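# The residual layouts checked above implement the switching-mean AR recursion
# (written here for order 1 with an optional non-switching exog term):
#
#   resid_t(S_t, S_{t-1}) = (y_t - mu_{S_t} - beta * x_t)
#                           - phi_{S_t} * (y_{t-1} - mu_{S_{t-1}} - beta * x_{t-1})
#
# which is why one residual series is produced for every combination of the
# current and lagged regimes (and for every (S_t, S_{t-1}, S_{t-2}) triple in
# the AR(2) case).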
def test_conditional_loglikelihoods():
# AR(1) without mean, k_regimes=2, non-switching variance
endog = np.ones(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.ones(9))
params = np.r_[0.5, 0.5, 2., 3., 2., 0.1, 0.5]
resid = mod._resid(params)
conditional_likelihoods = (
np.exp(-0.5 * resid**2 / 2) / np.sqrt(2 * np.pi * 2))
assert_allclose(mod._conditional_loglikelihoods(params),
np.log(conditional_likelihoods))
# AR(1) without mean, k_regimes=3, switching variance
endog = np.ones(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=3, order=1, switching_variance=True)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.ones(9))
params = np.r_[[0.3]*6, 2., 3., 4., 1.5, 3., 4.5, 0.1, 0.5, 0.8]
mod_conditional_loglikelihoods = mod._conditional_loglikelihoods(params)
conditional_likelihoods = mod._resid(params)
# S_t = 0
conditional_likelihoods[0, :, :] = (
np.exp(-0.5 * conditional_likelihoods[0, :, :]**2 / 1.5) /
np.sqrt(2 * np.pi * 1.5))
assert_allclose(mod_conditional_loglikelihoods[0, :, :],
np.log(conditional_likelihoods[0, :, :]))
# S_t = 1
conditional_likelihoods[1, :, :] = (
np.exp(-0.5 * conditional_likelihoods[1, :, :]**2 / 3.) /
np.sqrt(2 * np.pi * 3.))
assert_allclose(mod_conditional_loglikelihoods[1, :, :],
np.log(conditional_likelihoods[1, :, :]))
# S_t = 2
conditional_likelihoods[2, :, :] = (
np.exp(-0.5 * conditional_likelihoods[2, :, :]**2 / 4.5) /
np.sqrt(2 * np.pi * 4.5))
assert_allclose(mod_conditional_loglikelihoods[2, :, :],
np.log(conditional_likelihoods[2, :, :]))
class MarkovAutoregression(object):
@classmethod
def setup_class(cls, true, endog, atol=1e-5, rtol=1e-7, **kwargs):
cls.model = markov_autoregression.MarkovAutoregression(endog, **kwargs)
cls.true = true
cls.result = cls.model.smooth(cls.true['params'])
cls.atol = atol
cls.rtol = rtol
def test_llf(self):
assert_allclose(self.result.llf, self.true['llf'], atol=self.atol,
rtol=self.rtol)
def test_fit(self, **kwargs):
# Test fitting against Stata
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = self.model.fit(disp=False, **kwargs)
assert_allclose(res.llf, self.true['llf_fit'], atol=self.atol,
rtol=self.rtol)
@pytest.mark.smoke
def test_fit_em(self, **kwargs):
# Test EM fitting (smoke test)
res_em = self.model._fit_em(**kwargs)
assert_allclose(res_em.llf, self.true['llf_fit_em'], atol=self.atol,
rtol=self.rtol)
hamilton_ar2_short_filtered_joint_probabilities = np.array([
[[[4.99506987e-02, 6.44048275e-04, 6.22227140e-05,
4.45756755e-06, 5.26645567e-07, 7.99846146e-07,
1.19425705e-05, 6.87762063e-03],
[1.95930395e-02, 3.25884335e-04, 1.12955091e-04,
3.38537103e-04, 9.81927968e-06, 2.71696750e-05,
5.83828290e-03, 7.64261509e-02]],
[[1.97113193e-03, 9.50372207e-05, 1.98390978e-04,
1.88188953e-06, 4.83449400e-07, 1.14872860e-05,
4.02918239e-06, 4.35015431e-04],
[2.24870443e-02, 1.27331172e-03, 9.62155856e-03,
4.04178695e-03, 2.75516282e-04, 1.18179572e-02,
5.99778157e-02, 1.48149567e-01]]],
[[[6.70912859e-02, 1.84223872e-02, 2.55621792e-04,
4.48500688e-05, 7.80481515e-05, 2.73734559e-06,
7.59835896e-06, 1.42930726e-03],
[2.10053328e-02, 7.44036383e-03, 3.70388879e-04,
2.71878370e-03, 1.16152088e-03, 7.42182691e-05,
2.96490192e-03, 1.26774695e-02]],
[[8.09335679e-02, 8.31016518e-02, 2.49149080e-02,
5.78825626e-04, 2.19019941e-03, 1.20179130e-03,
7.83659430e-05, 2.76363377e-03],
[7.36967899e-01, 8.88697316e-01, 9.64463954e-01,
9.92270877e-01, 9.96283886e-01, 9.86863839e-01,
9.31117063e-01, 7.51241236e-01]]]])
hamilton_ar2_short_predicted_joint_probabilities = np.array([[
[[[1.20809334e-01, 3.76964436e-02, 4.86045844e-04,
4.69578023e-05, 3.36400588e-06, 3.97445190e-07,
6.03622290e-07, 9.01273552e-06],
[3.92723623e-02, 1.47863379e-02, 2.45936108e-04,
8.52441571e-05, 2.55484811e-04, 7.41034525e-06,
2.05042201e-05, 4.40599447e-03]],
[[4.99131230e-03, 1.48756005e-03, 7.17220245e-05,
1.49720314e-04, 1.42021122e-06, 3.64846209e-07,
8.66914462e-06, 3.04071516e-06],
[4.70476003e-02, 1.69703652e-02, 9.60933974e-04,
7.26113047e-03, 3.05022748e-03, 2.07924699e-04,
8.91869322e-03, 4.52636381e-02]]],
[[[4.99131230e-03, 6.43506069e-03, 1.76698327e-03,
2.45179642e-05, 4.30179435e-06, 7.48598845e-06,
2.62552503e-07, 7.28796600e-07],
[1.62256192e-03, 2.01472650e-03, 7.13642497e-04,
3.55258493e-05, 2.60772139e-04, 1.11407276e-04,
7.11864528e-06, 2.84378568e-04]],
[[5.97950448e-03, 7.76274317e-03, 7.97069493e-03,
2.38971340e-03, 5.55180599e-05, 2.10072977e-04,
1.15269812e-04, 7.51646942e-06],
[5.63621989e-02, 7.06862760e-02, 8.52394030e-02,
9.25065601e-02, 9.51736612e-02, 9.55585689e-02,
9.46550451e-02, 8.93080931e-02]]]],
[[[[3.92723623e-02, 1.22542551e-02, 1.58002431e-04,
1.52649118e-05, 1.09356167e-06, 1.29200377e-07,
1.96223855e-07, 2.92983500e-06],
[1.27665503e-02, 4.80670161e-03, 7.99482261e-05,
2.77109335e-05, 8.30522919e-05, 2.40893443e-06,
6.66545485e-06, 1.43228843e-03]],
[[1.62256192e-03, 4.83571884e-04, 2.33151963e-05,
4.86706634e-05, 4.61678312e-07, 1.18603191e-07,
2.81814142e-06, 9.88467229e-07],
[1.52941031e-02, 5.51667911e-03, 3.12377744e-04,
2.36042810e-03, 9.91559466e-04, 6.75915830e-05,
2.89926399e-03, 1.47141776e-02]]],
[[[4.70476003e-02, 6.06562252e-02, 1.66554040e-02,
2.31103828e-04, 4.05482745e-05, 7.05621631e-05,
2.47479309e-06, 6.86956236e-06],
[1.52941031e-02, 1.89906063e-02, 6.72672133e-03,
3.34863029e-04, 2.45801156e-03, 1.05011361e-03,
6.70996238e-05, 2.68052335e-03]],
[[5.63621989e-02, 7.31708248e-02, 7.51309569e-02,
2.25251946e-02, 5.23307566e-04, 1.98012644e-03,
1.08652148e-03, 7.08494735e-05],
[5.31264334e-01, 6.66281623e-01, 8.03457913e-01,
8.71957394e-01, 8.97097216e-01, 9.00725317e-01,
8.92208794e-01, 8.41808970e-01]]]]])
hamilton_ar2_short_smoothed_joint_probabilities = np.array([
[[[1.29898189e-02, 1.66298475e-04, 1.29822987e-05,
9.95268382e-07, 1.84473346e-07, 7.18761267e-07,
1.69576494e-05, 6.87762063e-03],
[5.09522472e-03, 8.41459714e-05, 2.35672254e-05,
7.55872505e-05, 3.43949612e-06, 2.44153330e-05,
8.28997024e-03, 7.64261509e-02]],
[[5.90021731e-04, 2.55342733e-05, 4.50698224e-05,
5.30734135e-07, 1.80741761e-07, 1.11483792e-05,
5.98539007e-06, 4.35015431e-04],
[6.73107901e-03, 3.42109009e-04, 2.18579464e-03,
1.13987259e-03, 1.03004157e-04, 1.14692946e-02,
8.90976350e-02, 1.48149567e-01]]],
[[[6.34648123e-02, 1.79187451e-02, 2.37462147e-04,
3.55542558e-05, 7.63980455e-05, 2.90520820e-06,
8.17644492e-06, 1.42930726e-03],
[1.98699352e-02, 7.23695477e-03, 3.44076057e-04,
2.15527721e-03, 1.13696383e-03, 7.87695658e-05,
3.19047276e-03, 1.26774695e-02]],
[[8.81925054e-02, 8.33092133e-02, 2.51106301e-02,
5.81007470e-04, 2.19065072e-03, 1.20221350e-03,
7.56893839e-05, 2.76363377e-03],
[8.03066603e-01, 8.90916999e-01, 9.72040418e-01,
9.96011175e-01, 9.96489179e-01, 9.87210535e-01,
8.99315113e-01, 7.51241236e-01]]]])
class TestHamiltonAR2Short(MarkovAutoregression):
# This is just a set of regression tests
@classmethod
def setup_class(cls):
true = {
'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,
np.exp(-0.262658)**2, 0.013486, -0.057521],
'llf': -10.14066,
'llf_fit': -4.0523073,
'llf_fit_em': -8.885836
}
super(TestHamiltonAR2Short, cls).setup_class(
true, rgnp[-10:], k_regimes=2, order=2, switching_ar=False)
def test_fit_em(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
super(TestHamiltonAR2Short, self).test_fit_em()
def test_filter_output(self, **kwargs):
res = self.result
# Filtered
assert_allclose(res.filtered_joint_probabilities,
hamilton_ar2_short_filtered_joint_probabilities)
# Predicted
desired = hamilton_ar2_short_predicted_joint_probabilities
if desired.ndim > res.predicted_joint_probabilities.ndim:
desired = desired.sum(axis=-2)
assert_allclose(res.predicted_joint_probabilities, desired)
def test_smoother_output(self, **kwargs):
res = self.result
# Filtered
assert_allclose(res.filtered_joint_probabilities,
hamilton_ar2_short_filtered_joint_probabilities)
# Predicted
desired = hamilton_ar2_short_predicted_joint_probabilities
if desired.ndim > res.predicted_joint_probabilities.ndim:
desired = desired.sum(axis=-2)
assert_allclose(res.predicted_joint_probabilities, desired)
# Smoothed, entry-by-entry
assert_allclose(
res.smoothed_joint_probabilities[..., -1],
hamilton_ar2_short_smoothed_joint_probabilities[..., -1])
assert_allclose(
res.smoothed_joint_probabilities[..., -2],
hamilton_ar2_short_smoothed_joint_probabilities[..., -2])
assert_allclose(
res.smoothed_joint_probabilities[..., -3],
hamilton_ar2_short_smoothed_joint_probabilities[..., -3])
assert_allclose(
res.smoothed_joint_probabilities[..., :-3],
hamilton_ar2_short_smoothed_joint_probabilities[..., :-3])
hamilton_ar4_filtered = [
0.776712, 0.949192, 0.996320, 0.990258, 0.940111, 0.537442,
0.140001, 0.008942, 0.048480, 0.614097, 0.910889, 0.995463,
0.979465, 0.992324, 0.984561, 0.751038, 0.776268, 0.522048,
0.814956, 0.821786, 0.472729, 0.673567, 0.029031, 0.001556,
0.433276, 0.985463, 0.995025, 0.966067, 0.998445, 0.801467,
0.960997, 0.996431, 0.461365, 0.199357, 0.027398, 0.703626,
0.946388, 0.985321, 0.998244, 0.989567, 0.984510, 0.986811,
0.793788, 0.973675, 0.984848, 0.990418, 0.918427, 0.998769,
0.977647, 0.978742, 0.927635, 0.998691, 0.988934, 0.991654,
0.999288, 0.999073, 0.918636, 0.987710, 0.966876, 0.910015,
0.826150, 0.969451, 0.844049, 0.941525, 0.993363, 0.949978,
0.615206, 0.970915, 0.787585, 0.707818, 0.200476, 0.050835,
0.140723, 0.809850, 0.086422, 0.990344, 0.785963, 0.817425,
0.659152, 0.996578, 0.992860, 0.948501, 0.996883, 0.999712,
0.906694, 0.725013, 0.963690, 0.386960, 0.241302, 0.009078,
0.015789, 0.000896, 0.541530, 0.928686, 0.953704, 0.992741,
0.935877, 0.918958, 0.977316, 0.987941, 0.987300, 0.996769,
0.645469, 0.921285, 0.999917, 0.949335, 0.968914, 0.886025,
0.777141, 0.904381, 0.368277, 0.607429, 0.002491, 0.227610,
0.871284, 0.987717, 0.288705, 0.512124, 0.030329, 0.005177,
0.256183, 0.020955, 0.051620, 0.549009, 0.991715, 0.987892,
0.995377, 0.999833, 0.993756, 0.956164, 0.927714]
hamilton_ar4_smoothed = [
0.968096, 0.991071, 0.998559, 0.958534, 0.540652, 0.072784,
0.010999, 0.006228, 0.172144, 0.898574, 0.989054, 0.998293,
0.986434, 0.993248, 0.976868, 0.858521, 0.847452, 0.675670,
0.596294, 0.165407, 0.035270, 0.127967, 0.007414, 0.004944,
0.815829, 0.998128, 0.998091, 0.993227, 0.999283, 0.921100,
0.977171, 0.971757, 0.124680, 0.063710, 0.114570, 0.954701,
0.994852, 0.997302, 0.999345, 0.995817, 0.996218, 0.994580,
0.933990, 0.996054, 0.998151, 0.996976, 0.971489, 0.999786,
0.997362, 0.996755, 0.993053, 0.999947, 0.998469, 0.997987,
0.999830, 0.999360, 0.953176, 0.992673, 0.975235, 0.938121,
0.946784, 0.986897, 0.905792, 0.969755, 0.995379, 0.914480,
0.772814, 0.931385, 0.541742, 0.394596, 0.063428, 0.027829,
0.124527, 0.286105, 0.069362, 0.995950, 0.961153, 0.962449,
0.945022, 0.999855, 0.998943, 0.980041, 0.999028, 0.999838,
0.863305, 0.607421, 0.575983, 0.013300, 0.007562, 0.000635,
0.001806, 0.002196, 0.803550, 0.972056, 0.984503, 0.998059,
0.985211, 0.988486, 0.994452, 0.994498, 0.998873, 0.999192,
0.870482, 0.976282, 0.999961, 0.984283, 0.973045, 0.786176,
0.403673, 0.275418, 0.115199, 0.257560, 0.004735, 0.493936,
0.907360, 0.873199, 0.052959, 0.076008, 0.001653, 0.000847,
0.062027, 0.021257, 0.219547, 0.955654, 0.999851, 0.997685,
0.998324, 0.999939, 0.996858, 0.969209, 0.927714]
class TestHamiltonAR4(MarkovAutoregression):
@classmethod
def setup_class(cls):
# Results from E-views:
# Dependent variable followed by a list of switching regressors:
# rgnp c
# List of non-switching regressors:
# ar(1) ar(2) ar(3) ar(4)
# Do not check "Regime specific error variances"
# Switching type: Markov
# Number of Regimes: 2
# Probability regressors:
# c
# Method SWITCHREG
# Sample 1951q1 1984q4
true = {
'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,
np.exp(-0.262658)**2, 0.013486, -0.057521,
-0.246983, -0.212923],
'llf': -181.26339,
'llf_fit': -181.26339,
'llf_fit_em': -183.85444,
'bse_oim': np.r_[.0965189, .0377362, .2645396, .0745187, np.nan,
.1199942, .137663, .1069103, .1105311, ]
}
super(TestHamiltonAR4, cls).setup_class(
true, rgnp, k_regimes=2, order=4, switching_ar=False)
def test_filtered_regimes(self):
res = self.result
assert_equal(len(res.filtered_marginal_probabilities[:, 1]),
self.model.nobs)
assert_allclose(res.filtered_marginal_probabilities[:, 1],
hamilton_ar4_filtered, atol=1e-5)
def test_smoothed_regimes(self):
res = self.result
assert_equal(len(res.smoothed_marginal_probabilities[:, 1]),
self.model.nobs)
assert_allclose(res.smoothed_marginal_probabilities[:, 1],
hamilton_ar4_smoothed, atol=1e-5)
def test_bse(self):
# Cannot compare middle element of bse because we estimate sigma^2
# rather than sigma
bse = self.result.cov_params_approx.diagonal()**0.5
assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-6)
assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-6)
class TestHamiltonAR2Switch(MarkovAutoregression):
# Results from Stata, see http://www.stata.com/manuals14/tsmswitch.pdf
@classmethod
def setup_class(cls):
path = os.path.join(current_path, 'results',
'results_predict_rgnp.csv')
results = pd.read_csv(path)
true = {
'params': np.r_[.3812383, .3564492, -.0055216, 1.195482,
.6677098**2, .3710719, .4621503, .7002937,
-.3206652],
'llf': -179.32354,
'llf_fit': -179.38684,
'llf_fit_em': -184.99606,
'bse_oim': np.r_[.1424841, .0994742, .2057086, .1225987, np.nan,
.1754383, .1652473, .187409, .1295937],
'smoothed0': results.iloc[3:]['switchar2_sm1'],
'smoothed1': results.iloc[3:]['switchar2_sm2'],
'predict0': results.iloc[3:]['switchar2_yhat1'],
'predict1': results.iloc[3:]['switchar2_yhat2'],
'predict_predicted': results.iloc[3:]['switchar2_pyhat'],
'predict_filtered': results.iloc[3:]['switchar2_fyhat'],
'predict_smoothed': results.iloc[3:]['switchar2_syhat'],
}
super(TestHamiltonAR2Switch, cls).setup_class(
true, rgnp, k_regimes=2, order=2)
def test_smoothed_marginal_probabilities(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
self.true['smoothed0'], atol=1e-6)
assert_allclose(self.result.smoothed_marginal_probabilities[:, 1],
self.true['smoothed1'], atol=1e-6)
def test_predict(self):
# Smoothed
actual = self.model.predict(
self.true['params'], probabilities='smoothed')
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
actual = self.model.predict(
self.true['params'], probabilities=None)
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
actual = self.result.predict(probabilities='smoothed')
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
actual = self.result.predict(probabilities=None)
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
def test_bse(self):
# Cannot compare middle element of bse because we estimate sigma^2
# rather than sigma
bse = self.result.cov_params_approx.diagonal()**0.5
assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-7)
assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-7)
hamilton_ar1_switch_filtered = [
0.840288, 0.730337, 0.900234, 0.596492, 0.921618, 0.983828,
0.959039, 0.898366, 0.477335, 0.251089, 0.049367, 0.386782,
0.942868, 0.965632, 0.982857, 0.897603, 0.946986, 0.916413,
0.640912, 0.849296, 0.778371, 0.954420, 0.929906, 0.723930,
0.891196, 0.061163, 0.004806, 0.977369, 0.997871, 0.977950,
0.896580, 0.963246, 0.430539, 0.906586, 0.974589, 0.514506,
0.683457, 0.276571, 0.956475, 0.966993, 0.971618, 0.987019,
0.916670, 0.921652, 0.930265, 0.655554, 0.965858, 0.964981,
0.976790, 0.868267, 0.983240, 0.852052, 0.919150, 0.854467,
0.987868, 0.935840, 0.958138, 0.979535, 0.956541, 0.716322,
0.919035, 0.866437, 0.899609, 0.914667, 0.976448, 0.867252,
0.953075, 0.977850, 0.884242, 0.688299, 0.968461, 0.737517,
0.870674, 0.559413, 0.380339, 0.582813, 0.941311, 0.240020,
0.999349, 0.619258, 0.828343, 0.729726, 0.991009, 0.966291,
0.899148, 0.970798, 0.977684, 0.695877, 0.637555, 0.915824,
0.434600, 0.771277, 0.113756, 0.144002, 0.008466, 0.994860,
0.993173, 0.961722, 0.978555, 0.789225, 0.836283, 0.940383,
0.968368, 0.974473, 0.980248, 0.518125, 0.904086, 0.993023,
0.802936, 0.920906, 0.685445, 0.666524, 0.923285, 0.643861,
0.938184, 0.008862, 0.945406, 0.990061, 0.991500, 0.486669,
0.805039, 0.089036, 0.025067, 0.863309, 0.352784, 0.733295,
0.928710, 0.984257, 0.926597, 0.959887, 0.984051, 0.872682,
0.824375, 0.780157]
hamilton_ar1_switch_smoothed = [
0.900074, 0.758232, 0.914068, 0.637248, 0.901951, 0.979905,
0.958935, 0.888641, 0.261602, 0.148761, 0.056919, 0.424396,
0.932184, 0.954962, 0.983958, 0.895595, 0.949519, 0.923473,
0.678898, 0.848793, 0.807294, 0.958868, 0.942936, 0.809137,
0.960892, 0.032947, 0.007127, 0.967967, 0.996551, 0.979278,
0.896181, 0.987462, 0.498965, 0.908803, 0.986893, 0.488720,
0.640492, 0.325552, 0.951996, 0.959703, 0.960914, 0.986989,
0.916779, 0.924570, 0.935348, 0.677118, 0.960749, 0.958966,
0.976974, 0.838045, 0.986562, 0.847774, 0.908866, 0.821110,
0.984965, 0.915302, 0.938196, 0.976518, 0.973780, 0.744159,
0.922006, 0.873292, 0.904035, 0.917547, 0.978559, 0.870915,
0.948420, 0.979747, 0.884791, 0.711085, 0.973235, 0.726311,
0.828305, 0.446642, 0.411135, 0.639357, 0.973151, 0.141707,
0.999805, 0.618207, 0.783239, 0.672193, 0.987618, 0.964655,
0.877390, 0.962437, 0.989002, 0.692689, 0.699370, 0.937934,
0.522535, 0.824567, 0.058746, 0.146549, 0.009864, 0.994072,
0.992084, 0.956945, 0.984297, 0.795926, 0.845698, 0.935364,
0.963285, 0.972767, 0.992168, 0.528278, 0.826349, 0.996574,
0.811431, 0.930873, 0.680756, 0.721072, 0.937977, 0.731879,
0.996745, 0.016121, 0.951187, 0.989820, 0.996968, 0.592477,
0.889144, 0.036015, 0.040084, 0.858128, 0.418984, 0.746265,
0.907990, 0.980984, 0.900449, 0.934741, 0.986807, 0.872818,
0.812080, 0.780157]
class TestHamiltonAR1Switch(MarkovAutoregression):
@classmethod
def setup_class(cls):
# Results from E-views:
# Dependent variable followed by a list of switching regressors:
# rgnp c ar(1)
# List of non-switching regressors: <blank>
# Do not check "Regime specific error variances"
# Switching type: Markov
# Number of Regimes: 2
# Probability regressors:
# c
# Method SWITCHREG
# Sample 1951q1 1984q4
true = {
'params': np.r_[0.85472458, 0.53662099, 1.041419, -0.479157,
np.exp(-0.231404)**2, 0.243128, 0.713029],
'llf': -186.7575,
'llf_fit': -186.7575,
'llf_fit_em': -189.25446
}
super(TestHamiltonAR1Switch, cls).setup_class(
true, rgnp, k_regimes=2, order=1)
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
hamilton_ar1_switch_filtered, atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
hamilton_ar1_switch_smoothed, atol=1e-5)
def test_expected_durations(self):
expected_durations = [6.883477, 1.863513]
assert_allclose(self.result.expected_durations, expected_durations,
atol=1e-5)
hamilton_ar1_switch_tvtp_filtered = [
0.999996, 0.999211, 0.999849, 0.996007, 0.999825, 0.999991,
0.999981, 0.999819, 0.041745, 0.001116, 1.74e-05, 0.000155,
0.999976, 0.999958, 0.999993, 0.999878, 0.999940, 0.999791,
0.996553, 0.999486, 0.998485, 0.999894, 0.999765, 0.997657,
0.999619, 0.002853, 1.09e-05, 0.999884, 0.999996, 0.999997,
0.999919, 0.999987, 0.989762, 0.999807, 0.999978, 0.050734,
0.010660, 0.000217, 0.006174, 0.999977, 0.999954, 0.999995,
0.999934, 0.999867, 0.999824, 0.996783, 0.999941, 0.999948,
0.999981, 0.999658, 0.999994, 0.999753, 0.999859, 0.999330,
0.999993, 0.999956, 0.999970, 0.999996, 0.999991, 0.998674,
0.999869, 0.999432, 0.999570, 0.999600, 0.999954, 0.999499,
0.999906, 0.999978, 0.999712, 0.997441, 0.999948, 0.998379,
0.999578, 0.994745, 0.045936, 0.006816, 0.027384, 0.000278,
1.000000, 0.996382, 0.999541, 0.998130, 0.999992, 0.999990,
0.999860, 0.999986, 0.999997, 0.998520, 0.997777, 0.999821,
0.033353, 0.011629, 6.95e-05, 4.52e-05, 2.04e-06, 0.999963,
0.999977, 0.999949, 0.999986, 0.999240, 0.999373, 0.999858,
0.999946, 0.999972, 0.999991, 0.994039, 0.999817, 0.999999,
0.999715, 0.999924, 0.997763, 0.997944, 0.999825, 0.996592,
0.695147, 0.000161, 0.999665, 0.999928, 0.999988, 0.992742,
0.374214, 0.001569, 2.16e-05, 0.000941, 4.32e-05, 0.000556,
0.999955, 0.999993, 0.999942, 0.999973, 0.999999, 0.999919,
0.999438, 0.998738]
hamilton_ar1_switch_tvtp_smoothed = [
0.999997, 0.999246, 0.999918, 0.996118, 0.999740, 0.999990,
0.999984, 0.999783, 0.035454, 0.000958, 1.53e-05, 0.000139,
0.999973, 0.999939, 0.999994, 0.999870, 0.999948, 0.999884,
0.997243, 0.999668, 0.998424, 0.999909, 0.999860, 0.998037,
0.999559, 0.002533, 1.16e-05, 0.999801, 0.999993, 0.999997,
0.999891, 0.999994, 0.990096, 0.999753, 0.999974, 0.048495,
0.009289, 0.000542, 0.005991, 0.999974, 0.999929, 0.999995,
0.999939, 0.999880, 0.999901, 0.996221, 0.999937, 0.999935,
0.999985, 0.999450, 0.999995, 0.999768, 0.999897, 0.998930,
0.999992, 0.999949, 0.999954, 0.999995, 0.999994, 0.998687,
0.999902, 0.999547, 0.999653, 0.999538, 0.999966, 0.999485,
0.999883, 0.999982, 0.999831, 0.996940, 0.999968, 0.998678,
0.999780, 0.993895, 0.055372, 0.020421, 0.022913, 0.000127,
1.000000, 0.997072, 0.999715, 0.996893, 0.999990, 0.999991,
0.999811, 0.999978, 0.999998, 0.999100, 0.997866, 0.999787,
0.034912, 0.009932, 5.91e-05, 3.99e-05, 1.77e-06, 0.999954,
0.999976, 0.999932, 0.999991, 0.999429, 0.999393, 0.999845,
0.999936, 0.999961, 0.999995, 0.994246, 0.999570, 1.000000,
0.999702, 0.999955, 0.998611, 0.998019, 0.999902, 0.998486,
0.673991, 0.000205, 0.999627, 0.999902, 0.999994, 0.993707,
0.338707, 0.001359, 2.36e-05, 0.000792, 4.47e-05, 0.000565,
0.999932, 0.999993, 0.999931, 0.999950, 0.999999, 0.999940,
0.999626, 0.998738]
expected_durations = [
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391]]
class TestHamiltonAR1SwitchTVTP(MarkovAutoregression):
@classmethod
def setup_class(cls):
# Results from E-views:
# Dependent variable followed by a list of switching regressors:
# rgnp c ar(1)
# List of non-switching regressors: <blank>
# Do not check "Regime specific error variances"
# Switching type: Markov
# Number of Regimes: 2
# Probability regressors:
# c recession
# Method SWITCHREG
# Sample 1951q1 1984q4
true = {
'params': np.r_[6.564923, 7.846371, -8.064123, -15.37636,
1.027190, -0.719760,
np.exp(-0.217003)**2, 0.161489, 0.022536],
'llf': -163.914049,
'llf_fit': -161.786477,
'llf_fit_em': -163.914049
}
exog_tvtp = np.c_[np.ones(len(rgnp)), rec]
super(TestHamiltonAR1SwitchTVTP, cls).setup_class(
true, rgnp, k_regimes=2, order=1, exog_tvtp=exog_tvtp)
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit_em(self):
pass
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
hamilton_ar1_switch_tvtp_filtered, atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
hamilton_ar1_switch_tvtp_smoothed, atol=1e-5)
def test_expected_durations(self):
assert_allclose(self.result.expected_durations, expected_durations,
rtol=1e-5, atol=1e-7)
class TestFilardo(MarkovAutoregression):
@classmethod
def setup_class(cls):
path = os.path.join(current_path, 'results', 'mar_filardo.csv')
cls.mar_filardo = pd.read_csv(path)
true = {
'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
0.517298, -0.865888,
np.exp(-0.362469)**2,
0.189474, 0.079344, 0.110944, 0.122251],
'llf': -586.5718,
'llf_fit': -586.5718,
'llf_fit_em': -586.5718
}
endog = cls.mar_filardo['dlip'].iloc[1:].values
exog_tvtp = add_constant(
cls.mar_filardo['dmdlleading'].iloc[:-1].values)
super(TestFilardo, cls).setup_class(
true, endog, k_regimes=2, order=4, switching_ar=False,
exog_tvtp=exog_tvtp)
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit(self, **kwargs):
pass
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit_em(self):
pass
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
self.mar_filardo['filtered_0'].iloc[5:], atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
self.mar_filardo['smoothed_0'].iloc[5:], atol=1e-5)
def test_expected_durations(self):
assert_allclose(self.result.expected_durations,
self.mar_filardo[['duration0', 'duration1']].iloc[5:],
rtol=1e-5, atol=1e-7)
class TestFilardoPandas(MarkovAutoregression):
@classmethod
def setup_class(cls):
path = os.path.join(current_path, 'results', 'mar_filardo.csv')
cls.mar_filardo = pd.read_csv(path)
cls.mar_filardo.index = pd.date_range('1948-02-01', '1991-04-01',
freq='MS')
true = {
'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
0.517298, -0.865888,
np.exp(-0.362469)**2,
0.189474, 0.079344, 0.110944, 0.122251],
'llf': -586.5718,
'llf_fit': -586.5718,
'llf_fit_em': -586.5718
}
endog = cls.mar_filardo['dlip'].iloc[1:]
exog_tvtp = add_constant(
cls.mar_filardo['dmdlleading'].iloc[:-1])
super(TestFilardoPandas, cls).setup_class(
true, endog, k_regimes=2, order=4, switching_ar=False,
exog_tvtp=exog_tvtp)
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit(self, **kwargs):
pass
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit_em(self):
pass
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[0],
self.mar_filardo['filtered_0'].iloc[5:], atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[0],
self.mar_filardo['smoothed_0'].iloc[5:], atol=1e-5)
def test_expected_durations(self):
assert_allclose(self.result.expected_durations,
self.mar_filardo[['duration0', 'duration1']].iloc[5:],
rtol=1e-5, atol=1e-7)
| 45.644983 | 79 | 0.59401 | [
"BSD-3-Clause"
] | AKSoo/statsmodels | statsmodels/tsa/regime_switching/tests/test_markov_autoregression.py | 41,400 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge
class SoftDiceLoss(nn.Module):
def __init__(self):
super(SoftDiceLoss, self).__init__()
def forward(self, input, target):
smooth = 1e-5
input = F.sigmoid(input)
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = (input * target)
score = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
score = 1 - score.sum() / num
return score
class BCEDiceLoss(nn.Module):
def __init__(self):
super(BCEDiceLoss, self).__init__()
def forward(self, input, target):
bce = F.binary_cross_entropy_with_logits(input, target)
smooth = 1e-5
input = F.sigmoid(input)
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = (input * target)
dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
dice = 1 - dice.sum() / num
return 0.5 * bce + 0.5 * dice
class LovaszHingeLoss(nn.Module):
def __init__(self):
super(LovaszHingeLoss, self).__init__()
def forward(self, input, target):
input = input.squeeze(1)
target = target.squeeze(1)
loss = lovasz_hinge(input, target, per_image=True)
return loss
class DSVLovaszHingeLoss(nn.Module):
def __init__(self):
super(DSVLovaszHingeLoss, self).__init__()
def forward(self, input, target):
for i in range(target.shape[0]):
if not torch.sum(target[i]).data.cpu().numpy() > 1:
target[i] = -1
input = input.squeeze(1)
target = target.squeeze(1)
loss = lovasz_hinge(input, target, per_image=True, ignore=-1)
return loss
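# --- Hedged usage sketch (added, not part of the original module) ---
# Shows how one of the criteria above might be wired into a single training
# step of a binary segmentation model. `model`, `images` and `masks` are
# made-up placeholders; only torch is needed for this snippet.
if __name__ == '__main__':
    torch.manual_seed(0)
    model = nn.Conv2d(3, 1, kernel_size=3, padding=1)    # stand-in for a segmentation net
    images = torch.randn(4, 3, 32, 32)                   # dummy batch of RGB images
    masks = (torch.rand(4, 1, 32, 32) > 0.5).float()     # dummy binary ground-truth masks
    criterion = BCEDiceLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    logits = model(images)            # raw logits; the loss applies sigmoid internally
    loss = criterion(logits, masks)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(float(loss))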
| 28.705882 | 93 | 0.603996 | [
"MIT"
] | 4uiiurz1/kaggle-tgs-salt-identification-challenge | losses.py | 1,952 | Python |
from inspect import cleandoc
from setuptools import setup
_version = {}
exec(open('yamlschema/_version.py').read(), _version)
setup(
name = 'yamlschema',
packages = ['yamlschema', 'yamlschema.test'],
version = _version['__version__'],
description = 'A schema validator for YAML files',
author = 'Ashley Fisher',
author_email = '[email protected]',
url = 'https://github.com/Brightmd/yamlschema',
keywords = ['yaml', 'schema'],
classifiers = [],
scripts = ['bin/yamlschema'],
install_requires=cleandoc('''
click>=5.0,<8.0
jsonschema==2.6.0
''').split()
)
| 22.846154 | 53 | 0.6633 | [
"MIT"
] | Brightmd/yamlschema | setup.py | 594 | Python |
import tensorflow as tf
# DISCLAIMER:
# Parts of this code file were originally forked from
# https://github.com/tkipf/gcn
# which itself was very inspired by the keras package
def masked_logit_cross_entropy(preds, labels, mask):
"""Logit cross-entropy loss with masking."""
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(input_tensor=loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=tf.stop_gradient(labels))
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
def masked_l2(preds, actuals, mask):
"""L2 loss with masking."""
    # tf.nn.l2_loss takes a single tensor, so compute a per-example squared error
    # that the mask below can weight element-wise.
    loss = tf.reduce_sum(input_tensor=tf.square(preds - actuals), axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
loss *= mask
return tf.reduce_mean(input_tensor=loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(input=preds, axis=1), tf.argmax(input=labels, axis=1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
accuracy_all *= mask
return tf.reduce_mean(input_tensor=accuracy_all)
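# --- Hedged usage sketch (added, not part of the original module) ---
# Illustrates the masking convention used above: `mask` marks which nodes
# contribute, and rescaling by its sum/mean keeps the result comparable to an
# unmasked average. The toy tensors below are made-up values.
if __name__ == '__main__':
    preds = tf.constant([[2.0, 0.1], [0.2, 1.5], [1.0, 1.0]])    # logits for 3 nodes, 2 classes
    labels = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]])   # one-hot targets
    mask = tf.constant([True, True, False])                      # only the first two nodes are labelled
    print(float(masked_softmax_cross_entropy(preds, labels, mask)))
    print(float(masked_accuracy(preds, labels, mask)))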
| 41.04878 | 99 | 0.7041 | [
"MIT"
] | gelareh1985/GraphSAGE | graphsage/metrics.py | 1,683 | Python |
import requests
import re
import random
import time
from bs4 import BeautifulSoup
import os
import lpmysql
import json
def getindex():
url = 'http://freeget.co'
headers = {'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"contentType":"text/html;charset=utf-8",
# Accept:*/*
# Accept-Encoding:gzip, deflate
# Accept-Language:zh-CN,zh;q=0.8
# Connection:keep-alive
}
    html = requests.get(url,headers=headers) ## changed this here (notice that self is gone?)
print(html.content)
print(dir(html))
print(html.headers)
def getbtn():
url = 'http://freeget.co/video/extraction'
headers = {'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"contentType":"text/html;charset=utf-8",
# "X - CSRFToken":"1504169114##45f7200f8dba99432cc422ed552b3bbf3baff85b",
"X - Requested - With": "XMLHttpRequest",
# X - CSRFToken: 1504164180 ##fdbd5ae5ec0c76632937754c20e90c582f2f7c28
# X - Requested - With: XMLHttpRequest
# Accept:*/*
# Accept-Encoding:gzip, deflate
# Accept-Language:zh-CN,zh;q=0.8
# Connection:keep-alive
}
payload = {"url":"1111111111111111111111111111111111111111111111","_csrf" : "1504169114##45f7200f8dba99432cc422ed552b3bbf3baff85b"}
    html = requests.post(url,headers=headers,data=payload) ## changed this here (notice that self is gone?)
print(html.content)
print(dir(html))
print(html.headers)
# getindex()
getbtn()
# http://www.cnblogs.com/xwang/p/3757711.html
# python requests: setting the CSRF token
# http://blog.csdn.net/u011061889/article/details/72904821
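# --- Hedged sketch (added): keeping the CSRF token in sync with the session ---
# The links above discuss CSRF handling with requests. A common pattern is to
# reuse one requests.Session so cookies persist, scrape the current token from
# the page, and send it with the POST. The hidden "_csrf" input name and the
# endpoint below mirror the hard-coded values used earlier; the page markup is
# an assumption.
def getbtn_with_session(video_url):
    session = requests.Session()
    session.headers.update({'User-Agent': 'Mozilla/5.0'})
    page = session.get('http://freeget.co')
    soup = BeautifulSoup(page.text, 'html.parser')
    token_input = soup.find('input', {'name': '_csrf'})   # assumed hidden form field
    token = token_input['value'] if token_input else ''
    resp = session.post('http://freeget.co/video/extraction',
                        headers={'X-Requested-With': 'XMLHttpRequest'},
                        data={'url': video_url, '_csrf': token})
    return resp
# Example call (hypothetical input): getbtn_with_session('https://example.com/video')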
| 30.269841 | 165 | 0.635553 | [
"BSD-2-Clause"
] | leifufeng/free91 | freeget.py | 1,983 | Python |
"""Python Interface for Residue-Residue Contact Predictions"""
import os
import sys
from distutils.command.build import build
from distutils.util import convert_path
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy as np
# ==============================================================
# Setup.py command extensions
# ==============================================================
# Credits to http://stackoverflow.com/a/33181352
class BuildCommand(build):
user_options = build.user_options + [
("script-python-path=", None, "Path to Python interpreter to be included in the scripts")
]
def initialize_options(self):
build.initialize_options(self)
self.script_python_path = None
def finalize_options(self):
build.finalize_options(self)
def run(self):
global script_python_path
script_python_path = self.script_python_path
build.run(self)
# ==============================================================
# Functions, functions, functions ...
# ==============================================================
def dependencies():
with open("requirements.txt", "r") as f_in:
deps = f_in.read().splitlines()
return deps
def extensions():
exts = ["conkit/core/ext/c_contactmap.pyx", "conkit/core/ext/c_sequencefile.pyx", "conkit/misc/ext/c_bandwidth.pyx"]
extensions = []
for ext in exts:
extensions.append(
Extension(
ext.replace('/', '.').rsplit('.', 1)[0],
[ext],
include_dirs=[np.get_include()],
))
return extensions
def readme():
with open("README.rst", "r") as f_in:
return f_in.read()
def scripts():
extension = ".bat" if sys.platform.startswith("win") else ""
header = "" if sys.platform.startswith("win") else "#!/bin/sh"
bin_dir = "bin"
command_dir = convert_path("conkit/command_line")
scripts = []
for file in os.listdir(command_dir):
if not file.startswith("_") and file.endswith(".py"):
# Make sure we have a workable name
f_name = os.path.basename(file).rsplit(".", 1)[0]
for c in [".", "_"]:
new_f_name = f_name.replace(c, "-")
# Write the content of the script
script = os.path.join(bin_dir, new_f_name + extension)
with open(script, "w") as f_out:
f_out.write(header + os.linesep)
# BATCH file
if sys.platform.startswith("win"):
string = "@{0} -m conkit.command_line.{1} %*"
# BASH file
else:
string = '{0} -m conkit.command_line.{1} "$@"'
f_out.write(string.format(PYTHON_EXE, f_name) + os.linesep)
os.chmod(script, 0o777)
scripts.append(script)
return scripts
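# Hedged illustration (added): for a hypothetical conkit/command_line/conkit_plot.py,
# the loop above would write an executable bin/conkit-plot containing roughly
#     #!/bin/sh
#     <python-exe> -m conkit.command_line.conkit_plot "$@"
# where <python-exe> is taken from --script-python-path or sys.executable
# (on Windows a .bat wrapper using %* is written instead).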
def version():
# Credits to http://stackoverflow.com/a/24517154
main_ns = {}
ver_path = convert_path("conkit/version.py")
with open(ver_path) as f_in:
exec(f_in.read(), main_ns)
return main_ns["__version__"]
# ==============================================================
# Determine the Python executable
# ==============================================================
PYTHON_EXE = None
for arg in sys.argv:
if arg[0:20] == "--script-python-path" and len(arg) == 20:
option, value = arg, sys.argv[sys.argv.index(arg) + 1]
PYTHON_EXE = value
elif arg[0:20] == "--script-python-path" and arg[20] == "=":
option, value = arg[:20], arg[21:]
PYTHON_EXE = value
if not PYTHON_EXE:
PYTHON_EXE = sys.executable
# ==============================================================
# Define all the relevant options
# ==============================================================
AUTHOR = "Felix Simkovic"
AUTHOR_EMAIL = "[email protected]"
DESCRIPTION = __doc__.replace("\n", "")
DEPENDENCIES = dependencies()
EXT_MODULES = extensions()
LICENSE = "BSD License"
LONG_DESCRIPTION = readme()
PACKAGE_DIR = "conkit"
PACKAGE_NAME = "conkit"
PLATFORMS = ["POSIX", "Mac OS", "Windows", "Unix"]
SCRIPTS = scripts()
URL = "http://www.conkit.org/en/latest/"
VERSION = version()
PACKAGES = [
"conkit",
"conkit/applications",
"conkit/command_line",
"conkit/core",
"conkit/core/ext",
"conkit/io",
"conkit/misc",
"conkit/misc/ext",
"conkit/plot",
]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Bio-Informatics",
]
TEST_REQUIREMENTS = [
"codecov",
"coverage",
"pytest",
"pytest-cov",
"pytest-pep8",
"pytest-helpers-namespace",
]
setup(
cmdclass={
'build': BuildCommand,
'build_ext': build_ext,
},
author=AUTHOR,
author_email=AUTHOR_EMAIL,
name=PACKAGE_NAME,
description=DESCRIPTION,
ext_modules=EXT_MODULES,
include_dirs=[np.get_include()],
long_description=LONG_DESCRIPTION,
license=LICENSE,
version=VERSION,
url=URL,
packages=PACKAGES,
package_dir={PACKAGE_NAME: PACKAGE_DIR},
scripts=SCRIPTS,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=DEPENDENCIES,
tests_require=TEST_REQUIREMENTS,
setup_requires=['pytest-runner'],
include_package_data=True,
zip_safe=False,
)
| 29.298969 | 120 | 0.567734 | [
"BSD-3-Clause"
] | fsimkovic/cptbx | setup.py | 5,684 | Python |
"""
Django settings for mymedicalassistant project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'va(i+)gea_&5z@=q%_-d7&ezvkqhq9#$sq1_oco8k!n#2yl!7&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apptSchedule',
'clinicInformation',
# 'medInformation',
'medSchedule',
'user',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mymedicalassistant.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mymedicalassistant.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES':[
'rest_framework.permissions.AllowAny',
]
}
# TODO: confirm whether this is needed for all the models or just for the user model.
AUTH_USER_MODEL = 'user.CustomUser'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
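# Hedged usage note (added): other apps should reference the custom user model
# through this setting instead of importing the class directly, e.g. in a
# hypothetical models.py:
#
#     from django.conf import settings
#     from django.db import models
#
#     class Appointment(models.Model):
#         patient = models.ForeignKey(settings.AUTH_USER_MODEL,
#                                     on_delete=models.CASCADE)
#
# or, in view/serializer code, via django.contrib.auth.get_user_model().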
| 25.631206 | 91 | 0.696458 | [
"MIT"
] | MyMedicalAssistant/MyMedicalAssistant | mymedicalassistant/settings.py | 3,614 | Python |
from deeplearning import logger, tf_util as U
import tensorflow as tf
from rl.runner import Runner
from rl.vec_env.subproc_vec_env import SubprocVecEnv
from collections import namedtuple
import os, time
class RLExperiment(U.Experiment):
def load_env_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/env_fn.pkl')
assert os.path.exists(fname), "No env function saved."
return U.load(fname)
def save_env_fn(self, env_fn):
fname = os.path.join(self.logdir, 'checkpoints/env_fn.pkl')
U.save(fname, env_fn)
class OnlineRLAlgorithm(object):
def __init__(self, logdir, env_fn, model_fn, nenv, rollout_length, batch_size, callback=None, runner_flags=[], **kwargs):
self.exp = RLExperiment(logdir)
self.exp.save_model_fn(model_fn)
self.exp.save_env_fn(env_fn)
logger.configure(os.path.join(logdir, 'logs'), ['stdout', 'log', 'json'])
self.logdir = logdir
self.batch_size = batch_size
self.rollout_length = rollout_length
self.args = namedtuple('Args', kwargs.keys())(**kwargs)
self.nenv = nenv
self.timesteps_per_step = self.nenv * self.rollout_length
self.env = self._make_env(env_fn, nenv)
self.actor = model_fn(self.env)
self.actor.build('model', self.nenv, 1)
self.loss = self._def_loss(model_fn, self.env)
self.opt = self._def_opt(self.loss)
self.opt.build('model', self.nenv, batch_size, reuse=tf.AUTO_REUSE)
self.runner = Runner(self.env, self.actor, rollout_length, *runner_flags)
self.callback = callback
if callback is not None:
assert callable(callback)
self.init_session()
self.load()
def _make_env(self, env_fn, nenv):
def make_env(rank):
def _env():
return env_fn(rank)
return _env
return SubprocVecEnv([make_env(i) for i in range(nenv)])
def _def_loss(self, model_fn, env):
"""
        Returns a module for the loss.
"""
raise NotImplementedError
def _def_opt(self, loss):
"""
        Returns a module for the optimizer.
"""
raise NotImplementedError
def _before_step(self):
pass
def _process_rollout(self, rollout):
raise NotImplementedError
def _update_model(self, data):
raise NotImplementedError
def _after_step(self, rollout, data, update_out):
pass
def step(self):
if self.callback is not None:
self.callback(locals(), globals())
self._before_step()
rollout = self.runner.rollout()
self.t += self.timesteps_per_step
data = self._process_rollout(rollout)
outs = self._update_model(data)
self._after_step(rollout, data, outs)
def train(self, maxtimesteps=None, maxseconds=None, save_freq=None):
assert maxtimesteps is not None or maxseconds is not None
start_time = time.time()
while True:
if maxtimesteps is not None and self.t >= maxtimesteps:
break
if maxseconds is not None and time.time() - start_time >= maxtimesteps:
break
t = self.t
self.step()
if save_freq and t // save_freq != self.t // save_freq:
self.save()
self.save()
def save(self):
self.exp.save(self.t)
def load(self, t=None):
self.t = self.exp.load(t)
def init_session(self):
if tf.get_default_session() is None:
U.make_session().__enter__()
U.initialize()
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
tf.get_default_session().__exit__(None, None, None)
logger.reset()
if __name__=='__main__':
from deeplearning.layers import Adam, Placeholder
from deeplearning.module import Module
from rl.rl_module import Policy
import tensorflow as tf
import gym
from rl import util
class TestAlg(OnlineRLAlgorithm):
        def _def_loss(self, model_fn, env):
            # Match the base-class call signature: _def_loss(model_fn, env).
            class Ent(Module):
                def _build(self, inputs):
                    return self.modules[0]._entropy
            return Ent('l', model_fn(env))
def _def_opt(self, loss):
return Adam('opt', loss)
def _before_step(self):
logger.log("Before Step")
def _process_rollout(self, rollout):
return rollout.numpy()
def _update_model(self, data):
self.opt.update(util.swap01andflatten(data['obs']))
def _after_step(self, rollout, data, update_outs):
logger.log("After Step")
def model_fn(env):
x = Placeholder(tf.float32, env.observation_space.shape, 'x')
return Policy('pi', x, ac_space=env.action_space)
def env_fn(rank):
env = gym.make('CartPole-v1')
env.seed(rank)
return env
alg = TestAlg('./test_logs', env_fn, model_fn, 2, 64, 64)
alg.train(1024, save_freq=128)
| 30.190476 | 125 | 0.614353 | [
"MIT"
] | cbschaff/nlimb | rl/algorithms/core.py | 5,072 | Python |
import datetime
import logging
import time
import dataset
import discord
import privatebinapi
from discord.ext import commands
from discord.ext.commands import Cog, Bot
from discord_slash import cog_ext, SlashContext
from discord_slash.model import SlashCommandPermissionType
from discord_slash.utils.manage_commands import create_option, create_permission
import utils.duration
from cogs.commands import settings
from utils import database
from utils import embeds
from utils.moderation import can_action_member
from utils.record import record_usage
# Enabling logs
log = logging.getLogger(__name__)
class MuteCog(Cog):
""" Mute Cog """
def __init__(self, bot):
self.bot = bot
@staticmethod
async def mute_member(ctx: SlashContext, member: discord.Member, reason: str, temporary: bool = False, end_time: float = None) -> None:
role = discord.utils.get(ctx.guild.roles, id=settings.get_value("role_muted"))
await member.add_roles(role, reason=reason)
# Open a connection to the database.
db = dataset.connect(database.get_db())
# Add the mute to the mod_log database.
db["mod_logs"].insert(dict(
user_id=member.id, mod_id=ctx.author.id, timestamp=int(time.time()), reason=reason, type="mute"
))
# Occurs when the duration parameter in /mute is specified (tempmute).
if temporary:
db["timed_mod_actions"].insert(dict(
user_id=member.id,
mod_id=ctx.author.id,
action_type="mute",
reason=reason,
start_time=datetime.datetime.now(tz=datetime.timezone.utc).timestamp(),
end_time=end_time,
is_done=False
))
# Commit the changes to the database.
db.commit()
db.close()
async def unmute_member(self, member: discord.Member, reason: str, ctx: SlashContext = None, guild: discord.Guild = None) -> None:
guild = guild or ctx.guild
moderator = ctx.author if ctx else self.bot.user
# Removes "Muted" role from member.
role = discord.utils.get(guild.roles, id=settings.get_value("role_muted"))
await member.remove_roles(role, reason=reason)
# Open a connection to the database.
db = dataset.connect(database.get_db())
# Add the unmute to the mod_log database.
db["mod_logs"].insert(dict(
user_id=member.id, mod_id=moderator.id, timestamp=int(time.time()), reason=reason, type="unmute"
))
tempmute_entry = db["timed_mod_actions"].find_one(user_id=member.id, is_done=False)
if tempmute_entry:
db["timed_mod_actions"].update(dict(id=tempmute_entry["id"], is_done=True), ["id"])
# Commit the changes to the database and close the connection.
db.commit()
db.close()
@staticmethod
async def is_user_muted(ctx: SlashContext, member: discord.Member) -> bool:
if discord.utils.get(ctx.guild.roles, id=settings.get_value("role_muted")) in member.roles:
return True
return False
@staticmethod
async def send_muted_dm_embed(ctx: SlashContext, member: discord.Member, channel: discord.TextChannel, reason: str = None, duration: str = None) -> bool:
if not duration:
duration = "Indefinite"
try: # In case user has DMs blocked.
dm_channel = await member.create_dm()
embed = embeds.make_embed(
author=False,
title=f"Uh-oh, you've been muted!",
description="If you believe this was a mistake, contact staff.",
color=0x8083b0
)
embed.add_field(name="Server:", value=f"[{ctx.guild}](https://discord.gg/piracy/)", inline=True)
embed.add_field(name="Moderator:", value=ctx.author.mention, inline=True)
embed.add_field(name="Length:", value=duration, inline=True)
embed.add_field(name="Mute Channel:", value=channel.mention, inline=True)
embed.add_field(name="Reason:", value=reason, inline=False)
embed.set_image(url="https://i.imgur.com/840Q48l.gif")
await dm_channel.send(embed=embed)
return True
except discord.HTTPException:
return False
async def send_unmuted_dm_embed(self, member: discord.Member, reason: str, ctx: SlashContext = None, guild: discord.Guild = None) -> bool:
guild = guild or ctx.guild
moderator = ctx.author if ctx else self.bot.user
# Send member message telling them that they were unmuted and why.
try: # In case user has DMs blocked.
channel = await member.create_dm()
embed = embeds.make_embed(
author=False,
title=f"Yay, you've been unmuted!",
description="Review our server rules to avoid being actioned again in the future.",
color=0x8a3ac5
)
embed.add_field(name="Server:", value=f"[{guild}](https://discord.gg/piracy/)", inline=True)
embed.add_field(name="Moderator:", value=moderator.mention, inline=True)
embed.add_field(name="Reason:", value=reason, inline=False)
embed.set_image(url="https://i.imgur.com/U5Fvr2Y.gif")
await channel.send(embed=embed)
return True
except discord.HTTPException:
return False
@staticmethod
async def create_mute_channel(ctx: SlashContext, member: discord.Member, reason: str, duration: str = None):
if not duration:
duration = "Indefinite"
# Create a channel in the category specified in settings.
category = discord.utils.get(ctx.guild.categories, id=settings.get_value("category_tickets"))
channel = await ctx.guild.create_text_channel(f"mute-{member.id}", category=category)
# Give both the staff and the user perms to access the channel.
await channel.set_permissions(discord.utils.get(ctx.guild.roles, id=settings.get_value("role_trial_mod")), read_messages=True)
await channel.set_permissions(discord.utils.get(ctx.guild.roles, id=settings.get_value("role_staff")), read_messages=True)
await channel.set_permissions(member, read_messages=True)
# Create embed at the start of the channel letting the user know how long they're muted for and why.
embed = embeds.make_embed(title="🤐 You were muted", description="If you have any questions or concerns about your mute, you may voice them here.")
embed.add_field(name="User:", value=member.mention, inline=True)
embed.add_field(name="Moderator:", value=ctx.author.mention, inline=True)
embed.add_field(name="Length:", value=duration, inline=True)
embed.add_field(name="Reason:", value=reason, inline=False)
await channel.send(embed=embed)
# Embed mentions don't count as a ping so this is a workaround to that.
ping = await channel.send(member.mention)
await ping.delete()
return channel
async def archive_mute_channel(self, user_id: int, reason: str = None, ctx: SlashContext = None, guild: int = None):
        # Automatically default the reason string when no reason is provided.
        if not reason:
            reason = "No reason provided."
        # Discord caps embed fields at a ridiculously low character limit; this avoids problems with future embeds.
elif len(reason) > 512:
await embeds.error_message(ctx=ctx, description="Reason must be less than 512 characters.")
return
guild = guild or ctx.guild
category = discord.utils.get(guild.categories, id=settings.get_value("category_tickets"))
mute_channel = discord.utils.get(category.channels, name=f"mute-{user_id}")
# Open a connection to the database.
db = dataset.connect(database.get_db())
# TODO: Get the mute reason by looking up the latest mute for the user and getting the reason column data.
table = db["mod_logs"]
# Gets the most recent mute for the user, sorted by descending (-) ID.
mute_entry = table.find_one(user_id=user_id, type="mute", order_by="-id")
unmute_entry = table.find_one(user_id=user_id, type="unmute", order_by="-id")
mute_reason = mute_entry["reason"]
muter = await self.bot.fetch_user(mute_entry["mod_id"])
unmuter = await self.bot.fetch_user(unmute_entry["mod_id"])
# Commit the changes to the database and close the connection.
db.commit()
db.close()
# Get the member object of the ticket creator.
member = await self.bot.fetch_user(user_id)
# Initialize the PrivateBin message log string.
message_log = (
f"Muted User: {member} ({member.id})\n\n"
f"Muted By: {muter} ({muter.id})\n"
f"Mute Reason: {mute_reason}\n\n"
f"Unmuted By: {unmuter} ({unmuter.id})\n"
f"Unmute Reason: {reason}\n\n"
)
# Initialize a list of moderator IDs as a set for no duplicates.
mod_list = set()
# Add the original muting moderator to avoid a blank embed field if no one interacts.
mod_list.add(muter)
# Fetch the staff and trial mod role.
role_staff = discord.utils.get(guild.roles, id=settings.get_value("role_staff"))
role_trial_mod = discord.utils.get(guild.roles, id=settings.get_value("role_trial_mod"))
# TODO: Implement so it gets the channel when the moderator is the bot
# Loop through all messages in the ticket from old to new.
async for message in mute_channel.history(oldest_first=True):
# Ignore the bot replies.
if not message.author.bot:
# Time format is unnecessarily lengthy so trimming it down and keep the log go easier on the eyes.
formatted_time = str(message.created_at).split(".")[-2]
# Append the new messages to the current log as we loop.
message_log += f"[{formatted_time}] {message.author}: {message.content}\n"
# Iterates only through members that is still in the server.
if isinstance(member, discord.Member):
# If the messenger has either staff role or trial mod role, add their ID to the mod_list set.
if role_staff in message.author.roles or role_trial_mod in message.author.roles:
mod_list.add(message.author)
# Dump message log to PrivateBin. This returns a dictionary, but only the url is needed for the embed.
url = privatebinapi.send("https://bin.piracy.moe", text=message_log, expiration="never")["full_url"]
# Get the amount of time elapsed since the user was muted.
time_delta = datetime.datetime.utcnow() - mute_channel.created_at
days = time_delta.days
# Hours are the time delta in seconds divided by 3600.
hours, remainder = divmod(time_delta.seconds, 3600)
# Minutes are the hour remainder divided by 60. The minutes remainder are the seconds.
minutes, seconds = divmod(remainder, 60)
# String that will store the duration in a more digestible format.
elapsed_time = ""
duration = dict(
days=days,
hours=hours,
minutes=minutes,
seconds=seconds
)
for time_unit in duration:
# If the time value is 0, skip it.
if duration[time_unit] == 0:
continue
# If the time value is 1, make the time unit into singular form.
if duration[time_unit] == 1:
elapsed_time += f"{duration[time_unit]} {time_unit[:-1]} "
else:
elapsed_time += f"{duration[time_unit]} {time_unit} "
# Create the embed in #mute-log.
embed = embeds.make_embed(
title=f"{mute_channel.name} archived",
thumbnail_url="https://i.imgur.com/A4c19BJ.png",
color="blurple"
)
embed.add_field(name="Muted User:", value=member.mention, inline=True)
embed.add_field(name="Muted By:", value=muter.mention, inline=True)
embed.add_field(name="Unmuted By:", value=unmuter.mention, inline=True)
embed.add_field(name="Mute Reason:", value=mute_reason, inline=False)
embed.add_field(name="Unmute Reason:", value=reason, inline=False)
embed.add_field(name="Duration:", value=elapsed_time, inline=False)
embed.add_field(name="Participating Moderators:", value=" ".join(mod.mention for mod in mod_list), inline=False)
embed.add_field(name="Mute Log: ", value=url, inline=False)
# Send the embed to #mute-log.
mute_log = discord.utils.get(guild.channels, id=settings.get_value("channel_mute_log"))
await mute_log.send(embed=embed)
# Delete the mute channel.
await mute_channel.delete()
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(
name="mute",
description="Mutes a member in the server",
guild_ids=[settings.get_value("guild_id")],
options=[
create_option(
name="member",
description="The member that will be muted",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason why the member is being muted",
option_type=3,
required=False
),
create_option(
name="duration",
description="The length of time the user will be muted for",
option_type=3,
required=False
),
],
default_permission=False,
permissions={
settings.get_value("guild_id"): [
create_permission(settings.get_value("role_staff"), SlashCommandPermissionType.ROLE, True),
create_permission(settings.get_value("role_trial_mod"), SlashCommandPermissionType.ROLE, True)
]
}
)
async def mute(self, ctx: SlashContext, member: discord.Member, duration: str = None, reason: str = None):
""" Mutes member in guild. """
await ctx.defer()
# If we received an int instead of a discord.Member, the user is not in the server.
if not isinstance(member, discord.Member):
await embeds.error_message(ctx=ctx, description=f"That user is not in the server.")
return
# Checks if invoker can action that member (self, bot, etc.)
if not await can_action_member(bot=self.bot, ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"You cannot action {member.mention}.")
return
# Check if the user is muted already.
if await self.is_user_muted(ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"{member.mention} is already muted.")
return
        # Automatically default the reason string when the moderator does not provide a reason.
        if not reason:
            reason = "No reason provided."
        # Discord caps embed fields at a ridiculously low character limit; this avoids problems with future embeds.
elif len(reason) > 512:
await embeds.error_message(ctx=ctx, description="Reason must be less than 512 characters.")
return
# If the duration is not specified, default it to a permanent mute.
if not duration:
# Start creating the embed that will be used to alert the moderator that the user was successfully muted.
embed = embeds.make_embed(
ctx=ctx,
title=f"Muting member: {member.name}",
description=f"{member.mention} was muted by {ctx.author.mention} for: {reason}",
thumbnail_url="https://i.imgur.com/rHtYWIt.png",
color="soft_red",
)
# Create the mute channel in the Staff category.
channel = await self.create_mute_channel(ctx=ctx, member=member, reason=reason)
# Attempt to DM the user to let them know they were muted.
if not await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason):
embed.add_field(name="Notice:", value=f"Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.")
# Mutes the user and returns the embed letting the moderator know they were successfully muted.
await self.mute_member(ctx=ctx, member=member, reason=reason)
await ctx.send(embed=embed)
return
# Get the duration string for embed and mute end time for the specified duration.
duration_string, mute_end_time = utils.duration.get_duration(duration=duration)
# If the duration string is empty due to Regex not matching anything, send and error embed and return.
if not duration_string:
await embeds.error_message(ctx=ctx, description=f"Duration syntax: `#d#h#m#s` (day, hour, min, sec)\nYou can specify up to all four but you only need one.")
return
# Start creating the embed that will be used to alert the moderator that the user was successfully muted.
embed = embeds.make_embed(
ctx=ctx,
title=f"Muting member: {member}",
thumbnail_url="https://i.imgur.com/rHtYWIt.png",
color="soft_red"
)
embed.description = f"{member.mention} was muted by {ctx.author.mention} for: {reason}"
embed.add_field(name="Duration:", value=duration_string, inline=False)
# Create the mute channel in the Staff category.
channel = await self.create_mute_channel(ctx=ctx, member=member, reason=reason, duration=duration_string)
# Attempt to DM the user to let them know they were muted.
if not await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason, duration=duration_string):
embed.add_field(name="Notice:", value=f"Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.")
# Mutes the user and stores the unmute time in the database for the background task.
await self.mute_member(ctx=ctx, member=member, reason=reason, temporary=True, end_time=mute_end_time.timestamp())
await ctx.send(embed=embed)
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(
name="unmute",
description="Unmutes a member in the server",
guild_ids=[settings.get_value("guild_id")],
options=[
create_option(
name="member",
description="The member that will be unmuted",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason why the member is being unmuted",
option_type=3,
required=False
),
],
default_permission=False,
permissions={
settings.get_value("guild_id"): [
create_permission(settings.get_value("role_staff"), SlashCommandPermissionType.ROLE, True),
create_permission(settings.get_value("role_trial_mod"), SlashCommandPermissionType.ROLE, True)
]
}
)
async def unmute(self, ctx: SlashContext, member: discord.Member, reason: str = None):
""" Unmutes member in guild. """
await ctx.defer()
# If we received an int instead of a discord.Member, the user is not in the server.
if not isinstance(member, discord.Member):
await embeds.error_message(ctx=ctx, description=f"That user is not in the server.")
return
# Checks if invoker can action that member (self, bot, etc.)
if not await can_action_member(bot=self.bot, ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"You cannot action {member.mention}.")
return
# Check if the user is not muted already.
if not await self.is_user_muted(ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"{member.mention} is not muted.")
return
        # Automatically default the reason string when the moderator does not provide a reason.
        if not reason:
            reason = "No reason provided."
        # Discord caps embed fields at a ridiculously low character limit; this avoids problems with future embeds.
elif len(reason) > 512:
await embeds.error_message(ctx=ctx, description="Reason must be less than 512 characters.")
return
# Start creating the embed that will be used to alert the moderator that the user was successfully unmuted.
embed = embeds.make_embed(
ctx=ctx,
title=f"Unmuting member: {member.name}",
color="soft_green",
thumbnail_url="https://i.imgur.com/W7DpUHC.png"
)
embed.description = f"{member.mention} was unmuted by {ctx.author.mention} for: {reason}"
# Unmutes the user and and archives the channel. Execution order is important here, otherwise the wrong unmuter will be used in the embed.
await self.unmute_member(ctx=ctx, member=member, reason=reason)
await self.archive_mute_channel(ctx=ctx, user_id=member.id, reason=reason)
# Attempt to DM the user to let them and the mods know they were unmuted.
if not await self.send_unmuted_dm_embed(ctx=ctx, member=member, reason=reason):
embed.add_field(name="Notice:", value=f"Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.")
# If the mod sent the /unmute in the mute channel, this will cause a errors.NotFound 404.
# We cannot send the embed and then archive the channel because that will cause a error.AlreadyResponded.
try:
await ctx.send(embed=embed)
except discord.HTTPException:
pass
def setup(bot: Bot) -> None:
""" Load the Mute cog. """
bot.add_cog(MuteCog(bot))
log.info("Commands loaded: mutes")
| 47.488565 | 215 | 0.641012 | [
"Unlicense"
] | y0usef-2E/chiya | cogs/commands/moderation/mutes.py | 22,845 | Python |
# -*- coding: utf-8 -*-
### Import the basic libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
'''
[Step 1] Data preparation - load the auto MPG dataset with the read_csv() function
'''
# Convert the CSV file into a DataFrame
df = pd.read_csv('./auto-mpg.csv', header=None)
# Assign column names
df.columns = ['mpg','cylinders','displacement','horsepower','weight',
              'acceleration','model year','origin','name']
# Take a look at the data
print(df.head())
print('\n')
# IPython display setting - raise the limit on the number of columns to print
pd.set_option('display.max_columns', 10)
print(df.head())
print('\n')
'''
[Step 2] Data exploration
'''
# Check the data types
print(df.info())
print('\n')
# Check summary statistics
print(df.describe())
print('\n')
# Change the data type of the horsepower column (string -> numeric)
print(df['horsepower'].unique())    # check the unique values of the horsepower column
print('\n')
df['horsepower'].replace('?', np.nan, inplace=True)      # replace '?' with np.nan
df.dropna(subset=['horsepower'], axis=0, inplace=True)   # drop rows with missing data
df['horsepower'] = df['horsepower'].astype('float')      # convert strings to floats
print(df.describe())                # check summary statistics
print('\n')
'''
[Step 3] Feature (variable) selection
'''
# Select the columns (features) to use for the analysis (mpg, cylinders, horsepower, weight)
ndf = df[['mpg', 'cylinders', 'horsepower', 'weight']]
print(ndf.head())
print('\n')
### Check the linear relationship between the dependent variable Y ("mpg") and the other variables with scatter plots
# Draw a scatter plot with Matplotlib
ndf.plot(kind='scatter', x='weight', y='mpg', c='coral', s=10, figsize=(10, 5))
plt.show()
plt.close()
# Draw scatter plots with seaborn
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
sns.regplot(x='weight', y='mpg', data=ndf, ax=ax1)                 # show the regression line
sns.regplot(x='weight', y='mpg', data=ndf, ax=ax2, fit_reg=False)  # hide the regression line
plt.show()
plt.close()
# seaborn joint plot - scatter plot plus histograms
sns.jointplot(x='weight', y='mpg', data=ndf)              # no regression line
sns.jointplot(x='weight', y='mpg', kind='reg', data=ndf)  # with regression line
plt.show()
plt.close()
# Use a seaborn pairplot to draw every pairwise combination of the variables
sns.pairplot(ndf)
plt.show()
plt.close()
'''
Step 4: Split the dataset into training data and test data
'''
# Select the variables
X = ndf[['weight']]  # independent variable X
y = ndf['mpg']       # dependent variable Y
# Split into train data and test data (7:3 ratio)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,                 # independent variable
                                                    y,                 # dependent variable
                                                    test_size=0.3,     # 30% held out for testing
                                                    random_state=10)   # random seed
print('train data count: ', len(X_train))
print('test data count: ', len(X_test))
'''
Step 5: Simple linear regression model - using sklearn
'''
# Import the linear regression module from the sklearn library
from sklearn.linear_model import LinearRegression
# Create a simple linear regression model object
lr = LinearRegression()
# Train the model with the training data
lr.fit(X_train, y_train)
# Apply the trained model to the test data and compute the coefficient of determination (R-squared)
r_square = lr.score(X_test, y_test)
print(r_square)
print('\n')
# Slope of the regression line
print('slope a: ', lr.coef_)
print('\n')
# y-intercept of the regression line
print('y-intercept b: ', lr.intercept_)
print('\n')
# Feed the full X data into the model and compare the predicted y_hat with the actual values y
y_hat = lr.predict(X)
plt.figure(figsize=(10, 5))
ax1 = sns.distplot(y, hist=False, label="y")
ax2 = sns.distplot(y_hat, hist=False, label="y_hat", ax=ax1)
plt.show()
plt.close() | 23.361702 | 80 | 0.595628 | [
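# --- Hedged example (added): point prediction with the fitted line ---
# The fitted relationship is roughly mpg = a * weight + b, so a single
# prediction can be read off from lr.predict() or from the coefficients
# directly. The 3000 lbs input below is just an illustrative value.
new_weight = pd.DataFrame({'weight': [3000]})
print('predicted mpg for weight 3000:', lr.predict(new_weight)[0])
print('check with a * 3000 + b      :', lr.coef_[0] * 3000 + lr.intercept_)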
"MIT"
] | Adrian123K/pandas_ml | 7.1_simple_linear_regression.py | 4,174 | Python |
from datetime import datetime, timedelta as td
import json
import os
import re
from secrets import token_urlsafe
from urllib.parse import urlencode
from cron_descriptor import ExpressionDescriptor
from croniter import croniter
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core import signing
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import get_template, render_to_string
from django.urls import reverse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from hc.accounts.models import Project, Member
from hc.api.models import (
DEFAULT_GRACE,
DEFAULT_TIMEOUT,
MAX_DELTA,
Channel,
Check,
Ping,
Notification,
)
from hc.api.transports import Telegram
from hc.front.decorators import require_setting
from hc.front import forms
from hc.front.schemas import telegram_callback
from hc.front.templatetags.hc_extras import (
num_down_title,
down_title,
sortchecks,
site_hostname,
site_scheme,
)
from hc.lib import jsonschema
from hc.lib.badges import get_badge_url
import pytz
from pytz.exceptions import UnknownTimeZoneError
import requests
VALID_SORT_VALUES = ("name", "-name", "last_ping", "-last_ping", "created")
STATUS_TEXT_TMPL = get_template("front/log_status_text.html")
LAST_PING_TMPL = get_template("front/last_ping_cell.html")
EVENTS_TMPL = get_template("front/details_events.html")
DOWNTIMES_TMPL = get_template("front/details_downtimes.html")
def _tags_statuses(checks):
tags, down, grace, num_down = {}, {}, {}, 0
for check in checks:
status = check.get_status()
if status == "down":
num_down += 1
for tag in check.tags_list():
down[tag] = "down"
elif status == "grace":
for tag in check.tags_list():
grace[tag] = "grace"
else:
for tag in check.tags_list():
tags[tag] = "up"
tags.update(grace)
tags.update(down)
return tags, num_down
def _get_check_for_user(request, code):
""" Return specified check if current user has access to it. """
assert request.user.is_authenticated
check = get_object_or_404(Check.objects.select_related("project"), code=code)
if request.user.is_superuser:
return check, True
if request.user.id == check.project.owner_id:
return check, True
membership = get_object_or_404(Member, project=check.project, user=request.user)
return check, membership.rw
def _get_rw_check_for_user(request, code):
check, rw = _get_check_for_user(request, code)
if not rw:
raise PermissionDenied
return check
def _get_channel_for_user(request, code):
""" Return specified channel if current user has access to it. """
assert request.user.is_authenticated
channel = get_object_or_404(Channel.objects.select_related("project"), code=code)
if request.user.is_superuser:
return channel, True
if request.user.id == channel.project.owner_id:
return channel, True
membership = get_object_or_404(Member, project=channel.project, user=request.user)
return channel, membership.rw
def _get_rw_channel_for_user(request, code):
channel, rw = _get_channel_for_user(request, code)
if not rw:
raise PermissionDenied
return channel
def _get_project_for_user(request, project_code):
""" Check access, return (project, rw) tuple. """
project = get_object_or_404(Project, code=project_code)
if request.user.is_superuser:
return project, True
if request.user.id == project.owner_id:
return project, True
membership = get_object_or_404(Member, project=project, user=request.user)
return project, membership.rw
def _get_rw_project_for_user(request, project_code):
""" Check access, return (project, rw) tuple. """
project, rw = _get_project_for_user(request, project_code)
if not rw:
raise PermissionDenied
return project
def _refresh_last_active_date(profile):
""" Update last_active_date if it is more than a day old. """
now = timezone.now()
if profile.last_active_date is None or (now - profile.last_active_date).days > 0:
profile.last_active_date = now
profile.save()
@login_required
def my_checks(request, code):
_refresh_last_active_date(request.profile)
project, rw = _get_project_for_user(request, code)
if request.GET.get("sort") in VALID_SORT_VALUES:
request.profile.sort = request.GET["sort"]
request.profile.save()
if request.session.get("last_project_id") != project.id:
request.session["last_project_id"] = project.id
q = Check.objects.filter(project=project)
checks = list(q.prefetch_related("channel_set"))
sortchecks(checks, request.profile.sort)
tags_statuses, num_down = _tags_statuses(checks)
pairs = list(tags_statuses.items())
pairs.sort(key=lambda pair: pair[0].lower())
channels = Channel.objects.filter(project=project)
channels = list(channels.order_by("created"))
hidden_checks = set()
# Hide checks that don't match selected tags:
selected_tags = set(request.GET.getlist("tag", []))
if selected_tags:
for check in checks:
if not selected_tags.issubset(check.tags_list()):
hidden_checks.add(check)
# Hide checks that don't match the search string:
search = request.GET.get("search", "")
if search:
for check in checks:
search_key = "%s\n%s" % (check.name.lower(), check.code)
if search not in search_key:
hidden_checks.add(check)
# Do we need to show the "Last Duration" header?
show_last_duration = False
for check in checks:
if check.clamped_last_duration():
show_last_duration = True
break
ctx = {
"page": "checks",
"rw": rw,
"checks": checks,
"channels": channels,
"num_down": num_down,
"tags": pairs,
"ping_endpoint": settings.PING_ENDPOINT,
"timezones": pytz.all_timezones,
"project": project,
"num_available": project.num_checks_available(),
"sort": request.profile.sort,
"selected_tags": selected_tags,
"search": search,
"hidden_checks": hidden_checks,
"show_last_duration": show_last_duration,
}
return render(request, "front/my_checks.html", ctx)
@login_required
def status(request, code):
_get_project_for_user(request, code)
checks = list(Check.objects.filter(project__code=code))
details = []
for check in checks:
ctx = {"check": check}
details.append(
{
"code": str(check.code),
"status": check.get_status(),
"last_ping": LAST_PING_TMPL.render(ctx),
"started": check.last_start is not None,
}
)
tags_statuses, num_down = _tags_statuses(checks)
return JsonResponse(
{"details": details, "tags": tags_statuses, "title": num_down_title(num_down)}
)
@login_required
@require_POST
def switch_channel(request, code, channel_code):
check = _get_rw_check_for_user(request, code)
channel = get_object_or_404(Channel, code=channel_code)
if channel.project_id != check.project_id:
return HttpResponseBadRequest()
if request.POST.get("state") == "on":
channel.checks.add(check)
else:
channel.checks.remove(check)
return HttpResponse()
def index(request):
if request.user.is_authenticated:
projects = list(request.profile.projects())
ctx = {
"page": "projects",
"projects": projects,
"last_project_id": request.session.get("last_project_id"),
}
return render(request, "front/projects.html", ctx)
check = Check()
ctx = {
"page": "welcome",
"check": check,
"ping_url": check.url(),
"enable_apprise": settings.APPRISE_ENABLED is True,
"enable_call": settings.TWILIO_AUTH is not None,
"enable_discord": settings.DISCORD_CLIENT_ID is not None,
"enable_linenotify": settings.LINENOTIFY_CLIENT_ID is not None,
"enable_matrix": settings.MATRIX_ACCESS_TOKEN is not None,
"enable_pdc": settings.PD_VENDOR_KEY is not None,
"enable_pushbullet": settings.PUSHBULLET_CLIENT_ID is not None,
"enable_pushover": settings.PUSHOVER_API_TOKEN is not None,
"enable_shell": settings.SHELL_ENABLED is True,
"enable_slack_btn": settings.SLACK_CLIENT_ID is not None,
"enable_sms": settings.TWILIO_AUTH is not None,
"enable_telegram": settings.TELEGRAM_TOKEN is not None,
"enable_trello": settings.TRELLO_APP_KEY is not None,
"enable_whatsapp": settings.TWILIO_USE_WHATSAPP,
"registration_open": settings.REGISTRATION_OPEN,
}
return render(request, "front/welcome.html", ctx)
def dashboard(request):
return render(request, "front/dashboard.html", {})
def serve_doc(request, doc="introduction"):
# Filenames in /templates/docs/ consist of lowercase letters and underscores,
# -- make sure we don't accept anything else
if not re.match(r"^[a-z_]+$", doc):
raise Http404("not found")
path = os.path.join(settings.BASE_DIR, "templates/docs", doc + ".html")
if not os.path.exists(path):
raise Http404("not found")
replaces = {
"{{ default_timeout }}": str(int(DEFAULT_TIMEOUT.total_seconds())),
"{{ default_grace }}": str(int(DEFAULT_GRACE.total_seconds())),
"SITE_NAME": settings.SITE_NAME,
"SITE_ROOT": settings.SITE_ROOT,
"SITE_HOSTNAME": site_hostname(),
"SITE_SCHEME": site_scheme(),
"PING_ENDPOINT": settings.PING_ENDPOINT,
"PING_URL": settings.PING_ENDPOINT + "your-uuid-here",
"IMG_URL": os.path.join(settings.STATIC_URL, "img/docs"),
}
    with open(path, "r", encoding="utf-8") as f:
        content = f.read()
for placeholder, value in replaces.items():
content = content.replace(placeholder, value)
ctx = {
"page": "docs",
"section": doc,
"content": content,
"first_line": content.split("\n")[0],
}
return render(request, "front/docs_single.html", ctx)
def docs_cron(request):
return render(request, "front/docs_cron.html", {})
@require_POST
@login_required
def add_check(request, code):
project = _get_rw_project_for_user(request, code)
if project.num_checks_available() <= 0:
return HttpResponseBadRequest()
check = Check(project=project)
check.save()
check.assign_all_channels()
url = reverse("hc-details", args=[check.code])
return redirect(url + "?new")
@require_POST
@login_required
def update_name(request, code):
check = _get_rw_check_for_user(request, code)
form = forms.NameTagsForm(request.POST)
if form.is_valid():
check.name = form.cleaned_data["name"]
check.tags = form.cleaned_data["tags"]
check.desc = form.cleaned_data["desc"]
check.save()
if "/details/" in request.META.get("HTTP_REFERER", ""):
return redirect("hc-details", code)
return redirect("hc-checks", check.project.code)
@require_POST
@login_required
def filtering_rules(request, code):
check = _get_rw_check_for_user(request, code)
form = forms.FilteringRulesForm(request.POST)
if form.is_valid():
check.subject = form.cleaned_data["subject"]
check.subject_fail = form.cleaned_data["subject_fail"]
check.methods = form.cleaned_data["methods"]
check.manual_resume = form.cleaned_data["manual_resume"]
check.save()
return redirect("hc-details", code)
@require_POST
@login_required
def update_timeout(request, code):
check = _get_rw_check_for_user(request, code)
kind = request.POST.get("kind")
if kind == "simple":
form = forms.TimeoutForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
check.kind = "simple"
check.timeout = form.cleaned_data["timeout"]
check.grace = form.cleaned_data["grace"]
elif kind == "cron":
form = forms.CronForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
check.kind = "cron"
check.schedule = form.cleaned_data["schedule"]
check.tz = form.cleaned_data["tz"]
check.grace = td(minutes=form.cleaned_data["grace"])
check.alert_after = check.going_down_after()
if check.status == "up" and check.alert_after < timezone.now():
# Checks can flip from "up" to "down" state as a result of changing check's
# schedule. We don't want to send notifications when changing schedule
# interactively in the web UI. So we update the `alert_after` and `status`
# fields here the same way as `sendalerts` would do, but without sending
# an actual alert:
check.alert_after = None
check.status = "down"
check.save()
if "/details/" in request.META.get("HTTP_REFERER", ""):
return redirect("hc-details", code)
return redirect("hc-checks", check.project.code)
@require_POST
def cron_preview(request):
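    """ Preview a cron schedule: the next six run times and a human-readable description. """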
schedule = request.POST.get("schedule", "")
tz = request.POST.get("tz")
ctx = {"tz": tz, "dates": []}
try:
zone = pytz.timezone(tz)
now_local = timezone.localtime(timezone.now(), zone)
if len(schedule.split()) != 5:
raise ValueError()
it = croniter(schedule, now_local)
for i in range(0, 6):
ctx["dates"].append(it.get_next(datetime))
ctx["desc"] = str(ExpressionDescriptor(schedule, use_24hour_time_format=True))
except UnknownTimeZoneError:
ctx["bad_tz"] = True
    except Exception:
        # croniter and cron_descriptor raise a variety of exception types for
        # invalid expressions; treat any of them as a bad schedule.
        ctx["bad_schedule"] = True
return render(request, "front/cron_preview.html", ctx)
@login_required
def ping_details(request, code, n=None):
check, rw = _get_check_for_user(request, code)
q = Ping.objects.filter(owner=check)
if n:
q = q.filter(n=n)
try:
ping = q.latest("created")
except Ping.DoesNotExist:
return render(request, "front/ping_details_not_found.html")
ctx = {"check": check, "ping": ping}
return render(request, "front/ping_details.html", ctx)
@require_POST
@login_required
def pause(request, code):
check = _get_rw_check_for_user(request, code)
check.status = "paused"
check.last_start = None
check.alert_after = None
check.save()
# Don't redirect after an AJAX request:
if request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest":
return HttpResponse()
return redirect("hc-details", code)
@require_POST
@login_required
def resume(request, code):
check = _get_rw_check_for_user(request, code)
check.status = "new"
check.last_start = None
check.last_ping = None
check.alert_after = None
check.save()
return redirect("hc-details", code)
@require_POST
@login_required
def remove_check(request, code):
check = _get_rw_check_for_user(request, code)
project = check.project
check.delete()
return redirect("hc-checks", project.code)
def _get_events(check, limit):
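    """ Return up to `limit` recent pings merged with "down" notifications, newest first.

    For a ping that completes a run opened by a "start" ping, a `delta` attribute
    carrying the measured duration is attached (when it is below MAX_DELTA).
    """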
pings = Ping.objects.filter(owner=check).order_by("-id")[:limit]
pings = list(pings)
prev = None
for ping in reversed(pings):
if ping.kind != "start" and prev and prev.kind == "start":
delta = ping.created - prev.created
if delta < MAX_DELTA:
setattr(ping, "delta", delta)
prev = ping
alerts = []
if len(pings):
cutoff = pings[-1].created
alerts = Notification.objects.select_related("channel").filter(
owner=check, check_status="down", created__gt=cutoff
)
events = pings + list(alerts)
events.sort(key=lambda el: el.created, reverse=True)
return events
@login_required
def log(request, code):
check, rw = _get_check_for_user(request, code)
limit = check.project.owner_profile.ping_log_limit
ctx = {
"project": check.project,
"check": check,
"events": _get_events(check, limit),
"limit": limit,
"show_limit_notice": check.n_pings > limit and settings.USE_PAYMENTS,
}
return render(request, "front/log.html", ctx)
@login_required
def details(request, code):
_refresh_last_active_date(request.profile)
check, rw = _get_check_for_user(request, code)
channels = Channel.objects.filter(project=check.project)
channels = list(channels.order_by("created"))
all_tags = set()
q = Check.objects.filter(project=check.project).exclude(tags="")
for tags in q.values_list("tags", flat=True):
all_tags.update(tags.split(" "))
ctx = {
"page": "details",
"project": check.project,
"check": check,
"rw": rw,
"channels": channels,
"enabled_channels": list(check.channel_set.all()),
"timezones": pytz.all_timezones,
"downtimes": check.downtimes(months=3),
"is_new": "new" in request.GET,
"is_copied": "copied" in request.GET,
"all_tags": " ".join(sorted(all_tags)),
}
return render(request, "front/details.html", ctx)
@login_required
def transfer(request, code):
check = _get_rw_check_for_user(request, code)
if request.method == "POST":
target_project = _get_rw_project_for_user(request, request.POST["project"])
if target_project.num_checks_available() <= 0:
return HttpResponseBadRequest()
check.project = target_project
check.save()
check.assign_all_channels()
messages.success(request, "Check transferred successfully!")
return redirect("hc-details", code)
ctx = {"check": check}
return render(request, "front/transfer_modal.html", ctx)
@require_POST
@login_required
def copy(request, code):
check = _get_rw_check_for_user(request, code)
if check.project.num_checks_available() <= 0:
return HttpResponseBadRequest()
new_name = check.name + " (copy)"
# Make sure we don't exceed the 100 character db field limit:
if len(new_name) > 100:
new_name = check.name[:90] + "... (copy)"
copied = Check(project=check.project)
copied.name = new_name
copied.desc, copied.tags = check.desc, check.tags
copied.subject, copied.subject_fail = check.subject, check.subject_fail
copied.methods = check.methods
copied.manual_resume = check.manual_resume
copied.kind = check.kind
copied.timeout, copied.grace = check.timeout, check.grace
copied.schedule, copied.tz = check.schedule, check.tz
copied.save()
copied.channel_set.add(*check.channel_set.all())
url = reverse("hc-details", args=[copied.code])
return redirect(url + "?copied")
@login_required
def status_single(request, code):
check, rw = _get_check_for_user(request, code)
status = check.get_status()
events = _get_events(check, 20)
updated = "1"
if len(events):
updated = str(events[0].created.timestamp())
doc = {
"status": status,
"status_text": STATUS_TEXT_TMPL.render({"check": check, "rw": rw}),
"title": down_title(check),
"updated": updated,
}
if updated != request.GET.get("u"):
doc["events"] = EVENTS_TMPL.render({"check": check, "events": events})
doc["downtimes"] = DOWNTIMES_TMPL.render({"downtimes": check.downtimes(3)})
return JsonResponse(doc)
@login_required
def badges(request, code):
project, rw = _get_project_for_user(request, code)
tags = set()
for check in Check.objects.filter(project=project):
tags.update(check.tags_list())
sorted_tags = sorted(tags, key=lambda s: s.lower())
sorted_tags.append("*") # For the "overall status" badge
key = project.badge_key
urls = []
for tag in sorted_tags:
urls.append(
{
"tag": tag,
"svg": get_badge_url(key, tag),
"svg3": get_badge_url(key, tag, with_late=True),
"json": get_badge_url(key, tag, fmt="json"),
"json3": get_badge_url(key, tag, fmt="json", with_late=True),
"shields": get_badge_url(key, tag, fmt="shields"),
"shields3": get_badge_url(key, tag, fmt="shields", with_late=True),
}
)
ctx = {
"have_tags": len(urls) > 1,
"page": "badges",
"project": project,
"badges": urls,
}
return render(request, "front/badges.html", ctx)
@login_required
def channels(request, code):
project, rw = _get_project_for_user(request, code)
if request.method == "POST":
if not rw:
return HttpResponseForbidden()
code = request.POST["channel"]
try:
channel = Channel.objects.get(code=code)
except Channel.DoesNotExist:
return HttpResponseBadRequest()
if channel.project_id != project.id:
return HttpResponseForbidden()
new_checks = []
for key in request.POST:
if key.startswith("check-"):
code = key[6:]
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
return HttpResponseBadRequest()
if check.project_id != project.id:
return HttpResponseForbidden()
new_checks.append(check)
channel.checks.set(new_checks)
return redirect("hc-channels", project.code)
channels = Channel.objects.filter(project=project)
channels = channels.order_by("created")
channels = channels.annotate(n_checks=Count("checks"))
ctx = {
"page": "channels",
"rw": rw,
"project": project,
"profile": project.owner_profile,
"channels": channels,
"enable_apprise": settings.APPRISE_ENABLED is True,
"enable_call": settings.TWILIO_AUTH is not None,
"enable_discord": settings.DISCORD_CLIENT_ID is not None,
"enable_linenotify": settings.LINENOTIFY_CLIENT_ID is not None,
"enable_matrix": settings.MATRIX_ACCESS_TOKEN is not None,
"enable_pdc": settings.PD_VENDOR_KEY is not None,
"enable_pushbullet": settings.PUSHBULLET_CLIENT_ID is not None,
"enable_pushover": settings.PUSHOVER_API_TOKEN is not None,
"enable_shell": settings.SHELL_ENABLED is True,
"enable_slack_btn": settings.SLACK_CLIENT_ID is not None,
"enable_sms": settings.TWILIO_AUTH is not None,
"enable_telegram": settings.TELEGRAM_TOKEN is not None,
"enable_trello": settings.TRELLO_APP_KEY is not None,
"enable_whatsapp": settings.TWILIO_USE_WHATSAPP,
"use_payments": settings.USE_PAYMENTS,
}
return render(request, "front/channels.html", ctx)
@login_required
def channel_checks(request, code):
channel = _get_rw_channel_for_user(request, code)
assigned = set(channel.checks.values_list("code", flat=True).distinct())
checks = Check.objects.filter(project=channel.project).order_by("created")
ctx = {"checks": checks, "assigned": assigned, "channel": channel}
return render(request, "front/channel_checks.html", ctx)
@require_POST
@login_required
def update_channel_name(request, code):
channel = _get_rw_channel_for_user(request, code)
form = forms.ChannelNameForm(request.POST)
if form.is_valid():
channel.name = form.cleaned_data["name"]
channel.save()
return redirect("hc-channels", channel.project.code)
def verify_email(request, code, token):
channel = get_object_or_404(Channel, code=code)
if channel.make_token() == token:
channel.email_verified = True
channel.save()
return render(request, "front/verify_email_success.html")
return render(request, "bad_link.html")
@csrf_exempt
def unsubscribe_email(request, code, signed_token):
# Some email servers open links in emails to check for malicious content.
# To work around this, on GET requests we serve a confirmation form.
# If the signature is at least 5 minutes old, we also include JS code to
# auto-submit the form.
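    # (The signed token is assumed to be created with the same salt when the alert
    # email is composed, e.g. signing.TimestampSigner(salt="alerts").sign(token).)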
ctx = {}
if ":" in signed_token:
signer = signing.TimestampSigner(salt="alerts")
# First, check the signature without looking at the timestamp:
try:
token = signer.unsign(signed_token)
except signing.BadSignature:
return render(request, "bad_link.html")
# Check if timestamp is older than 5 minutes:
try:
signer.unsign(signed_token, max_age=300)
except signing.SignatureExpired:
ctx["autosubmit"] = True
else:
token = signed_token
channel = get_object_or_404(Channel, code=code, kind="email")
if channel.make_token() != token:
return render(request, "bad_link.html")
if request.method != "POST":
return render(request, "accounts/unsubscribe_submit.html", ctx)
channel.delete()
return render(request, "front/unsubscribe_success.html")
@require_POST
@login_required
def send_test_notification(request, code):
channel, rw = _get_channel_for_user(request, code)
dummy = Check(name="TEST", status="down")
dummy.last_ping = timezone.now() - td(days=1)
dummy.n_pings = 42
if channel.kind == "webhook" and not channel.url_down:
if channel.url_up:
            # If we don't have url_down, but do have url_up, then
            # send a "TEST is UP" notification instead:
dummy.status = "up"
# Delete all older test notifications for this channel
Notification.objects.filter(channel=channel, owner=None).delete()
# Send the test notification
error = channel.notify(dummy, is_test=True)
if error:
messages.warning(request, "Could not send a test notification. %s" % error)
else:
messages.success(request, "Test notification sent!")
return redirect("hc-channels", channel.project.code)
@require_POST
@login_required
def remove_channel(request, code):
channel = _get_rw_channel_for_user(request, code)
project = channel.project
channel.delete()
return redirect("hc-channels", project.code)
@login_required
def add_email(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddEmailForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="email")
channel.value = json.dumps(
{
"value": form.cleaned_data["value"],
"up": form.cleaned_data["up"],
"down": form.cleaned_data["down"],
}
)
channel.save()
channel.assign_all_checks()
is_own_email = form.cleaned_data["value"] == request.user.email
if is_own_email or not settings.EMAIL_USE_VERIFICATION:
# If user is subscribing *their own* address
# we can skip the verification step.
                # Additionally, in a self-hosted setting, the administrator has
                # the option to disable the email verification step altogether.
channel.email_verified = True
channel.save()
else:
channel.send_verify_link()
return redirect("hc-channels", project.code)
else:
form = forms.AddEmailForm()
ctx = {
"page": "channels",
"project": project,
"use_verification": settings.EMAIL_USE_VERIFICATION,
"form": form,
}
return render(request, "integrations/add_email.html", ctx)
@login_required
def add_webhook(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.WebhookForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="webhook")
channel.name = form.cleaned_data["name"]
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.WebhookForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
}
return render(request, "integrations/webhook_form.html", ctx)
@login_required
def edit_webhook(request, code):
channel = _get_rw_channel_for_user(request, code)
if channel.kind != "webhook":
return HttpResponseBadRequest()
if request.method == "POST":
form = forms.WebhookForm(request.POST)
if form.is_valid():
channel.name = form.cleaned_data["name"]
channel.value = form.get_value()
channel.save()
return redirect("hc-channels", channel.project.code)
else:
def flatten(d):
return "\n".join("%s: %s" % pair for pair in d.items())
doc = json.loads(channel.value)
doc["headers_down"] = flatten(doc["headers_down"])
doc["headers_up"] = flatten(doc["headers_up"])
doc["name"] = channel.name
form = forms.WebhookForm(doc)
ctx = {
"page": "channels",
"project": channel.project,
"channel": channel,
"form": form,
}
return render(request, "integrations/webhook_form.html", ctx)
@require_setting("SHELL_ENABLED")
@login_required
def add_shell(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddShellForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="shell")
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddShellForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
}
return render(request, "integrations/add_shell.html", ctx)
@login_required
def add_pd(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddPdForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="pd")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddPdForm()
ctx = {"page": "channels", "form": form}
return render(request, "integrations/add_pd.html", ctx)
@require_setting("PD_VENDOR_KEY")
def pdc_help(request):
ctx = {"page": "channels"}
return render(request, "integrations/add_pdc.html", ctx)
@require_setting("PD_VENDOR_KEY")
@login_required
def add_pdc(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
callback = settings.SITE_ROOT + reverse(
"hc-add-pdc-complete", args=[project.code, state]
)
connect_url = "https://connect.pagerduty.com/connect?" + urlencode(
{"vendor": settings.PD_VENDOR_KEY, "callback": callback}
)
ctx = {"page": "channels", "project": project, "connect_url": connect_url}
request.session["pd"] = state
return render(request, "integrations/add_pdc.html", ctx)
@require_setting("PD_VENDOR_KEY")
@login_required
def add_pdc_complete(request, code, state):
if "pd" not in request.session:
return HttpResponseBadRequest()
project = _get_rw_project_for_user(request, code)
session_state = request.session.pop("pd")
if session_state != state:
return HttpResponseBadRequest()
if request.GET.get("error") == "cancelled":
messages.warning(request, "PagerDuty setup was cancelled.")
return redirect("hc-channels", project.code)
channel = Channel(kind="pd", project=project)
channel.value = json.dumps(
{
"service_key": request.GET.get("service_key"),
"account": request.GET.get("account"),
}
)
channel.save()
channel.assign_all_checks()
messages.success(request, "The PagerDuty integration has been added!")
return redirect("hc-channels", project.code)
@login_required
def add_pagertree(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="pagertree")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_pagertree.html", ctx)
@login_required
def add_slack(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="slack")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {
"page": "channels",
"form": form,
}
return render(request, "integrations/add_slack.html", ctx)
@require_setting("SLACK_CLIENT_ID")
def slack_help(request):
ctx = {"page": "channels"}
return render(request, "integrations/add_slack_btn.html", ctx)
@require_setting("SLACK_CLIENT_ID")
@login_required
def add_slack_btn(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
authorize_url = "https://slack.com/oauth/v2/authorize?" + urlencode(
{
"scope": "incoming-webhook",
"client_id": settings.SLACK_CLIENT_ID,
"state": state,
}
)
ctx = {
"project": project,
"page": "channels",
"authorize_url": authorize_url,
}
request.session["add_slack"] = (state, str(project.code))
return render(request, "integrations/add_slack_btn.html", ctx)
@require_setting("SLACK_CLIENT_ID")
@login_required
def add_slack_complete(request):
if "add_slack" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_slack")
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "Slack setup was cancelled.")
return redirect("hc-channels", project.code)
if request.GET.get("state") != state:
return HttpResponseForbidden()
result = requests.post(
"https://slack.com/api/oauth.v2.access",
{
"client_id": settings.SLACK_CLIENT_ID,
"client_secret": settings.SLACK_CLIENT_SECRET,
"code": request.GET.get("code"),
},
)
doc = result.json()
if doc.get("ok"):
channel = Channel(kind="slack", project=project)
channel.value = result.text
channel.save()
channel.assign_all_checks()
messages.success(request, "The Slack integration has been added!")
else:
s = doc.get("error")
messages.warning(request, "Error message from slack: %s" % s)
return redirect("hc-channels", project.code)
@login_required
def add_mattermost(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="mattermost")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "form": form, "project": project}
return render(request, "integrations/add_mattermost.html", ctx)
@require_setting("PUSHBULLET_CLIENT_ID")
@login_required
def add_pushbullet(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
authorize_url = "https://www.pushbullet.com/authorize?" + urlencode(
{
"client_id": settings.PUSHBULLET_CLIENT_ID,
"redirect_uri": settings.SITE_ROOT + reverse(add_pushbullet_complete),
"response_type": "code",
"state": state,
}
)
ctx = {
"page": "channels",
"project": project,
"authorize_url": authorize_url,
}
request.session["add_pushbullet"] = (state, str(project.code))
return render(request, "integrations/add_pushbullet.html", ctx)
@require_setting("PUSHBULLET_CLIENT_ID")
@login_required
def add_pushbullet_complete(request):
if "add_pushbullet" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_pushbullet")
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "Pushbullet setup was cancelled.")
return redirect("hc-channels", project.code)
if request.GET.get("state") != state:
return HttpResponseForbidden()
result = requests.post(
"https://api.pushbullet.com/oauth2/token",
{
"client_id": settings.PUSHBULLET_CLIENT_ID,
"client_secret": settings.PUSHBULLET_CLIENT_SECRET,
"code": request.GET.get("code"),
"grant_type": "authorization_code",
},
)
doc = result.json()
if "access_token" in doc:
channel = Channel(kind="pushbullet", project=project)
channel.value = doc["access_token"]
channel.save()
channel.assign_all_checks()
messages.success(request, "The Pushbullet integration has been added!")
else:
messages.warning(request, "Something went wrong")
return redirect("hc-channels", project.code)
@require_setting("DISCORD_CLIENT_ID")
@login_required
def add_discord(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
auth_url = "https://discordapp.com/api/oauth2/authorize?" + urlencode(
{
"client_id": settings.DISCORD_CLIENT_ID,
"scope": "webhook.incoming",
"redirect_uri": settings.SITE_ROOT + reverse(add_discord_complete),
"response_type": "code",
"state": state,
}
)
ctx = {"page": "channels", "project": project, "authorize_url": auth_url}
request.session["add_discord"] = (state, str(project.code))
return render(request, "integrations/add_discord.html", ctx)
@require_setting("DISCORD_CLIENT_ID")
@login_required
def add_discord_complete(request):
if "add_discord" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_discord")
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "Discord setup was cancelled.")
return redirect("hc-channels", project.code)
if request.GET.get("state") != state:
return HttpResponseForbidden()
result = requests.post(
"https://discordapp.com/api/oauth2/token",
{
"client_id": settings.DISCORD_CLIENT_ID,
"client_secret": settings.DISCORD_CLIENT_SECRET,
"code": request.GET.get("code"),
"grant_type": "authorization_code",
"redirect_uri": settings.SITE_ROOT + reverse(add_discord_complete),
},
)
doc = result.json()
if "access_token" in doc:
channel = Channel(kind="discord", project=project)
channel.value = result.text
channel.save()
channel.assign_all_checks()
messages.success(request, "The Discord integration has been added!")
else:
messages.warning(request, "Something went wrong.")
return redirect("hc-channels", project.code)
@require_setting("PUSHOVER_API_TOKEN")
def pushover_help(request):
ctx = {"page": "channels"}
return render(request, "integrations/add_pushover_help.html", ctx)
@require_setting("PUSHOVER_API_TOKEN")
@login_required
def add_pushover(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
state = token_urlsafe()
failure_url = settings.SITE_ROOT + reverse("hc-channels", args=[project.code])
success_url = (
settings.SITE_ROOT
+ reverse("hc-add-pushover", args=[project.code])
+ "?"
+ urlencode(
{
"state": state,
"prio": request.POST.get("po_priority", "0"),
"prio_up": request.POST.get("po_priority_up", "0"),
}
)
)
subscription_url = (
settings.PUSHOVER_SUBSCRIPTION_URL
+ "?"
+ urlencode({"success": success_url, "failure": failure_url})
)
request.session["pushover"] = state
return redirect(subscription_url)
# Handle successful subscriptions
if "pushover_user_key" in request.GET:
if "pushover" not in request.session:
return HttpResponseForbidden()
state = request.session.pop("pushover")
if request.GET.get("state") != state:
return HttpResponseForbidden()
if request.GET.get("pushover_unsubscribed") == "1":
# Unsubscription: delete all Pushover channels for this project
Channel.objects.filter(project=project, kind="po").delete()
return redirect("hc-channels", project.code)
form = forms.AddPushoverForm(request.GET)
if not form.is_valid():
return HttpResponseBadRequest()
channel = Channel(project=project, kind="po")
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
messages.success(request, "The Pushover integration has been added!")
return redirect("hc-channels", project.code)
# Show Integration Settings form
ctx = {
"page": "channels",
"project": project,
"po_retry_delay": td(seconds=settings.PUSHOVER_EMERGENCY_RETRY_DELAY),
"po_expiration": td(seconds=settings.PUSHOVER_EMERGENCY_EXPIRATION),
}
return render(request, "integrations/add_pushover.html", ctx)
@login_required
def add_opsgenie(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddOpsGenieForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="opsgenie")
v = {"region": form.cleaned_data["region"], "key": form.cleaned_data["key"]}
channel.value = json.dumps(v)
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddOpsGenieForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_opsgenie.html", ctx)
@login_required
def add_victorops(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="victorops")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_victorops.html", ctx)
@login_required
def add_zulip(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddZulipForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="zulip")
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddZulipForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_zulip.html", ctx)
@csrf_exempt
@require_POST
def telegram_bot(request):
try:
doc = json.loads(request.body.decode())
jsonschema.validate(doc, telegram_callback)
except ValueError:
return HttpResponseBadRequest()
except jsonschema.ValidationError:
# We don't recognize the message format, but don't want Telegram
# retrying this over and over again, so respond with 200 OK
return HttpResponse()
if "/start" not in doc["message"]["text"]:
return HttpResponse()
chat = doc["message"]["chat"]
name = max(chat.get("title", ""), chat.get("username", ""))
invite = render_to_string(
"integrations/telegram_invite.html",
{"qs": signing.dumps((chat["id"], chat["type"], name))},
)
Telegram.send(chat["id"], invite)
return HttpResponse()
@require_setting("TELEGRAM_TOKEN")
def telegram_help(request):
ctx = {
"page": "channels",
"bot_name": settings.TELEGRAM_BOT_NAME,
}
return render(request, "integrations/add_telegram.html", ctx)
@require_setting("TELEGRAM_TOKEN")
@login_required
def add_telegram(request):
chat_id, chat_type, chat_name = None, None, None
qs = request.META["QUERY_STRING"]
if qs:
try:
chat_id, chat_type, chat_name = signing.loads(qs, max_age=600)
except signing.BadSignature:
return render(request, "bad_link.html")
if request.method == "POST":
project = _get_rw_project_for_user(request, request.POST.get("project"))
channel = Channel(project=project, kind="telegram")
channel.value = json.dumps(
{"id": chat_id, "type": chat_type, "name": chat_name}
)
channel.save()
channel.assign_all_checks()
messages.success(request, "The Telegram integration has been added!")
return redirect("hc-channels", project.code)
ctx = {
"page": "channels",
"projects": request.profile.projects(),
"chat_id": chat_id,
"chat_type": chat_type,
"chat_name": chat_name,
"bot_name": settings.TELEGRAM_BOT_NAME,
}
return render(request, "integrations/add_telegram.html", ctx)
@require_setting("TWILIO_AUTH")
@login_required
def add_sms(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddSmsForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="sms")
channel.name = form.cleaned_data["label"]
channel.value = json.dumps({"value": form.cleaned_data["value"]})
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddSmsForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"profile": project.owner_profile,
}
return render(request, "integrations/add_sms.html", ctx)
@require_setting("TWILIO_AUTH")
@login_required
def add_call(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddSmsForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="call")
channel.name = form.cleaned_data["label"]
channel.value = json.dumps({"value": form.cleaned_data["value"]})
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddSmsForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"profile": project.owner_profile,
}
return render(request, "integrations/add_call.html", ctx)
@require_setting("TWILIO_USE_WHATSAPP")
@login_required
def add_whatsapp(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddSmsForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="whatsapp")
channel.name = form.cleaned_data["label"]
channel.value = json.dumps(
{
"value": form.cleaned_data["value"],
"up": form.cleaned_data["up"],
"down": form.cleaned_data["down"],
}
)
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddSmsForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"profile": project.owner_profile,
}
return render(request, "integrations/add_whatsapp.html", ctx)
@require_setting("TRELLO_APP_KEY")
@login_required
def add_trello(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
channel = Channel(project=project, kind="trello")
channel.value = request.POST["settings"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
return_url = settings.SITE_ROOT + reverse("hc-add-trello", args=[project.code])
authorize_url = "https://trello.com/1/authorize?" + urlencode(
{
"expiration": "never",
"name": settings.SITE_NAME,
"scope": "read,write",
"response_type": "token",
"key": settings.TRELLO_APP_KEY,
"return_url": return_url,
}
)
ctx = {
"page": "channels",
"project": project,
"authorize_url": authorize_url,
}
return render(request, "integrations/add_trello.html", ctx)
@require_setting("MATRIX_ACCESS_TOKEN")
@login_required
def add_matrix(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddMatrixForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="matrix")
channel.value = form.cleaned_data["room_id"]
# If user supplied room alias instead of ID, use it as channel name
alias = form.cleaned_data["alias"]
if not alias.startswith("!"):
channel.name = alias
channel.save()
channel.assign_all_checks()
messages.success(request, "The Matrix integration has been added!")
return redirect("hc-channels", project.code)
else:
form = forms.AddMatrixForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"matrix_user_id": settings.MATRIX_USER_ID,
}
return render(request, "integrations/add_matrix.html", ctx)
@require_setting("APPRISE_ENABLED")
@login_required
def add_apprise(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddAppriseForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="apprise")
channel.value = form.cleaned_data["url"]
channel.save()
channel.assign_all_checks()
messages.success(request, "The Apprise integration has been added!")
return redirect("hc-channels", project.code)
else:
form = forms.AddAppriseForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_apprise.html", ctx)
@require_setting("TRELLO_APP_KEY")
@login_required
@require_POST
def trello_settings(request):
token = request.POST.get("token")
url = "https://api.trello.com/1/members/me/boards?" + urlencode(
{
"key": settings.TRELLO_APP_KEY,
"token": token,
"fields": "id,name",
"lists": "open",
"list_fields": "id,name",
}
)
r = requests.get(url)
ctx = {"token": token, "data": r.json()}
return render(request, "integrations/trello_settings.html", ctx)
@login_required
def add_msteams(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="msteams")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_msteams.html", ctx)
@login_required
def add_prometheus(request, code):
project, rw = _get_project_for_user(request, code)
ctx = {"page": "channels", "project": project}
return render(request, "integrations/add_prometheus.html", ctx)
def metrics(request, code, key):
if len(key) != 32:
return HttpResponseBadRequest()
q = Project.objects.filter(code=code, api_key_readonly=key)
try:
project = q.get()
except Project.DoesNotExist:
return HttpResponseForbidden()
checks = Check.objects.filter(project_id=project.id).order_by("id")
def esc(s):
return s.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
def output(checks):
yield "# HELP hc_check_up Whether the check is currently up (1 for yes, 0 for no).\n"
yield "# TYPE hc_check_up gauge\n"
TMPL = """hc_check_up{name="%s", tags="%s", unique_key="%s"} %d\n"""
for check in checks:
value = 0 if check.get_status() == "down" else 1
yield TMPL % (esc(check.name), esc(check.tags), check.unique_key, value)
tags_statuses, num_down = _tags_statuses(checks)
yield "\n"
yield "# HELP hc_tag_up Whether all checks with this tag are up (1 for yes, 0 for no).\n"
yield "# TYPE hc_tag_up gauge\n"
TMPL = """hc_tag_up{tag="%s"} %d\n"""
for tag in sorted(tags_statuses):
value = 0 if tags_statuses[tag] == "down" else 1
yield TMPL % (esc(tag), value)
yield "\n"
yield "# HELP hc_checks_total The total number of checks.\n"
yield "# TYPE hc_checks_total gauge\n"
yield "hc_checks_total %d\n" % len(checks)
yield "\n"
yield "# HELP hc_checks_down_total The number of checks currently down.\n"
yield "# TYPE hc_checks_down_total gauge\n"
yield "hc_checks_down_total %d\n" % num_down
return HttpResponse(output(checks), content_type="text/plain")
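# A scrape of the metrics endpoint above yields plain-text output along these lines
# (label and metric values below are purely illustrative):
#
#   hc_check_up{name="backups", tags="prod daily", unique_key="0f2a..."} 1
#   hc_tag_up{tag="prod"} 1
#   hc_checks_total 12
#   hc_checks_down_total 0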
@login_required
def add_spike(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="spike")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_spike.html", ctx)
@require_setting("LINENOTIFY_CLIENT_ID")
@login_required
def add_linenotify(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
authorize_url = " https://notify-bot.line.me/oauth/authorize?" + urlencode(
{
"client_id": settings.LINENOTIFY_CLIENT_ID,
"redirect_uri": settings.SITE_ROOT + reverse(add_linenotify_complete),
"response_type": "code",
"state": state,
"scope": "notify",
}
)
ctx = {
"page": "channels",
"project": project,
"authorize_url": authorize_url,
}
request.session["add_linenotify"] = (state, str(project.code))
return render(request, "integrations/add_linenotify.html", ctx)
@require_setting("LINENOTIFY_CLIENT_ID")
@login_required
def add_linenotify_complete(request):
if "add_linenotify" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_linenotify")
if request.GET.get("state") != state:
return HttpResponseForbidden()
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "LINE Notify setup was cancelled.")
return redirect("hc-channels", project.code)
# Exchange code for access token
result = requests.post(
"https://notify-bot.line.me/oauth/token",
{
"grant_type": "authorization_code",
"code": request.GET.get("code"),
"redirect_uri": settings.SITE_ROOT + reverse(add_linenotify_complete),
"client_id": settings.LINENOTIFY_CLIENT_ID,
"client_secret": settings.LINENOTIFY_CLIENT_SECRET,
},
)
doc = result.json()
if doc.get("status") != 200:
messages.warning(request, "Something went wrong.")
return redirect("hc-channels", project.code)
    # Fetch the notification target's name; we will use it as the channel name:
token = doc["access_token"]
result = requests.get(
"https://notify-api.line.me/api/status",
headers={"Authorization": "Bearer %s" % token},
)
doc = result.json()
channel = Channel(kind="linenotify", project=project)
channel.name = doc.get("target")
channel.value = token
channel.save()
channel.assign_all_checks()
messages.success(request, "The LINE Notify integration has been added!")
return redirect("hc-channels", project.code)
# Forks: add custom views after this line
def stats(request):
return render(request, "front/stats.html", {})
| 30.955637 | 97 | 0.638785 | [
"BSD-3-Clause"
] | srvz/healthchecks | hc/front/views.py | 59,311 | Python |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
import torch
from .utils import get_mask_edges, get_surface_distance
def compute_average_surface_distance(
seg_pred: Union[np.ndarray, torch.Tensor],
seg_gt: Union[np.ndarray, torch.Tensor],
label_idx: int,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
"""
    Compute the Average Surface Distance from `seg_pred` to `seg_gt` under the default
    setting. In addition, if ``symmetric = True`` is set, the average symmetric surface
    distance between these two inputs will be returned.
Args:
seg_pred: first binary or labelfield image.
seg_gt: second binary or labelfield image.
label_idx: for labelfield images, convert to binary with
`seg_pred = seg_pred == label_idx`.
symmetric: if calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
"""
(edges_pred, edges_gt) = get_mask_edges(seg_pred, seg_gt, label_idx)
surface_distance = get_surface_distance(edges_pred, edges_gt, label_idx, distance_metric=distance_metric)
if surface_distance.shape == (0,):
return np.inf
avg_surface_distance = surface_distance.mean()
if not symmetric:
return avg_surface_distance
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, label_idx, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
return np.inf
avg_surface_distance_2 = surface_distance_2.mean()
return np.mean((avg_surface_distance, avg_surface_distance_2))
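if __name__ == "__main__":
    # Minimal usage sketch: two 5x5x5 binary masks whose foreground cubes are
    # shifted by one voxel along the last axis, compared with label_idx=1.
    pred = np.zeros((5, 5, 5))
    pred[1:3, 1:3, 1:3] = 1
    gt = np.zeros((5, 5, 5))
    gt[1:3, 1:3, 2:4] = 1
    print(compute_average_surface_distance(pred, gt, label_idx=1, symmetric=True))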
| 40.87931 | 111 | 0.724589 | [
"Apache-2.0"
] | Alxaline/MONAI | monai/metrics/surface_distance.py | 2,371 | Python |
"""
This version considers task's datasets have equal number of labeled samples
"""
import os
import json
from collections import defaultdict
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd import grad as torch_grad
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
import util
from util import in_feature_size
import alpha_opt
import data_loading as db
from torch.optim import lr_scheduler
class MTL_pairwise(object):
def __init__(self, ft_extrctor_prp, hypoth_prp, discrm_prp, **kwargs):
final_results = defaultdict()
# ######################### argument definition ###############
        self.criterion = kwargs['criterion']
self.c3_value = kwargs['c3']
self.grad_weight = kwargs['grad_weight']
self.img_size = kwargs['img_size']
self.num_chnnl = kwargs['chnnl']
self.lr = kwargs['lr']
self.momentum = kwargs['momentum']
self.epochs = kwargs['epochs']
num_tr_smpl = kwargs['tr_smpl']
num_test_smpl = kwargs['test_smpl']
self.trial = kwargs['Trials']
self.tsklist = kwargs['tsk_list']
self.num_tsk = len(self.tsklist)
        if self.criterion == 'wasserstien':  # 'wasserstien' spelling is the key used throughout
            self.stp_sz_sch = 30
        else:
            self.stp_sz_sch = 50
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.alpha = np.ones((self.num_tsk, self.num_tsk)) * (0.1 / (self.num_tsk - 1))
np.fill_diagonal(self.alpha, 0.9)
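        # alpha[t, j] weights how much task j's loss contributes to task t's objective.
        # Each row starts near one-hot (0.9 on its own task, the remaining 0.1 spread
        # evenly over the other tasks) and is re-optimised once per epoch in model_fit.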
self.wrdir = os.path.join(os.getcwd(), '_'.join( self.tsklist)+'_'+str(num_tr_smpl)+'_'+ str(self.epochs)+'_'+self.criterion, 'runs_'+str(self.c3_value))
try:
os.makedirs(self.wrdir)
except OSError:
if not os.path.isdir(self.wrdir):
raise
with open(os.path.join(self.wrdir, 'info_itr_'+str(self.trial)+'.json'), 'a') as outfile:
json.dump([ft_extrctor_prp,hypoth_prp,discrm_prp], outfile)
json.dump(kwargs, outfile)
# Constructing F -> H and F -> D
self.FE = util.feature_extractor(ft_extrctor_prp).construct().to(self.device)
print (self.FE)
self.hypothesis = [util.classifier(hypoth_prp).to(self.device) for _ in range(self.num_tsk)]
print (self.hypothesis[0])
self.discrm = {'{}{}'.format(i, j): util.classifier(discrm_prp).to(self.device)for i in range(self.num_tsk) for
j in range(i + 1, self.num_tsk)}
print (self.discrm['01'])
all_parameters_h = sum([list(h.parameters()) for h in self.hypothesis], [])
all_parameters_discrm = sum([list(self.discrm[d].parameters()) for d in self.discrm], [])
self.optimizer = optim.SGD(list(self.FE.parameters()) + list(all_parameters_h) + list(all_parameters_discrm),
lr=self.lr,
momentum=self.momentum)
self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=self.stp_sz_sch, gamma=0.5)
train_loader, test_loader, validation_loader = db.data_loading(self.img_size, num_tr_smpl,num_test_smpl, self.tsklist )
self.writer = SummaryWriter(os.path.join(self.wrdir, 'itr'+str(self.trial)))
Total_loss = []
for epoch in range(self.epochs):
self.scheduler.step(epoch)
whole_loss = self.model_fit(train_loader, epoch)
Total_loss.append(whole_loss)
tasks_trAcc = self.model_eval(train_loader, epoch, 'train')
tasks_valAcc = self.model_eval(validation_loader, epoch, 'validation')
tasks_teAcc = self.model_eval(test_loader, epoch, 'test')
# if np.abs(np.mean(Total_loss[-5:-1]) - Total_loss[-1]) < 0.002 :
# print('Stop learning, reach to a stable point at epoch {:d} with total loss {:.4f}'.format(epoch,
# Total_loss[-1]))
# break
if 1.5*np.mean(Total_loss[-5:-1]) < Total_loss[-1]:
                print('****** Training error is increasing, stopping early')
break
final_results['alpha_c3_'+str(self.c3_value)] = (self.alpha).tolist()
final_results['Tasks_val_Acc_c3_'+str(self.c3_value)] = (tasks_valAcc).tolist()
final_results['Tasks_test_Acc_c3_' + str(self.c3_value) ] = (tasks_teAcc).tolist()
final_results['Tasks_train_Acc_c3_'+str(self.c3_value)] = (tasks_trAcc).tolist()
with open(os.path.join(self.wrdir, 'info_itr_'+str(self.trial)+'.json'), 'a') as outfile:
json.dump(final_results, outfile)
final_prmtr = defaultdict()
final_prmtr['FE'] = self.FE.state_dict()
for i,h in enumerate(self.hypothesis):
final_prmtr['hypo'+str(i)] = h.state_dict()
for k, D in self.discrm.items():
final_prmtr['dicrm'+k] = D.state_dict()
torch.save(final_prmtr, os.path.join(self.wrdir, 'itr'+str(self.trial),'MTL_parameters.pt'))
self.writer.close()
def model_fit(self, data_loader, epoch):
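        """ Run one training epoch over all tasks.

        For every batch, compute the alpha-weighted cross-entropy loss of each task
        head plus the pairwise domain losses (h_divergence or wasserstien critic) and
        back-propagate the combined objective. At the end of the epoch the
        task-weighting matrix alpha is re-optimised. Returns the mean total loss.
        """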
discrm_distnc_mtrx = np.zeros((self.num_tsk, self.num_tsk))
loss_mtrx_hypo_vlue = np.zeros((self.num_tsk, self.num_tsk))
weigh_loss_hypo_vlue, correct_hypo = np.zeros(self.num_tsk), np.zeros(self.num_tsk)
Total_loss = 0
n_batch = 0
# set train mode
self.FE.train()
for t in range(self.num_tsk):
self.hypothesis[t].train()
for j in range(t + 1, self.num_tsk):
self.discrm['{}{}'.format(t, j)].train()
# #####
for tasks_batch in zip(*data_loader):
Loss_1, Loss_2 = 0, 0
n_batch += 1
# data = (x,y)
inputs = torch.cat([batch[0] for batch in tasks_batch])
btch_sz = len(tasks_batch[0][0])
targets = torch.cat([batch[1] for batch in tasks_batch])
# inputs = (x1,...,xT) targets = (y1,...,yT)
inputs = inputs.to(self.device)
targets = targets.to(self.device)
features = self.FE(inputs)
features = features.view(features.size(0), -1)
for t in range(self.num_tsk):
w = torch.tensor([np.tile(self.alpha[t, i], reps=len(data[0])) for i, data in enumerate(tasks_batch)],
dtype=torch.float).view(-1)
w = w.to(self.device)
label_prob = self.hypothesis[t](features)
pred = label_prob[t * (btch_sz):(t + 1) * btch_sz].argmax(dim=1, keepdim=True)
correct_hypo[t] += (
(pred.eq(targets[t * btch_sz:(t + 1) * btch_sz].view_as(pred)).sum().item()) / btch_sz)
hypo_loss = torch.mean(w * F.cross_entropy(label_prob, targets, reduction='none'))
# definition of loss to be optimized
Loss_1 += hypo_loss
weigh_loss_hypo_vlue[t] += hypo_loss.item()
loss_mtrx_hypo_vlue[t, :] += [F.cross_entropy(label_prob[j * (btch_sz):(j + 1) * btch_sz, :],
targets[j * (btch_sz):(j + 1) * btch_sz],
reduction='mean').item() for j in range(self.num_tsk)]
for k in range(t + 1, self.num_tsk):
                    # weight = alpha_{tk} + alpha_{kt}; the alpha matrix is not assumed to be symmetric
alpha_domain = torch.tensor(self.alpha[t, k] + self.alpha[k, t], dtype=torch.float)
alpha_domain = alpha_domain.to(self.device)
if self.criterion =='h_divergence':
domain_y = torch.cat([torch.ones(len(tasks_batch[t][0]), dtype=torch.float),
torch.zeros(len(tasks_batch[k][0]), dtype=torch.float)])
# domain_x = torch.cat([tasks_batch[t-1][0], tasks_batch[k-1][0] ])
domain_y = domain_y.to(self.device)
domain_features = torch.cat([features[t * btch_sz:(t + 1) * btch_sz], features[k * btch_sz:(k + 1) * btch_sz]])
domain_features = domain_features.view(domain_features.size(0), -1)
domain_pred = self.discrm['{}{}'.format(t, k)](domain_features).squeeze()
disc_loss = F.binary_cross_entropy(domain_pred, domain_y)
# discriminator accuracy defines H-divergence
domain_lbl = domain_pred >= 0.5
                        domain_lbl = domain_lbl.float()  # keep on the current device instead of hard-coding torch.cuda.FloatTensor
discrm_distnc_mtrx[t, k] += (domain_lbl.eq(domain_y).sum().item()) / len(domain_y)
discrm_distnc_mtrx[k, t] = discrm_distnc_mtrx[t, k]
print(discrm_distnc_mtrx[t, :])
elif self.criterion =='wasserstien':
features_t = features[t * btch_sz:(t + 1) * btch_sz]
features_t = features_t.view(features_t.size(0), -1)
features_k = features[k * btch_sz:(k + 1) * btch_sz]
features_k = features_k.view(features_k.size(0), -1)
pred_k = self.discrm['{}{}'.format(t, k)](features_k).squeeze()
pred_t = self.discrm['{}{}'.format(t, k)](features_t).squeeze()
                        gradient_pntly = self.gradient_penalty(inputs[t * btch_sz:(t + 1) * btch_sz], inputs[k * btch_sz:(k + 1) * btch_sz], t, k)
                        # critic loss ---> E[f(x_t)] - E[f(x_k)] + gamma * (||grad f(x_interp)|| - 1)^2
                        disc_loss = (pred_t.mean() - pred_k.mean()) + self.grad_weight * gradient_pntly
                        # the negative sign turns the critic objective into the Wasserstein distance estimate
discrm_distnc_mtrx[t, k] += -(pred_t.mean() - pred_k.mean()).item()
discrm_distnc_mtrx[k, t] = discrm_distnc_mtrx[t, k]
disc_loss = alpha_domain * disc_loss
Loss_2 += disc_loss
if n_batch % 500 == 0:
grid_img = torchvision.utils.make_grid(inputs, nrow=5, padding=30)
self.writer.add_image('result Image', grid_img)
Loss = torch.mean(Loss_1) + Loss_2 * (1 / self.num_tsk)
Total_loss += Loss.item()
# loss formula for all tasks regarding the current batch
self.optimizer.zero_grad()
Loss.backward()
self.optimizer.step()
discrm_distnc_mtrx /= n_batch
weigh_loss_hypo_vlue /= n_batch
loss_mtrx_hypo_vlue /= n_batch
correct_hypo /= n_batch
Total_loss /= n_batch
print('================== epoch {:d} ========'.format(epoch))
print('Final Total Loss {:.3f}'.format(Total_loss ))
print('discriminator distance based on '+self.criterion +'\n'+ str(discrm_distnc_mtrx))
print(' hypothesis loss \n' + str(loss_mtrx_hypo_vlue))
print(' hypothesis accuracy \n' + str(correct_hypo * 100))
print('coefficient:',self.alpha)
self.writer.add_scalars('MTL_total_loss', {'MTL_total_loss': Total_loss}, epoch)
for t in range(self.num_tsk):
# self.writer.add_scalars('task_' + str(t) + '/loss', {'loss_train': loss_mtrx_hypo_vlue[t, t]}, epoch)
for j in range(self.num_tsk):
if j != t:
self.writer.add_scalars('task_' + str(t) + '/Discrm_distance',
{'loss_D' + '_'.join([self.tsklist[t],self.tsklist[j]]): discrm_distnc_mtrx[t, j]}, epoch)
self.writer.add_scalars('task_' + str(t) + '/alpha',
{'alpha' + '_'.join([self.tsklist[t],self.tsklist[j]]): self.alpha[t, j]}, epoch)
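        # Re-solve the task-weighting matrix alpha once per epoch. min_alphacvx lives in the
        # imported alpha_opt module (not shown in this file); it appears to solve a convex
        # sub-problem trading the per-task hypothesis losses against the pairwise discrepancies,
        # with c_2 and c_3 passed in as trade-off constants and the transposes matching its convention.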
if epoch % 1 == 0:
c_2, c_3 = 1 * np.ones(self.num_tsk), self.c3_value * np.ones(self.num_tsk)
self.alpha = alpha_opt.min_alphacvx(self.alpha.T, c_2, c_3, loss_mtrx_hypo_vlue.T, discrm_distnc_mtrx.T)
self.alpha = self.alpha.T
return Total_loss
def model_eval(self, data_loader, epoch, phase='test'):
loss_hypo_vlue = np.zeros(self.num_tsk)
correct_hypo = np.zeros(self.num_tsk)
self.FE.eval()
for t in range(self.num_tsk):
n_batch_t = 0
self.hypothesis[t].eval()
for j in range(t + 1, self.num_tsk):
self.discrm['{}{}'.format(t, j)].eval()
for inputs, targets in (data_loader[t]):
n_batch_t += 1
inputs = inputs.to(self.device)
targets = targets.to(self.device)
features = self.FE(inputs)
features = features.view(features.size(0), -1)
label_prob = self.hypothesis[t](features)
pred = label_prob.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct_hypo[t] += ((pred.eq(targets.view_as(pred)).sum().item()) / len(pred))
loss_hypo_vlue[t] += F.cross_entropy(label_prob, targets, reduction='mean').item()
if n_batch_t % 100 == 0:
grid_img = torchvision.utils.make_grid(inputs, nrow=5, padding=30)
self.writer.add_image('result Image_' + phase, grid_img)
loss_hypo_vlue[t] /= n_batch_t
correct_hypo[t] /= n_batch_t
self.writer.add_scalars('task_' + str(t) + '/loss', {'loss_' + phase: loss_hypo_vlue[t]}, epoch)
self.writer.add_scalars('task_' + str(t) + '/Acc', {'Acc_' + phase: correct_hypo[t]}, epoch)
        print('\t === hypothesis **' + phase + '** loss \n' + str(loss_hypo_vlue))
        print('\t === hypothesis **' + phase + '** accuracy \n' + str(correct_hypo * 100))
return correct_hypo
def gradient_penalty(self, data_t, data_k, t, k):
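        # WGAN-GP style gradient penalty: sample random points on the segment between examples of
        # tasks t and k and push the critic's gradient norm at those points towards 1,
        # i.e. penalty = E[(||grad_x D(FE(x_interp))|| - 1)^2].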
batch_size = data_k.size()[0]
# Calculate interpolation
        theta = torch.rand(batch_size, 1, 1, 1)
theta = theta.expand_as(data_t)
theta = theta.to(self.device)
interpolated = theta * data_t + (1 - theta) * data_k
        # compute gradients w.r.t. the interpolated samples
interpolated = Variable(interpolated, requires_grad=True)
interpolated = interpolated.to(self.device)
features_intrpltd = self.FE(interpolated)
features_intrpltd = features_intrpltd.view(features_intrpltd.size(0), -1)
# Calculate probability of interpolated examples
prob_interpolated = self.discrm['{}{}'.format(t, k)](features_intrpltd).squeeze()
# Calculate gradients of probabilities with respect to examples
gradients = torch_grad(outputs=prob_interpolated, inputs=interpolated,
grad_outputs=torch.ones(
prob_interpolated.size()).to(self.device),
create_graph=True, retain_graph=True)[0]
# Gradients have shape (batch_size, num_channels, img_width, img_height),
# so flatten to easily take norm per example in batch
gradients = gradients.view(batch_size, -1)
# Derivatives of the gradient close to 0 can cause problems because of
# the square root, so manually calculate norm and add epsilon
gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
# Return gradient penalty
return ((gradients_norm - 1) ** 2).mean()
def main():
""""options for criterion is wasserstien, h_divergence"""
# criterion = ['wasserstien', 'h_divergence']
itertn = 1
# for c3_value in [0.5, 0.2, 1]:
c3_value = 0.5
for trial in range(1):
args = {'img_size': 28,
'chnnl': 1,
'lr': 0.01,
'momentum': 0.9,
'epochs': 1,
'tr_smpl': 1000,
'test_smpl': 10000,
'tsk_list': ['mnist', 'svhn', 'm_mnist'],
'grad_weight': 1,
'Trials': trial,
#'criterion': 'h_divergence',
'criterion': 'wasserstien',
'c3':c3_value}
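        # Layer-spec dictionaries consumed by the network builders (util and the model
        # constructors, defined elsewhere). Judging from the values, 'conv' entries look like
        # [in_channels, out_channels, kernel, stride, padding], 'maxpool' like [kernel, stride,
        # padding] and 'fc' like [in_features, out_features] -- treat this reading as an assumption.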
ft_extrctor_prp = {'layer1': {'conv': [1, 32, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]},
'layer2': {'conv': [32, 64, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]}}
hypoth_prp = {
'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128], 'act_fn': 'elu'},
'layer4': {'fc': [128, 10], 'act_fn': 'softmax'}}
discrm_prp = {'reverse_gradient': {},
'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128],
'act_fn': 'elu'},
'layer4': {'fc': [128, 1], 'act_fn': 'sigm'}}
mtl = MTL_pairwise(ft_extrctor_prp, hypoth_prp, discrm_prp, **args)
del mtl
if __name__ == '__main__':
main()
| 44.396419 | 161 | 0.556829 | [
"MIT"
] | cjshui/AMTNN | MTL.py | 17,359 | Python |
from django.test import TestCase
class PollsViewsTestCase(TestCase):
fixtures = ['polls_views_testdata.json']
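    # NOTE (assumption): the fixture file itself is not shown here; from the assertions below it
    # presumably defines one Poll (pk=1, "Are you learning about testing in Django?") with two
    # Choices: "Yes" with 1 vote and "No" with 0 votes.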
def test_index(self):
resp = self.client.get('/polls/')
self.assertEqual(resp.status_code, 200)
self.assertTrue('latest_poll_list' in resp.context)
self.assertEqual([poll.pk for poll in resp.context['latest_poll_list']], [1])
def test_choices(self):
resp = self.client.get('/polls/')
self.assertEqual(resp.status_code, 200)
self.assertTrue('latest_poll_list' in resp.context)
self.assertEqual([poll.pk for poll in resp.context['latest_poll_list']], [1])
poll_1 = resp.context['latest_poll_list'][0]
self.assertEqual(poll_1.question, 'Are you learning about testing in Django?')
self.assertEqual(poll_1.choice_set.count(), 2)
choices = poll_1.choice_set.all()
self.assertEqual(choices[0].choice, 'Yes')
self.assertEqual(choices[0].votes, 1)
self.assertEqual(choices[1].choice, 'No')
self.assertEqual(choices[1].votes, 0)
def test_detail(self):
resp = self.client.get('/polls/1/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['polls'].pk, 1)
self.assertEqual(resp.context['polls'].question, 'Are you learning about testing in Django?')
# Ensure that non-existent polls throw a 404.
resp = self.client.get('/polls/2/')
self.assertEqual(resp.status_code, 404)
| 41.583333 | 101 | 0.662659 | [
"MIT"
] | nokiadev/django-tdd-dojo | tdd/polls/tests/test_fixtures.py | 1,497 | Python |
"""
Support for a local MQTT broker.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/#use-the-embedded-broker
"""
import logging
import tempfile
from homeassistant.core import callback
from homeassistant.components.mqtt import PROTOCOL_311
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.util.async import run_coroutine_threadsafe
REQUIREMENTS = ['hbmqtt==0.8']
DEPENDENCIES = ['http']
def start(hass, server_config):
"""Initialize MQTT Server."""
from hbmqtt.broker import Broker, BrokerException
try:
passwd = tempfile.NamedTemporaryFile()
if server_config is None:
server_config, client_config = generate_config(hass, passwd)
else:
client_config = None
broker = Broker(server_config, hass.loop)
run_coroutine_threadsafe(broker.start(), hass.loop).result()
except BrokerException:
logging.getLogger(__name__).exception('Error initializing MQTT server')
return False, None
finally:
passwd.close()
@callback
def shutdown_mqtt_server(event):
"""Shut down the MQTT server."""
hass.async_add_job(broker.shutdown())
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown_mqtt_server)
return True, client_config
def generate_config(hass, passwd):
"""Generate a configuration based on current Home Assistant instance."""
config = {
'listeners': {
'default': {
'max-connections': 50000,
'bind': '0.0.0.0:1883',
'type': 'tcp',
},
'ws-1': {
'bind': '0.0.0.0:8080',
'type': 'ws',
},
},
'auth': {
'allow-anonymous': hass.config.api.api_password is None
},
'plugins': ['auth_anonymous'],
}
if hass.config.api.api_password:
username = 'homeassistant'
password = hass.config.api.api_password
        # Hash the password with the same passlib scheme hbmqtt uses for verification
from passlib.apps import custom_app_context
passwd.write(
'homeassistant:{}\n'.format(
custom_app_context.encrypt(
hass.config.api.api_password)).encode('utf-8'))
passwd.flush()
config['auth']['password-file'] = passwd.name
config['plugins'].append('auth_file')
else:
username = None
password = None
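    # Connection info handed back to the mqtt component:
    # (host, port, username, password, certificate, protocol); the None slot is presumably the
    # certificate path, which the embedded broker does not need.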
client_config = ('localhost', 1883, username, password, None, PROTOCOL_311)
return config, client_config
| 28.78022 | 79 | 0.628102 | [
"MIT"
] | 1Forward1Back/home-assistant | homeassistant/components/mqtt/server.py | 2,619 | Python |
import pyos
state = None
app = None
def buildAppEntry(a):
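    # Builds one row of the task list: the app's icon and title on the left, plus Pause/Resume
    # and Stop/Start buttons whose labels reflect the current state of the app's thread.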
cont = pyos.GUI.Container((0,0), color=state.getColorPalette().getColor("background"), width=app.ui.width-2, height=40)
ic = a.getIcon()
icon = None
if ic != False:
icon = pyos.GUI.Image((0,0), surface=a.getIcon())
else:
icon = pyos.GUI.Image((0,0), surface=state.getIcons().getLoadedIcon("unknown"))
title = pyos.GUI.Text((40, 10), a.title, state.getColorPalette().getColor("item"), 20)
pauseBtn = pyos.GUI.Button((app.ui.width-100, 0), "Pause", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"),
20, width=50, height=40, border=1, borderColor=state.getColorPalette().getColor("accent"),
onClick=registerPauseClick, onClickData=(a, cont))
stopBtn = pyos.GUI.Button((app.ui.width-50, 0), "Stop", state.getColorPalette().getColor("background"), state.getColorPalette().getColor("item"),
20, width=50, height=40, border=1, borderColor=state.getColorPalette().getColor("accent"),
onClick=registerStopClick, onClickData=(a, cont))
if a.thread.pause:
pauseBtn.textComponent.text = "Resume"
pauseBtn.refresh()
if a.thread.stop or a.thread.firstRun:
stopBtn.textComponent.text = "Start"
        stopBtn.refresh()  # refresh the stop button whose label was just changed
pauseBtn.textComponent.text = " - "
pauseBtn.refresh()
cont.addChild(icon)
cont.addChild(title)
cont.addChild(pauseBtn)
cont.addChild(stopBtn)
return cont
def registerPauseClick(a, cont):
pauseBtn = cont.getChildAt([app.ui.width-100,0])
if a.thread.stop or a.thread.firstRun: return
if a.thread.pause:
a.activate()
pauseBtn.textComponent.text = "Pause"
pauseBtn.refresh()
else:
a.deactivate(True)
pauseBtn.textComponent.text = "Resume"
pauseBtn.refresh()
def registerStopClick(a, cont):
pauseBtn = cont.getChildAt([app.ui.width-100,0])
stopBtn = cont.getChildAt([app.ui.width-50,0])
if a.thread.stop or a.thread.firstRun:
a.activate()
stopBtn.textComponent.text = "Stop"
stopBtn.refresh()
pauseBtn.textComponent.text = "Pause"
pauseBtn.refresh()
else:
a.deactivate(False)
stopBtn.textComponent.text = "Start"
stopBtn.refresh()
pauseBtn.textComponent.text = " - "
pauseBtn.refresh()
def loadList():
app.ui.clearChildren()
appList = pyos.GUI.ListPagedContainer((0, 0), width=app.ui.width, height=app.ui.height, color=state.getColorPalette().getColor("background"))
app.ui.addChild(appList)
for a in state.getApplicationList().getApplicationList():
appList.addChild(buildAppEntry(a))
appList.goToPage()
def onLoad(s, a):
global state, app
app = a
state = s
loadList()
pyos.GUI.WarningDialog("This application modifies the state of other apps. Using it may result in data loss.").display()
| 39.688312 | 152 | 0.637435 | [
"MIT"
] | furmada/PythonOS | apps/task-manager/__init__.py | 3,056 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import logging_ops
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(
ValueError, test_util.assert_ops_in_graph, {"bye": "Const"},
ops.get_default_graph())
self.assertRaises(
ValueError, test_util.assert_ops_in_graph, {"hello": "Variable"},
ops.get_default_graph())
def test_assert_equal_graph_def(self):
with tf.Graph().as_default() as g:
def_empty = g.as_graph_def()
tf.constant(5, name="five")
tf.constant(7, name="seven")
def_57 = g.as_graph_def()
with tf.Graph().as_default() as g:
tf.constant(7, name="seven")
tf.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
tf.test.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node 'seven"):
tf.test.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [self.checkedThread(target=err_func, args=(i,))
for i in range(10)]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
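    # Helper: raises an UnauthenticatedError that carries a node_def, an op and an original_op,
    # so the tests below can check which parts of that chain assertRaisesOpError matches
    # its message pattern against.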
with self.assertRaisesOpError(msg):
node_def = ops._NodeDef("op_type", "name")
node_def_orig = ops._NodeDef("op_type_orig", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 8)
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
def testForceGPU(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Cannot assign a device to node"):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = [True]
y = [15]
logging_ops.Assert(x, y).run()
if __name__ == "__main__":
googletest.main()
| 33.149485 | 80 | 0.679521 | [
"Apache-2.0"
] | agrawalnishant/tensorflow | tensorflow/python/framework/test_util_test.py | 6,431 | Python |
from baselines.deepq import models # noqa F401
from baselines.deepq.deepq_learner import DEEPQ # noqa F401
from baselines.deepq.deepq import learn # noqa F401
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa F401
def wrap_atari_dqn(env):
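    """Apply the standard DeepMind Atari wrappers to ``env`` (frame stacking on, pixel scaling off)."""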
from baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
| 44.888889 | 92 | 0.814356 | [
"MIT"
] | RDaneelOlivav/baselines | baselines/deepq/__init__.py | 404 | Python |
# Generated by Django 3.1.4 on 2020-12-07 19:08
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Agent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=20)),
('last_name', models.CharField(max_length=20)),
('age', models.IntegerField(default=0)),
('agent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leads.agent')),
],
),
]
| 59.25 | 329 | 0.643987 | [
"MIT"
] | 12345rana/getting-started-with-django | leads/migrations/0001_initial.py | 3,792 | Python |
# Generated from decafJavier.g4 by ANTLR 4.9.2
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3;")
buf.write("\u0120\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\3\2\3\2\3\2\3\2\7\29\n\2\f\2\16\2<\13")
buf.write("\2\3\2\7\2?\n\2\f\2\16\2B\13\2\3\2\3\2\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\7\3M\n\3\f\3\16\3P\13\3\3\3\3\3\3\4\3\4\3")
buf.write("\4\3\4\7\4X\n\4\f\4\16\4[\13\4\3\4\3\4\3\5\3\5\3\5\3\5")
buf.write("\3\5\3\6\3\6\5\6f\n\6\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\7\bt\n\b\f\b\16\bw\13\b\5\by\n\b\3\b")
buf.write("\3\b\3\b\3\t\3\t\5\t\u0080\n\t\3\n\3\n\7\n\u0084\n\n\f")
buf.write("\n\16\n\u0087\13\n\3\n\7\n\u008a\n\n\f\n\16\n\u008d\13")
buf.write("\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\5\13\u00a2\n")
buf.write("\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\5\13\u00b1\n\13\3\13\3\13\3\13\3\13\5")
buf.write("\13\u00b7\n\13\3\13\5\13\u00ba\n\13\3\13\3\13\3\13\3\13")
buf.write("\5\13\u00c0\n\13\3\f\3\f\3\f\3\f\3\f\7\f\u00c7\n\f\f\f")
buf.write("\16\f\u00ca\13\f\5\f\u00cc\n\f\3\f\3\f\3\r\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\r\3\r\3\r\7\r\u00db\n\r\f\r\16\r\u00de")
buf.write("\13\r\5\r\u00e0\n\r\3\r\3\r\5\r\u00e4\n\r\3\16\3\16\3")
buf.write("\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\5\16")
buf.write("\u00f2\n\16\3\16\3\16\3\16\3\16\7\16\u00f8\n\16\f\16\16")
buf.write("\16\u00fb\13\16\3\17\3\17\5\17\u00ff\n\17\3\20\3\20\5")
buf.write("\20\u0103\n\20\3\21\3\21\3\22\3\22\3\23\3\23\3\24\3\24")
buf.write("\3\25\3\25\3\25\5\25\u0110\n\25\3\26\3\26\3\26\3\26\5")
buf.write("\26\u0116\n\26\3\27\3\27\3\30\3\30\3\31\3\31\3\32\3\32")
buf.write("\3\32\2\3\32\33\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36")
buf.write(" \"$&(*,.\60\62\2\t\4\2\61\61\63\63\3\2%(\3\2,-\3\2\"")
buf.write("#\3\2\35!\4\2\13\13\r\r\3\2)+\2\u012e\2\64\3\2\2\2\4E")
buf.write("\3\2\2\2\6S\3\2\2\2\b^\3\2\2\2\ne\3\2\2\2\fg\3\2\2\2\16")
buf.write("i\3\2\2\2\20\177\3\2\2\2\22\u0081\3\2\2\2\24\u00bf\3\2")
buf.write("\2\2\26\u00c1\3\2\2\2\30\u00e3\3\2\2\2\32\u00f1\3\2\2")
buf.write("\2\34\u00fe\3\2\2\2\36\u0102\3\2\2\2 \u0104\3\2\2\2\"")
buf.write("\u0106\3\2\2\2$\u0108\3\2\2\2&\u010a\3\2\2\2(\u010f\3")
buf.write("\2\2\2*\u0115\3\2\2\2,\u0117\3\2\2\2.\u0119\3\2\2\2\60")
buf.write("\u011b\3\2\2\2\62\u011d\3\2\2\2\64\65\7\3\2\2\65\66\7")
buf.write("\4\2\2\66:\7\24\2\2\679\5\6\4\28\67\3\2\2\29<\3\2\2\2")
buf.write(":8\3\2\2\2:;\3\2\2\2;@\3\2\2\2<:\3\2\2\2=?\5\16\b\2>=")
buf.write("\3\2\2\2?B\3\2\2\2@>\3\2\2\2@A\3\2\2\2AC\3\2\2\2B@\3\2")
buf.write("\2\2CD\7\25\2\2D\3\3\2\2\2EF\5.\30\2FG\5\n\6\2GN\3\2\2")
buf.write("\2HI\7\32\2\2IJ\5.\30\2JK\5\n\6\2KM\3\2\2\2LH\3\2\2\2")
buf.write("MP\3\2\2\2NL\3\2\2\2NO\3\2\2\2OQ\3\2\2\2PN\3\2\2\2QR\7")
buf.write("\23\2\2R\5\3\2\2\2ST\5.\30\2TY\5\n\6\2UV\7\32\2\2VX\5")
buf.write("\n\6\2WU\3\2\2\2X[\3\2\2\2YW\3\2\2\2YZ\3\2\2\2Z\\\3\2")
buf.write("\2\2[Y\3\2\2\2\\]\7\23\2\2]\7\3\2\2\2^_\7.\2\2_`\7\26")
buf.write("\2\2`a\5 \21\2ab\7\27\2\2b\t\3\2\2\2cf\5\f\7\2df\5\b\5")
buf.write("\2ec\3\2\2\2ed\3\2\2\2f\13\3\2\2\2gh\7.\2\2h\r\3\2\2\2")
buf.write("ij\5\20\t\2jk\5\62\32\2kx\7\30\2\2lm\5.\30\2mn\5\f\7\2")
buf.write("nu\3\2\2\2op\7\32\2\2pq\5.\30\2qr\5\f\7\2rt\3\2\2\2so")
buf.write("\3\2\2\2tw\3\2\2\2us\3\2\2\2uv\3\2\2\2vy\3\2\2\2wu\3\2")
buf.write("\2\2xl\3\2\2\2xy\3\2\2\2yz\3\2\2\2z{\7\31\2\2{|\5\22\n")
buf.write("\2|\17\3\2\2\2}\u0080\5.\30\2~\u0080\7\21\2\2\177}\3\2")
buf.write("\2\2\177~\3\2\2\2\u0080\21\3\2\2\2\u0081\u0085\7\24\2")
buf.write("\2\u0082\u0084\5\4\3\2\u0083\u0082\3\2\2\2\u0084\u0087")
buf.write("\3\2\2\2\u0085\u0083\3\2\2\2\u0085\u0086\3\2\2\2\u0086")
buf.write("\u008b\3\2\2\2\u0087\u0085\3\2\2\2\u0088\u008a\5\24\13")
buf.write("\2\u0089\u0088\3\2\2\2\u008a\u008d\3\2\2\2\u008b\u0089")
buf.write("\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u008e\3\2\2\2\u008d")
buf.write("\u008b\3\2\2\2\u008e\u008f\7\25\2\2\u008f\23\3\2\2\2\u0090")
buf.write("\u0091\5\34\17\2\u0091\u0092\5\60\31\2\u0092\u0093\5\32")
buf.write("\16\2\u0093\u00c0\3\2\2\2\u0094\u0095\5\34\17\2\u0095")
buf.write("\u0096\5\60\31\2\u0096\u0097\5\32\16\2\u0097\u0098\7\23")
buf.write("\2\2\u0098\u00c0\3\2\2\2\u0099\u00c0\5\30\r\2\u009a\u009b")
buf.write("\7\5\2\2\u009b\u009c\7\30\2\2\u009c\u009d\5\32\16\2\u009d")
buf.write("\u009e\7\31\2\2\u009e\u00a1\5\22\n\2\u009f\u00a0\7\6\2")
buf.write("\2\u00a0\u00a2\5\22\n\2\u00a1\u009f\3\2\2\2\u00a1\u00a2")
buf.write("\3\2\2\2\u00a2\u00c0\3\2\2\2\u00a3\u00a4\5\f\7\2\u00a4")
buf.write("\u00a5\7)\2\2\u00a5\u00a6\5\32\16\2\u00a6\u00a7\7\23\2")
buf.write("\2\u00a7\u00c0\3\2\2\2\u00a8\u00a9\7\b\2\2\u00a9\u00aa")
buf.write("\5\32\16\2\u00aa\u00ab\7\23\2\2\u00ab\u00c0\3\2\2\2\u00ac")
buf.write("\u00ad\7\7\2\2\u00ad\u00b0\5\f\7\2\u00ae\u00af\7)\2\2")
buf.write("\u00af\u00b1\5 \21\2\u00b0\u00ae\3\2\2\2\u00b0\u00b1\3")
buf.write("\2\2\2\u00b1\u00b2\3\2\2\2\u00b2\u00b9\7\32\2\2\u00b3")
buf.write("\u00b6\5\f\7\2\u00b4\u00b5\7)\2\2\u00b5\u00b7\5 \21\2")
buf.write("\u00b6\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3")
buf.write("\2\2\2\u00b8\u00ba\5 \21\2\u00b9\u00b3\3\2\2\2\u00b9\u00b8")
buf.write("\3\2\2\2\u00ba\u00bb\3\2\2\2\u00bb\u00bc\5\22\n\2\u00bc")
buf.write("\u00c0\3\2\2\2\u00bd\u00be\7\t\2\2\u00be\u00c0\7\23\2")
buf.write("\2\u00bf\u0090\3\2\2\2\u00bf\u0094\3\2\2\2\u00bf\u0099")
buf.write("\3\2\2\2\u00bf\u009a\3\2\2\2\u00bf\u00a3\3\2\2\2\u00bf")
buf.write("\u00a8\3\2\2\2\u00bf\u00ac\3\2\2\2\u00bf\u00bd\3\2\2\2")
buf.write("\u00c0\25\3\2\2\2\u00c1\u00c2\5\62\32\2\u00c2\u00cb\7")
buf.write("\30\2\2\u00c3\u00c8\5\32\16\2\u00c4\u00c5\7\32\2\2\u00c5")
buf.write("\u00c7\5\32\16\2\u00c6\u00c4\3\2\2\2\u00c7\u00ca\3\2\2")
buf.write("\2\u00c8\u00c6\3\2\2\2\u00c8\u00c9\3\2\2\2\u00c9\u00cc")
buf.write("\3\2\2\2\u00ca\u00c8\3\2\2\2\u00cb\u00c3\3\2\2\2\u00cb")
buf.write("\u00cc\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00ce\7\31\2")
buf.write("\2\u00ce\27\3\2\2\2\u00cf\u00e4\5\26\f\2\u00d0\u00d1\5")
buf.write("\26\f\2\u00d1\u00d2\7\23\2\2\u00d2\u00e4\3\2\2\2\u00d3")
buf.write("\u00d4\7\22\2\2\u00d4\u00d5\7\30\2\2\u00d5\u00df\7\65")
buf.write("\2\2\u00d6\u00d7\7\32\2\2\u00d7\u00dc\5\36\20\2\u00d8")
buf.write("\u00d9\7\32\2\2\u00d9\u00db\5\36\20\2\u00da\u00d8\3\2")
buf.write("\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd")
buf.write("\3\2\2\2\u00dd\u00e0\3\2\2\2\u00de\u00dc\3\2\2\2\u00df")
buf.write("\u00d6\3\2\2\2\u00df\u00e0\3\2\2\2\u00e0\u00e1\3\2\2\2")
buf.write("\u00e1\u00e2\7\31\2\2\u00e2\u00e4\7\23\2\2\u00e3\u00cf")
buf.write("\3\2\2\2\u00e3\u00d0\3\2\2\2\u00e3\u00d3\3\2\2\2\u00e4")
buf.write("\31\3\2\2\2\u00e5\u00e6\b\16\1\2\u00e6\u00f2\5\34\17\2")
buf.write("\u00e7\u00f2\5(\25\2\u00e8\u00e9\7\36\2\2\u00e9\u00f2")
buf.write("\5\32\16\6\u00ea\u00f2\5\30\r\2\u00eb\u00ec\7$\2\2\u00ec")
buf.write("\u00f2\5\32\16\4\u00ed\u00ee\7\30\2\2\u00ee\u00ef\5\32")
buf.write("\16\2\u00ef\u00f0\7\31\2\2\u00f0\u00f2\3\2\2\2\u00f1\u00e5")
buf.write("\3\2\2\2\u00f1\u00e7\3\2\2\2\u00f1\u00e8\3\2\2\2\u00f1")
buf.write("\u00ea\3\2\2\2\u00f1\u00eb\3\2\2\2\u00f1\u00ed\3\2\2\2")
buf.write("\u00f2\u00f9\3\2\2\2\u00f3\u00f4\f\7\2\2\u00f4\u00f5\5")
buf.write("*\26\2\u00f5\u00f6\5\32\16\b\u00f6\u00f8\3\2\2\2\u00f7")
buf.write("\u00f3\3\2\2\2\u00f8\u00fb\3\2\2\2\u00f9\u00f7\3\2\2\2")
buf.write("\u00f9\u00fa\3\2\2\2\u00fa\33\3\2\2\2\u00fb\u00f9\3\2")
buf.write("\2\2\u00fc\u00ff\5\f\7\2\u00fd\u00ff\5\b\5\2\u00fe\u00fc")
buf.write("\3\2\2\2\u00fe\u00fd\3\2\2\2\u00ff\35\3\2\2\2\u0100\u0103")
buf.write("\5\32\16\2\u0101\u0103\7\65\2\2\u0102\u0100\3\2\2\2\u0102")
buf.write("\u0101\3\2\2\2\u0103\37\3\2\2\2\u0104\u0105\t\2\2\2\u0105")
buf.write("!\3\2\2\2\u0106\u0107\t\3\2\2\u0107#\3\2\2\2\u0108\u0109")
buf.write("\t\4\2\2\u0109%\3\2\2\2\u010a\u010b\t\5\2\2\u010b\'\3")
buf.write("\2\2\2\u010c\u0110\5 \21\2\u010d\u0110\7\60\2\2\u010e")
buf.write("\u0110\7\64\2\2\u010f\u010c\3\2\2\2\u010f\u010d\3\2\2")
buf.write("\2\u010f\u010e\3\2\2\2\u0110)\3\2\2\2\u0111\u0116\5,\27")
buf.write("\2\u0112\u0116\5\"\22\2\u0113\u0116\5$\23\2\u0114\u0116")
buf.write("\5&\24\2\u0115\u0111\3\2\2\2\u0115\u0112\3\2\2\2\u0115")
buf.write("\u0113\3\2\2\2\u0115\u0114\3\2\2\2\u0116+\3\2\2\2\u0117")
buf.write("\u0118\t\6\2\2\u0118-\3\2\2\2\u0119\u011a\t\7\2\2\u011a")
buf.write("/\3\2\2\2\u011b\u011c\t\b\2\2\u011c\61\3\2\2\2\u011d\u011e")
buf.write("\7.\2\2\u011e\63\3\2\2\2\34:@NYeux\177\u0085\u008b\u00a1")
buf.write("\u00b0\u00b6\u00b9\u00bf\u00c8\u00cb\u00dc\u00df\u00e3")
buf.write("\u00f1\u00f9\u00fe\u0102\u010f\u0115")
return buf.getvalue()
class decafJavierParser ( Parser ):
grammarFileName = "decafJavier.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'class'", "'Program'", "'if'", "'else'",
"'for'", "'return'", "'break'", "'continue'", "'boolean'",
"'char'", "'int'", "'string'", "'True'", "'False'",
"'void'", "'callout'", "';'", "'{'", "'}'", "'['",
"']'", "'('", "')'", "','", "'\"'", "'''", "'+'", "'-'",
"'*'", "'/'", "'%'", "'&&'", "'||'", "'!'", "'>'",
"'<'", "'>='", "'<='", "'='", "'+='", "'-='", "'=='",
"'!='" ]
symbolicNames = [ "<INVALID>", "CLASS", "PROGRAM", "IF", "ELSE", "FOR",
"RETURN", "BREAK", "CONTINUE", "BOOLEAN", "CHAR",
"INT", "STRING", "TRUE", "FALSE", "VOID", "CALLOUT",
"SEMICOLON", "LCURLY", "RCURLY", "LSQUARE", "RSQUARE",
"LROUND", "RROUND", "COMMA", "QUOTES", "APOSTROPHE",
"ADD", "SUB", "MULTIPLY", "DIVIDE", "REMINDER", "AND",
"OR", "NOT", "GREATER_OP", "LESS_OP", "GREATER_eq_op",
"LESS_eq_op", "EQUAL_OP", "ADD_eq_op", "SUB_eq_op",
"EQUALITY_OP", "UNEQUALITY_OP", "ID", "ALPHA", "CHAR_LITERAL",
"DECIMAL_LITERAL", "DIGIT", "HEX_LITERAL", "BOOL_LITERAL",
"STRING_LITERAL", "ALPHA_NUM", "HEX_DIGIT", "LINE_COMMENT",
"COMMENT", "NEWLINE", "WHITESPACE" ]
RULE_program = 0
RULE_vardeclr = 1
RULE_field_declr = 2
RULE_array_id = 3
RULE_field_var = 4
RULE_var_id = 5
RULE_method_declr = 6
RULE_return_type = 7
RULE_block = 8
RULE_statement = 9
RULE_method_call_inter = 10
RULE_method_call = 11
RULE_expr = 12
RULE_location = 13
RULE_callout_arg = 14
RULE_int_literal = 15
RULE_rel_op = 16
RULE_eq_op = 17
RULE_cond_op = 18
RULE_literal = 19
RULE_bin_op = 20
RULE_arith_op = 21
RULE_var_type = 22
RULE_assign_op = 23
RULE_method_name = 24
ruleNames = [ "program", "vardeclr", "field_declr", "array_id", "field_var",
"var_id", "method_declr", "return_type", "block", "statement",
"method_call_inter", "method_call", "expr", "location",
"callout_arg", "int_literal", "rel_op", "eq_op", "cond_op",
"literal", "bin_op", "arith_op", "var_type", "assign_op",
"method_name" ]
EOF = Token.EOF
CLASS=1
PROGRAM=2
IF=3
ELSE=4
FOR=5
RETURN=6
BREAK=7
CONTINUE=8
BOOLEAN=9
CHAR=10
INT=11
STRING=12
TRUE=13
FALSE=14
VOID=15
CALLOUT=16
SEMICOLON=17
LCURLY=18
RCURLY=19
LSQUARE=20
RSQUARE=21
LROUND=22
RROUND=23
COMMA=24
QUOTES=25
APOSTROPHE=26
ADD=27
SUB=28
MULTIPLY=29
DIVIDE=30
REMINDER=31
AND=32
OR=33
NOT=34
GREATER_OP=35
LESS_OP=36
GREATER_eq_op=37
LESS_eq_op=38
EQUAL_OP=39
ADD_eq_op=40
SUB_eq_op=41
EQUALITY_OP=42
UNEQUALITY_OP=43
ID=44
ALPHA=45
CHAR_LITERAL=46
DECIMAL_LITERAL=47
DIGIT=48
HEX_LITERAL=49
BOOL_LITERAL=50
STRING_LITERAL=51
ALPHA_NUM=52
HEX_DIGIT=53
LINE_COMMENT=54
COMMENT=55
NEWLINE=56
WHITESPACE=57
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.2")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgramContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CLASS(self):
return self.getToken(decafJavierParser.CLASS, 0)
def PROGRAM(self):
return self.getToken(decafJavierParser.PROGRAM, 0)
def LCURLY(self):
return self.getToken(decafJavierParser.LCURLY, 0)
def RCURLY(self):
return self.getToken(decafJavierParser.RCURLY, 0)
def field_declr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Field_declrContext)
else:
return self.getTypedRuleContext(decafJavierParser.Field_declrContext,i)
def method_declr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Method_declrContext)
else:
return self.getTypedRuleContext(decafJavierParser.Method_declrContext,i)
def getRuleIndex(self):
return decafJavierParser.RULE_program
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProgram" ):
listener.enterProgram(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProgram" ):
listener.exitProgram(self)
def program(self):
localctx = decafJavierParser.ProgramContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_program)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 50
self.match(decafJavierParser.CLASS)
self.state = 51
self.match(decafJavierParser.PROGRAM)
self.state = 52
self.match(decafJavierParser.LCURLY)
self.state = 56
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 53
self.field_declr()
self.state = 58
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
self.state = 62
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.BOOLEAN) | (1 << decafJavierParser.INT) | (1 << decafJavierParser.VOID))) != 0):
self.state = 59
self.method_declr()
self.state = 64
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 65
self.match(decafJavierParser.RCURLY)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VardeclrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def var_type(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_typeContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,i)
def field_var(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Field_varContext)
else:
return self.getTypedRuleContext(decafJavierParser.Field_varContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_vardeclr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVardeclr" ):
listener.enterVardeclr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVardeclr" ):
listener.exitVardeclr(self)
def vardeclr(self):
localctx = decafJavierParser.VardeclrContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_vardeclr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 67
self.var_type()
self.state = 68
self.field_var()
self.state = 76
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 70
self.match(decafJavierParser.COMMA)
self.state = 71
self.var_type()
self.state = 72
self.field_var()
self.state = 78
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 79
self.match(decafJavierParser.SEMICOLON)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Field_declrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_type(self):
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,0)
def field_var(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Field_varContext)
else:
return self.getTypedRuleContext(decafJavierParser.Field_varContext,i)
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_field_declr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterField_declr" ):
listener.enterField_declr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitField_declr" ):
listener.exitField_declr(self)
def field_declr(self):
localctx = decafJavierParser.Field_declrContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_field_declr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 81
self.var_type()
self.state = 82
self.field_var()
self.state = 87
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 83
self.match(decafJavierParser.COMMA)
self.state = 84
self.field_var()
self.state = 89
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 90
self.match(decafJavierParser.SEMICOLON)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Array_idContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(decafJavierParser.ID, 0)
def LSQUARE(self):
return self.getToken(decafJavierParser.LSQUARE, 0)
def int_literal(self):
return self.getTypedRuleContext(decafJavierParser.Int_literalContext,0)
def RSQUARE(self):
return self.getToken(decafJavierParser.RSQUARE, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_array_id
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArray_id" ):
listener.enterArray_id(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArray_id" ):
listener.exitArray_id(self)
def array_id(self):
localctx = decafJavierParser.Array_idContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_array_id)
try:
self.enterOuterAlt(localctx, 1)
self.state = 92
self.match(decafJavierParser.ID)
self.state = 93
self.match(decafJavierParser.LSQUARE)
self.state = 94
self.int_literal()
self.state = 95
self.match(decafJavierParser.RSQUARE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Field_varContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_id(self):
return self.getTypedRuleContext(decafJavierParser.Var_idContext,0)
def array_id(self):
return self.getTypedRuleContext(decafJavierParser.Array_idContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_field_var
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterField_var" ):
listener.enterField_var(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitField_var" ):
listener.exitField_var(self)
def field_var(self):
localctx = decafJavierParser.Field_varContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_field_var)
try:
self.state = 99
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 97
self.var_id()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 98
self.array_id()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Var_idContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(decafJavierParser.ID, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_var_id
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVar_id" ):
listener.enterVar_id(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVar_id" ):
listener.exitVar_id(self)
def var_id(self):
localctx = decafJavierParser.Var_idContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_var_id)
try:
self.enterOuterAlt(localctx, 1)
self.state = 101
self.match(decafJavierParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_declrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def return_type(self):
return self.getTypedRuleContext(decafJavierParser.Return_typeContext,0)
def method_name(self):
return self.getTypedRuleContext(decafJavierParser.Method_nameContext,0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def block(self):
return self.getTypedRuleContext(decafJavierParser.BlockContext,0)
def var_type(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_typeContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,i)
def var_id(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_idContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_idContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_method_declr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_declr" ):
listener.enterMethod_declr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_declr" ):
listener.exitMethod_declr(self)
def method_declr(self):
localctx = decafJavierParser.Method_declrContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_method_declr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 103
self.return_type()
self.state = 104
self.method_name()
self.state = 105
self.match(decafJavierParser.LROUND)
self.state = 118
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.BOOLEAN or _la==decafJavierParser.INT:
self.state = 106
self.var_type()
self.state = 107
self.var_id()
self.state = 115
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 109
self.match(decafJavierParser.COMMA)
self.state = 110
self.var_type()
self.state = 111
self.var_id()
self.state = 117
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 120
self.match(decafJavierParser.RROUND)
self.state = 121
self.block()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Return_typeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_type(self):
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,0)
def VOID(self):
return self.getToken(decafJavierParser.VOID, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_return_type
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterReturn_type" ):
listener.enterReturn_type(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitReturn_type" ):
listener.exitReturn_type(self)
def return_type(self):
localctx = decafJavierParser.Return_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_return_type)
try:
self.enterOuterAlt(localctx, 1)
self.state = 125
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.BOOLEAN, decafJavierParser.INT]:
self.state = 123
self.var_type()
pass
elif token in [decafJavierParser.VOID]:
self.state = 124
self.match(decafJavierParser.VOID)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LCURLY(self):
return self.getToken(decafJavierParser.LCURLY, 0)
def RCURLY(self):
return self.getToken(decafJavierParser.RCURLY, 0)
def vardeclr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.VardeclrContext)
else:
return self.getTypedRuleContext(decafJavierParser.VardeclrContext,i)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.StatementContext)
else:
return self.getTypedRuleContext(decafJavierParser.StatementContext,i)
def getRuleIndex(self):
return decafJavierParser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def block(self):
localctx = decafJavierParser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 127
self.match(decafJavierParser.LCURLY)
self.state = 131
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.BOOLEAN or _la==decafJavierParser.INT:
self.state = 128
self.vardeclr()
self.state = 133
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 137
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.IF) | (1 << decafJavierParser.FOR) | (1 << decafJavierParser.RETURN) | (1 << decafJavierParser.BREAK) | (1 << decafJavierParser.CALLOUT) | (1 << decafJavierParser.ID))) != 0):
self.state = 134
self.statement()
self.state = 139
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 140
self.match(decafJavierParser.RCURLY)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatementContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def location(self):
return self.getTypedRuleContext(decafJavierParser.LocationContext,0)
def assign_op(self):
return self.getTypedRuleContext(decafJavierParser.Assign_opContext,0)
def expr(self):
return self.getTypedRuleContext(decafJavierParser.ExprContext,0)
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def method_call(self):
return self.getTypedRuleContext(decafJavierParser.Method_callContext,0)
def IF(self):
return self.getToken(decafJavierParser.IF, 0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def block(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.BlockContext)
else:
return self.getTypedRuleContext(decafJavierParser.BlockContext,i)
def ELSE(self):
return self.getToken(decafJavierParser.ELSE, 0)
def var_id(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_idContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_idContext,i)
def EQUAL_OP(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.EQUAL_OP)
else:
return self.getToken(decafJavierParser.EQUAL_OP, i)
def RETURN(self):
return self.getToken(decafJavierParser.RETURN, 0)
def FOR(self):
return self.getToken(decafJavierParser.FOR, 0)
def COMMA(self):
return self.getToken(decafJavierParser.COMMA, 0)
def int_literal(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Int_literalContext)
else:
return self.getTypedRuleContext(decafJavierParser.Int_literalContext,i)
def BREAK(self):
return self.getToken(decafJavierParser.BREAK, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStatement" ):
listener.enterStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStatement" ):
listener.exitStatement(self)
def statement(self):
localctx = decafJavierParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_statement)
self._la = 0 # Token type
try:
self.state = 189
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,14,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 142
self.location()
self.state = 143
self.assign_op()
self.state = 144
self.expr(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 146
self.location()
self.state = 147
self.assign_op()
self.state = 148
self.expr(0)
self.state = 149
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 151
self.method_call()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 152
self.match(decafJavierParser.IF)
self.state = 153
self.match(decafJavierParser.LROUND)
self.state = 154
self.expr(0)
self.state = 155
self.match(decafJavierParser.RROUND)
self.state = 156
self.block()
self.state = 159
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.ELSE:
self.state = 157
self.match(decafJavierParser.ELSE)
self.state = 158
self.block()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 161
self.var_id()
self.state = 162
self.match(decafJavierParser.EQUAL_OP)
self.state = 163
self.expr(0)
self.state = 164
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 166
self.match(decafJavierParser.RETURN)
self.state = 167
self.expr(0)
self.state = 168
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 170
self.match(decafJavierParser.FOR)
self.state = 171
self.var_id()
self.state = 174
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.EQUAL_OP:
self.state = 172
self.match(decafJavierParser.EQUAL_OP)
self.state = 173
self.int_literal()
self.state = 176
self.match(decafJavierParser.COMMA)
self.state = 183
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.ID]:
self.state = 177
self.var_id()
self.state = 180
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.EQUAL_OP:
self.state = 178
self.match(decafJavierParser.EQUAL_OP)
self.state = 179
self.int_literal()
pass
elif token in [decafJavierParser.DECIMAL_LITERAL, decafJavierParser.HEX_LITERAL]:
self.state = 182
self.int_literal()
pass
else:
raise NoViableAltException(self)
self.state = 185
self.block()
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 187
self.match(decafJavierParser.BREAK)
self.state = 188
self.match(decafJavierParser.SEMICOLON)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_call_interContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def method_name(self):
return self.getTypedRuleContext(decafJavierParser.Method_nameContext,0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.ExprContext)
else:
return self.getTypedRuleContext(decafJavierParser.ExprContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_method_call_inter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_call_inter" ):
listener.enterMethod_call_inter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_call_inter" ):
listener.exitMethod_call_inter(self)
def method_call_inter(self):
localctx = decafJavierParser.Method_call_interContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_method_call_inter)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 191
self.method_name()
self.state = 192
self.match(decafJavierParser.LROUND)
self.state = 201
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.CALLOUT) | (1 << decafJavierParser.LROUND) | (1 << decafJavierParser.SUB) | (1 << decafJavierParser.NOT) | (1 << decafJavierParser.ID) | (1 << decafJavierParser.CHAR_LITERAL) | (1 << decafJavierParser.DECIMAL_LITERAL) | (1 << decafJavierParser.HEX_LITERAL) | (1 << decafJavierParser.BOOL_LITERAL))) != 0):
self.state = 193
self.expr(0)
self.state = 198
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 194
self.match(decafJavierParser.COMMA)
self.state = 195
self.expr(0)
self.state = 200
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 203
self.match(decafJavierParser.RROUND)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_callContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def method_call_inter(self):
return self.getTypedRuleContext(decafJavierParser.Method_call_interContext,0)
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def CALLOUT(self):
return self.getToken(decafJavierParser.CALLOUT, 0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def STRING_LITERAL(self):
return self.getToken(decafJavierParser.STRING_LITERAL, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def callout_arg(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Callout_argContext)
else:
return self.getTypedRuleContext(decafJavierParser.Callout_argContext,i)
def getRuleIndex(self):
return decafJavierParser.RULE_method_call
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_call" ):
listener.enterMethod_call(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_call" ):
listener.exitMethod_call(self)
def method_call(self):
localctx = decafJavierParser.Method_callContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_method_call)
self._la = 0 # Token type
try:
self.state = 225
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,19,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 205
self.method_call_inter()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 206
self.method_call_inter()
self.state = 207
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 209
self.match(decafJavierParser.CALLOUT)
self.state = 210
self.match(decafJavierParser.LROUND)
self.state = 211
self.match(decafJavierParser.STRING_LITERAL)
self.state = 221
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.COMMA:
self.state = 212
self.match(decafJavierParser.COMMA)
self.state = 213
self.callout_arg()
self.state = 218
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 214
self.match(decafJavierParser.COMMA)
self.state = 215
self.callout_arg()
self.state = 220
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 223
self.match(decafJavierParser.RROUND)
self.state = 224
self.match(decafJavierParser.SEMICOLON)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def location(self):
return self.getTypedRuleContext(decafJavierParser.LocationContext,0)
def literal(self):
return self.getTypedRuleContext(decafJavierParser.LiteralContext,0)
def SUB(self):
return self.getToken(decafJavierParser.SUB, 0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.ExprContext)
else:
return self.getTypedRuleContext(decafJavierParser.ExprContext,i)
def method_call(self):
return self.getTypedRuleContext(decafJavierParser.Method_callContext,0)
def NOT(self):
return self.getToken(decafJavierParser.NOT, 0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def bin_op(self):
return self.getTypedRuleContext(decafJavierParser.Bin_opContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_expr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr" ):
listener.enterExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr" ):
listener.exitExpr(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = decafJavierParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 24
self.enterRecursionRule(localctx, 24, self.RULE_expr, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 239
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
self.state = 228
self.location()
pass
elif la_ == 2:
self.state = 229
self.literal()
pass
elif la_ == 3:
self.state = 230
self.match(decafJavierParser.SUB)
self.state = 231
self.expr(4)
pass
elif la_ == 4:
self.state = 232
self.method_call()
pass
elif la_ == 5:
self.state = 233
self.match(decafJavierParser.NOT)
self.state = 234
self.expr(2)
pass
elif la_ == 6:
self.state = 235
self.match(decafJavierParser.LROUND)
self.state = 236
self.expr(0)
self.state = 237
self.match(decafJavierParser.RROUND)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 247
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = decafJavierParser.ExprContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 241
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 242
self.bin_op()
self.state = 243
self.expr(6)
self.state = 249
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class LocationContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_id(self):
return self.getTypedRuleContext(decafJavierParser.Var_idContext,0)
def array_id(self):
return self.getTypedRuleContext(decafJavierParser.Array_idContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_location
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLocation" ):
listener.enterLocation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLocation" ):
listener.exitLocation(self)
def location(self):
localctx = decafJavierParser.LocationContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_location)
try:
self.state = 252
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 250
self.var_id()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 251
self.array_id()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Callout_argContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self):
return self.getTypedRuleContext(decafJavierParser.ExprContext,0)
def STRING_LITERAL(self):
return self.getToken(decafJavierParser.STRING_LITERAL, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_callout_arg
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCallout_arg" ):
listener.enterCallout_arg(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCallout_arg" ):
listener.exitCallout_arg(self)
def callout_arg(self):
localctx = decafJavierParser.Callout_argContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_callout_arg)
try:
self.state = 256
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.CALLOUT, decafJavierParser.LROUND, decafJavierParser.SUB, decafJavierParser.NOT, decafJavierParser.ID, decafJavierParser.CHAR_LITERAL, decafJavierParser.DECIMAL_LITERAL, decafJavierParser.HEX_LITERAL, decafJavierParser.BOOL_LITERAL]:
self.enterOuterAlt(localctx, 1)
self.state = 254
self.expr(0)
pass
elif token in [decafJavierParser.STRING_LITERAL]:
self.enterOuterAlt(localctx, 2)
self.state = 255
self.match(decafJavierParser.STRING_LITERAL)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Int_literalContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def DECIMAL_LITERAL(self):
return self.getToken(decafJavierParser.DECIMAL_LITERAL, 0)
def HEX_LITERAL(self):
return self.getToken(decafJavierParser.HEX_LITERAL, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_int_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInt_literal" ):
listener.enterInt_literal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInt_literal" ):
listener.exitInt_literal(self)
def int_literal(self):
localctx = decafJavierParser.Int_literalContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_int_literal)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 258
_la = self._input.LA(1)
if not(_la==decafJavierParser.DECIMAL_LITERAL or _la==decafJavierParser.HEX_LITERAL):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Rel_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def GREATER_OP(self):
return self.getToken(decafJavierParser.GREATER_OP, 0)
def LESS_OP(self):
return self.getToken(decafJavierParser.LESS_OP, 0)
def LESS_eq_op(self):
return self.getToken(decafJavierParser.LESS_eq_op, 0)
def GREATER_eq_op(self):
return self.getToken(decafJavierParser.GREATER_eq_op, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_rel_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRel_op" ):
listener.enterRel_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRel_op" ):
listener.exitRel_op(self)
def rel_op(self):
localctx = decafJavierParser.Rel_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_rel_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 260
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.GREATER_OP) | (1 << decafJavierParser.LESS_OP) | (1 << decafJavierParser.GREATER_eq_op) | (1 << decafJavierParser.LESS_eq_op))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Eq_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EQUALITY_OP(self):
return self.getToken(decafJavierParser.EQUALITY_OP, 0)
def UNEQUALITY_OP(self):
return self.getToken(decafJavierParser.UNEQUALITY_OP, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_eq_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEq_op" ):
listener.enterEq_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEq_op" ):
listener.exitEq_op(self)
def eq_op(self):
localctx = decafJavierParser.Eq_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_eq_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 262
_la = self._input.LA(1)
if not(_la==decafJavierParser.EQUALITY_OP or _la==decafJavierParser.UNEQUALITY_OP):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cond_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AND(self):
return self.getToken(decafJavierParser.AND, 0)
def OR(self):
return self.getToken(decafJavierParser.OR, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_cond_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCond_op" ):
listener.enterCond_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCond_op" ):
listener.exitCond_op(self)
def cond_op(self):
localctx = decafJavierParser.Cond_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_cond_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 264
_la = self._input.LA(1)
if not(_la==decafJavierParser.AND or _la==decafJavierParser.OR):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LiteralContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def int_literal(self):
return self.getTypedRuleContext(decafJavierParser.Int_literalContext,0)
def CHAR_LITERAL(self):
return self.getToken(decafJavierParser.CHAR_LITERAL, 0)
def BOOL_LITERAL(self):
return self.getToken(decafJavierParser.BOOL_LITERAL, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLiteral" ):
listener.enterLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLiteral" ):
listener.exitLiteral(self)
def literal(self):
localctx = decafJavierParser.LiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_literal)
try:
self.state = 269
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.DECIMAL_LITERAL, decafJavierParser.HEX_LITERAL]:
self.enterOuterAlt(localctx, 1)
self.state = 266
self.int_literal()
pass
elif token in [decafJavierParser.CHAR_LITERAL]:
self.enterOuterAlt(localctx, 2)
self.state = 267
self.match(decafJavierParser.CHAR_LITERAL)
pass
elif token in [decafJavierParser.BOOL_LITERAL]:
self.enterOuterAlt(localctx, 3)
self.state = 268
self.match(decafJavierParser.BOOL_LITERAL)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Bin_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def arith_op(self):
return self.getTypedRuleContext(decafJavierParser.Arith_opContext,0)
def rel_op(self):
return self.getTypedRuleContext(decafJavierParser.Rel_opContext,0)
def eq_op(self):
return self.getTypedRuleContext(decafJavierParser.Eq_opContext,0)
def cond_op(self):
return self.getTypedRuleContext(decafJavierParser.Cond_opContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_bin_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBin_op" ):
listener.enterBin_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBin_op" ):
listener.exitBin_op(self)
def bin_op(self):
localctx = decafJavierParser.Bin_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_bin_op)
try:
self.state = 275
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.ADD, decafJavierParser.SUB, decafJavierParser.MULTIPLY, decafJavierParser.DIVIDE, decafJavierParser.REMINDER]:
self.enterOuterAlt(localctx, 1)
self.state = 271
self.arith_op()
pass
elif token in [decafJavierParser.GREATER_OP, decafJavierParser.LESS_OP, decafJavierParser.GREATER_eq_op, decafJavierParser.LESS_eq_op]:
self.enterOuterAlt(localctx, 2)
self.state = 272
self.rel_op()
pass
elif token in [decafJavierParser.EQUALITY_OP, decafJavierParser.UNEQUALITY_OP]:
self.enterOuterAlt(localctx, 3)
self.state = 273
self.eq_op()
pass
elif token in [decafJavierParser.AND, decafJavierParser.OR]:
self.enterOuterAlt(localctx, 4)
self.state = 274
self.cond_op()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Arith_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ADD(self):
return self.getToken(decafJavierParser.ADD, 0)
def SUB(self):
return self.getToken(decafJavierParser.SUB, 0)
def MULTIPLY(self):
return self.getToken(decafJavierParser.MULTIPLY, 0)
def DIVIDE(self):
return self.getToken(decafJavierParser.DIVIDE, 0)
def REMINDER(self):
return self.getToken(decafJavierParser.REMINDER, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_arith_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArith_op" ):
listener.enterArith_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArith_op" ):
listener.exitArith_op(self)
def arith_op(self):
localctx = decafJavierParser.Arith_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_arith_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 277
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.ADD) | (1 << decafJavierParser.SUB) | (1 << decafJavierParser.MULTIPLY) | (1 << decafJavierParser.DIVIDE) | (1 << decafJavierParser.REMINDER))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Var_typeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def INT(self):
return self.getToken(decafJavierParser.INT, 0)
def BOOLEAN(self):
return self.getToken(decafJavierParser.BOOLEAN, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_var_type
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVar_type" ):
listener.enterVar_type(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVar_type" ):
listener.exitVar_type(self)
def var_type(self):
localctx = decafJavierParser.Var_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_var_type)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 279
_la = self._input.LA(1)
if not(_la==decafJavierParser.BOOLEAN or _la==decafJavierParser.INT):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Assign_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EQUAL_OP(self):
return self.getToken(decafJavierParser.EQUAL_OP, 0)
def ADD_eq_op(self):
return self.getToken(decafJavierParser.ADD_eq_op, 0)
def SUB_eq_op(self):
return self.getToken(decafJavierParser.SUB_eq_op, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_assign_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssign_op" ):
listener.enterAssign_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssign_op" ):
listener.exitAssign_op(self)
def assign_op(self):
localctx = decafJavierParser.Assign_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_assign_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 281
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.EQUAL_OP) | (1 << decafJavierParser.ADD_eq_op) | (1 << decafJavierParser.SUB_eq_op))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_nameContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(decafJavierParser.ID, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_method_name
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_name" ):
listener.enterMethod_name(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_name" ):
listener.exitMethod_name(self)
def method_name(self):
localctx = decafJavierParser.Method_nameContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_method_name)
try:
self.enterOuterAlt(localctx, 1)
self.state = 283
self.match(decafJavierParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[12] = self.expr_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 5)
| 35.501364 | 385 | 0.575535 | [
"MIT"
] | tej17584/compis_Proyecto1 | Python3/decafJavierParser.py | 78,103 | Python |
import numpy as np
from dct_image_transform.dct import dct2
def reflection(image,axis=0):
'''
    Reflect (mirror) an image that has been DCT-transformed in 8x8 blocks (a "DCT image" below).
    Parameters
    ----------
    image: 2-D array representing an image whose width and height are multiples of 8. Behavior is undefined when they are not multiples of 8.
    axis: the axis along which to reflect. Default is `axis=0`.
    Returns
    -------
    Returns a 2-D array representing the DCT image obtained by reflecting `image`. The value of `image` is not changed.
Examples
--------
>>> import numpy as np
>>> a = np.arange(64).reshape((8,8))
>>> a
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 61, 62, 63]])
>>> dct_image_transform.reflection.reflection(a,axis=0)
array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,
3.00000000e+00, 4.00000000e+00, 5.00000000e+00,
6.00000000e+00, 7.00000000e+00],
[-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,
-1.10000000e+01, -1.20000000e+01, -1.30000000e+01,
-1.40000000e+01, -1.50000000e+01],
[ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,
1.90000000e+01, 2.00000000e+01, 2.10000000e+01,
2.20000000e+01, 2.30000000e+01],
[-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,
-2.70000000e+01, -2.80000000e+01, -2.90000000e+01,
-3.00000000e+01, -3.10000000e+01],
[ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,
3.50000000e+01, 3.60000000e+01, 3.70000000e+01,
3.80000000e+01, 3.90000000e+01],
[-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,
-4.30000000e+01, -4.40000000e+01, -4.50000000e+01,
-4.60000000e+01, -4.70000000e+01],
[ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,
5.10000000e+01, 5.20000000e+01, 5.30000000e+01,
5.40000000e+01, 5.50000000e+01],
[-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,
-5.90000000e+01, -6.00000000e+01, -6.10000000e+01,
-6.20000000e+01, -6.30000000e+01]])
'''
    # Build the 8x8 row-reversal (anti-diagonal permutation) matrix and take its 2-D DCT,
    # so the mirror can be applied directly to the DCT-domain blocks.
    R = np.zeros((8, 8), dtype=float)  # plain float: np.float is removed in recent NumPy versions
    for i in range(8):
        R[i, 7 - i] = 1
    R = dct2(R)
    if axis == 0:
        # split into 8-row strips, reverse the strip order, and mirror each strip with R
        # (split indices use shape[0], the number of rows; shape[1] only worked for square images)
        return np.vstack(list(map(lambda m: np.dot(R, m), np.flip(np.vsplit(image, range(8, image.shape[0], 8)), 0))))
elif axis == 1:
return np.hstack(list(map(lambda m:np.dot(m,R),np.flip(np.hsplit(image,range(8,image.shape[1],8)),0)))) | 40.984375 | 111 | 0.544034 | [
"MIT"
] | kanpurin/dctimagetransform | dct_image_transform/reflection.py | 2,823 | Python |
from __future__ import absolute_import
from django.conf import settings
import ujson
from zproject.backends import password_auth_enabled, dev_auth_enabled, google_auth_enabled, github_auth_enabled
def add_settings(request):
realm = request.user.realm if hasattr(request.user, "realm") else None
return {
# We use the not_voyager variable name so that templates
# will render even if the appropriate context is not provided
# to the template
'not_voyager': not settings.VOYAGER,
'zulip_com': settings.ZULIP_COM,
'custom_logo_url': settings.CUSTOM_LOGO_URL,
'register_link_disabled': settings.REGISTER_LINK_DISABLED,
'show_oss_announcement': settings.SHOW_OSS_ANNOUNCEMENT,
'zulip_admin': settings.ZULIP_ADMINISTRATOR,
'login_url': settings.HOME_NOT_LOGGED_IN,
'only_sso': settings.ONLY_SSO,
'external_api_path': settings.EXTERNAL_API_PATH,
'external_api_uri': settings.EXTERNAL_API_URI,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'api_site_required': settings.EXTERNAL_API_PATH != "api.zulip.com",
'email_integration_enabled': settings.EMAIL_GATEWAY_BOT != "",
'email_gateway_example': settings.EMAIL_GATEWAY_EXAMPLE,
'open_realm_creation': settings.OPEN_REALM_CREATION,
'password_auth_enabled': password_auth_enabled(realm),
'dev_auth_enabled': dev_auth_enabled(),
'google_auth_enabled': google_auth_enabled(),
'github_auth_enabled': github_auth_enabled(),
'development_environment': settings.DEVELOPMENT,
}
def add_metrics(request):
return {
'dropboxAppKey': settings.DROPBOX_APP_KEY
}
| 48.051282 | 111 | 0.664354 | [
"Apache-2.0"
] | yicongwu/zulip | zerver/context_processors.py | 1,874 | Python |
from ._base import *
DEBUG = True
| 8.75 | 20 | 0.685714 | [
"MIT"
] | AlexanderTN/Django-3-Web-Development-Cookbook-Fourth-Edition | ch01/myproject_docker/src/myproject/myproject/settings/dev.py | 35 | Python |
# div.py
def main():
    bread = 10  # ten loaves of bread
    people = int(input("How many people? "))
    print("Bread per person: ", bread / people)
    print("Enjoy your meal.")
main()
| 15.8 | 39 | 0.481013 | [
"MIT"
] | chiwoongMOON/202111PythonGrammarStudy | module/chapter14/div.py | 194 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""The setup script."""
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 7):
print(
"glean_parser requires at least Python 3.7",
file=sys.stderr
)
sys.exit(1)
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
'PyYAML>=3.13',
'jsonschema>=3.0.0',
'inflection>=0.3.1',
'Jinja2>=2.10',
'diskcache>=3.1.0',
'appdirs>=1.4.3'
]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Michael Droettboom",
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
description="Parser tools for Mozilla's glean telemetry",
entry_points={
'console_scripts': [
'glean_parser=glean_parser.__main__:main',
],
},
install_requires=requirements,
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='glean_parser',
name='glean_parser',
packages=find_packages(include=['glean_parser']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/mozilla/glean_parser',
version='0.23.1',
zip_safe=False,
)
| 25.069444 | 69 | 0.642659 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | mdboom/glean_parser | setup.py | 1,805 | Python |
import taichi as ti
from mpl_toolkits.mplot3d import Axes3D
import os
import math
import numpy as np
import random
import cv2
import matplotlib.pyplot as plt
import time
import taichi as tc
real = ti.f32
ti.set_default_fp(real)
dim = 3
# this will be overwritten
n_particles = 0
n_solid_particles = 0
n_actuators = 0
n_grid = 64
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 2e-3
p_vol = 1
E = 10
# TODO: update
mu = E
la = E
max_steps = 512
steps = 512
gravity = 10
target = [0.8, 0.2, 0.2]
use_apic = False
scalar = lambda: ti.var(dt=real)
vec = lambda: ti.Vector(dim, dt=real)
mat = lambda: ti.Matrix(dim, dim, dt=real)
actuator_id = ti.global_var(ti.i32)
particle_type = ti.global_var(ti.i32)
x, v = vec(), vec()
grid_v_in, grid_m_in = vec(), scalar()
grid_v_out = vec()
C, F = mat(), mat()
screen = ti.Vector(3, dt=real)
loss = scalar()
n_sin_waves = 4
weights = scalar()
bias = scalar()
x_avg = vec()
actuation = scalar()
actuation_omega = 40
act_strength = 5
# ti.cfg.arch = ti.x86_64
# ti.cfg.use_llvm = True
ti.cfg.arch = ti.cuda
# ti.cfg.print_ir = True
visualize_resolution = 256
@ti.layout
def place():
ti.root.dense(ti.ij, (n_actuators, n_sin_waves)).place(weights)
ti.root.dense(ti.i, n_actuators).place(bias)
ti.root.dense(ti.ij, (max_steps, n_actuators)).place(actuation)
ti.root.dense(ti.i, n_particles).place(actuator_id, particle_type)
ti.root.dense(ti.l, max_steps).dense(ti.k, n_particles).place(x, v, C, F)
ti.root.dense(ti.ijk, n_grid).place(grid_v_in, grid_m_in, grid_v_out)
ti.root.place(loss, x_avg)
ti.root.dense(ti.ij, (visualize_resolution, visualize_resolution)).place(screen)
ti.root.lazy_grad()
def zero_vec():
return [0.0, 0.0, 0.0]
def zero_matrix():
return [zero_vec(), zero_vec(), zero_vec()]
@ti.kernel
def clear_grid():
for i, j, k in grid_m_in:
grid_v_in[i, j, k] = [0, 0, 0]
grid_m_in[i, j, k] = 0
grid_v_in.grad[i, j, k] = [0, 0, 0]
grid_m_in.grad[i, j, k] = 0
grid_v_out.grad[i, j, k] = [0, 0, 0]
@ti.kernel
def clear_particle_grad():
# for all time steps and all particles
for f, i in x:
x.grad[f, i] = zero_vec()
v.grad[f, i] = zero_vec()
C.grad[f, i] = zero_matrix()
F.grad[f, i] = zero_matrix()
@ti.kernel
def clear_actuation_grad():
for t, i in actuation:
actuation[t, i] = 0.0
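# P2G: scatter each particle's mass and affine momentum (including stress and actuation forces) onto the background grid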
@ti.kernel
def p2g(f: ti.i32):
for p in range(0, n_particles):
base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32)
fx = x[f, p] * inv_dx - ti.cast(base, ti.i32)
w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1),
0.5 * ti.sqr(fx - 0.5)]
new_F = (ti.Matrix.diag(dim=dim, val=1) + dt * C[f, p]) @ F[f, p]
J = ti.determinant(new_F)
if particle_type[p] == 0: # fluid
sqrtJ = ti.sqrt(J)
# TODO: need pow(x, 1/3)
new_F = ti.Matrix([[sqrtJ, 0, 0], [0, sqrtJ, 0], [0, 0, 1]])
F[f + 1, p] = new_F
# r, s = ti.polar_decompose(new_F)
act_id = actuator_id[p]
act = actuation[f, ti.max(0, act_id)] * act_strength
if act_id == -1:
act = 0.0
# ti.print(act)
A = ti.Matrix([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) * act
cauchy = ti.Matrix(zero_matrix())
mass = 0.0
ident = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
if particle_type[p] == 0:
mass = 4
cauchy = ti.Matrix(ident) * (J - 1) * E
else:
mass = 1
cauchy = mu * (new_F @ ti.transposed(new_F)) + ti.Matrix(ident) * (la * ti.log(J) - mu)
cauchy += new_F @ A @ ti.transposed(new_F)
stress = -(dt * p_vol * 4 * inv_dx * inv_dx) * cauchy
affine = stress + mass * C[f, p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
for k in ti.static(range(3)):
offset = ti.Vector([i, j, k])
dpos = (ti.cast(ti.Vector([i, j, k]), real) - fx) * dx
weight = w[i](0) * w[j](1) * w[k](2)
grid_v_in[base + offset].atomic_add(
weight * (mass * v[f, p] + affine @ dpos))
grid_m_in[base + offset].atomic_add(weight * mass)
bound = 3
coeff = 1.5
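# Grid update: convert momentum to velocity, apply gravity, and enforce box boundaries with friction (coeff)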
@ti.kernel
def grid_op():
for i, j, k in grid_m_in:
inv_m = 1 / (grid_m_in[i, j, k] + 1e-10)
v_out = inv_m * grid_v_in[i, j, k]
v_out[1] -= dt * gravity
if i < bound and v_out[0] < 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if i > n_grid - bound and v_out[0] > 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if k < bound and v_out[2] < 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if k > n_grid - bound and v_out[2] > 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if j < bound and v_out[1] < 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
normal = ti.Vector([0.0, 1.0, 0.0])
lsq = ti.sqr(normal).sum()
if lsq > 0.5:
if ti.static(coeff < 0):
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
else:
lin = (ti.transposed(v_out) @ normal)(0)
if lin < 0:
vit = v_out - lin * normal
lit = vit.norm() + 1e-10
if lit + coeff * lin <= 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
else:
v_out = (1 + coeff * lin / lit) * vit
if j > n_grid - bound and v_out[1] > 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
grid_v_out[i, j, k] = v_out
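# G2P: gather the updated grid velocities back onto the particles, rebuild the affine velocity field C, and advect positions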
@ti.kernel
def g2p(f: ti.i32):
for p in range(0, n_particles):
base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32)
fx = x[f, p] * inv_dx - ti.cast(base, real)
w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0),
0.5 * ti.sqr(fx - 0.5)]
new_v = ti.Vector(zero_vec())
new_C = ti.Matrix(zero_matrix())
for i in ti.static(range(3)):
for j in ti.static(range(3)):
for k in ti.static(range(3)):
dpos = ti.cast(ti.Vector([i, j, k]), real) - fx
g_v = grid_v_out[base(0) + i, base(1) + j, base(2) + k]
weight = w[i](0) * w[j](1) * w[k](2)
new_v += weight * g_v
new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx
v[f + 1, p] = new_v
x[f + 1, p] = x[f, p] + dt * v[f + 1, p]
C[f + 1, p] = new_C
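# Open-loop controller: each actuator's signal is a tanh of a weighted sum of sinusoids plus a bias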
@ti.kernel
def compute_actuation(t: ti.i32):
for i in range(n_actuators):
act = 0.0
for j in ti.static(range(n_sin_waves)):
act += weights[i, j] * ti.sin(
actuation_omega * t * dt + 2 * math.pi / n_sin_waves * j)
act += bias[i]
actuation[t, i] = ti.tanh(act)
@ti.kernel
def compute_x_avg():
for i in range(n_particles):
contrib = 0.0
if particle_type[i] == 1:
contrib = 1.0 / n_solid_particles
x_avg[None].atomic_add(contrib * x[steps - 1, i])
@ti.kernel
def compute_loss():
dist = x_avg[None][0]
loss[None] = -dist
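# Forward pass: run the simulation, then use the negative x-displacement of the solid's center of mass as the loss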
def forward(total_steps=steps):
# simulation
for s in range(total_steps - 1):
clear_grid()
        compute_actuation(s)
p2g(s)
grid_op()
g2p(s)
x_avg[None] = [0, 0, 0]
compute_x_avg()
compute_loss()
return loss[None]
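# Backward pass: grid states are not stored, so each step is replayed before its gradients are propagated in reverse time order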
def backward():
clear_particle_grad()
compute_loss.grad()
compute_x_avg.grad()
for s in reversed(range(steps - 1)):
# Since we do not store the grid history (to save space), we redo p2g and grid op
clear_grid()
p2g(s)
grid_op()
g2p.grad(s)
grid_op.grad()
p2g.grad(s)
        compute_actuation.grad(s)
class Scene:
def __init__(self):
self.n_particles = 0
self.n_solid_particles = 0
self.x = []
self.actuator_id = []
self.particle_type = []
self.offset_x = 0
self.offset_y = 0
self.offset_z = 0
self.num_actuators = 0
def new_actuator(self):
self.num_actuators += 1
global n_actuators
n_actuators = self.num_actuators
return self.num_actuators - 1
def add_rect(self, x, y, z, w, h, d, actuation, ptype=1):
if ptype == 0:
assert actuation == -1
global n_particles
density = 3
w_count = int(w / dx * density)
h_count = int(h / dx * density)
d_count = int(d / dx * density)
real_dx = w / w_count
real_dy = h / h_count
real_dz = d / d_count
if ptype == 1:
for i in range(w_count):
for j in range(h_count):
for k in range(d_count):
self.x.append([x + (i + 0.5) * real_dx + self.offset_x,
y + (j + 0.5) * real_dy + self.offset_y,
z + (k + 0.5) * real_dz + self.offset_z])
self.actuator_id.append(actuation)
self.particle_type.append(ptype)
self.n_particles += 1
self.n_solid_particles += int(ptype == 1)
if self.n_particles % 1000 == 0:
print("num particles", self.n_particles)
else:
for i in range(w_count):
for j in range(h_count):
for k in range(d_count):
self.x.append([x + random.random() * w + self.offset_x,
y + random.random() * h + self.offset_y,
z + random.random() * d + self.offset_z])
self.actuator_id.append(actuation)
self.particle_type.append(ptype)
self.n_particles += 1
self.n_solid_particles += int(ptype == 1)
if self.n_particles % 1000 == 0:
print("num particles", self.n_particles)
def set_offset(self, x, y, z):
self.offset_x = x
self.offset_y = y
self.offset_z = z
def finalize(self):
global n_particles, n_solid_particles
n_particles = self.n_particles
n_solid_particles = max(self.n_solid_particles, 1)
print('n_particles', n_particles)
print('n_solid', n_solid_particles)
def set_n_actuators(self, n_act):
global n_actuators
n_actuators = n_act
gui = tc.core.GUI("Differentiable MPM", tc.veci(1024, 1024))
canvas = gui.get_canvas()
@ti.kernel
def splat(t: ti.i32):
for i in range(n_particles):
pos = ti.cast(x[t, i] * visualize_resolution, ti.i32)
screen[pos[0], pos[1]][0] += 0.1
res = [visualize_resolution, visualize_resolution]
@ti.kernel
def copy_back_and_clear(img: np.ndarray):
for i in range(res[0]):
for j in range(res[1]):
coord = ((res[1] - 1 - j) * res[0] + i) * 3
for c in ti.static(range(3)):
img[coord + c] = screen[i, j][2 - c]
screen[i, j][2 - c] = 0
def robot(scene):
block_size = 0.1
# scene.set_offset(0.1, 0.10, 0.3)
scene.set_offset(0.1, 0.05, 0.3)
def add_leg(x, y, z):
for i in range(4):
scene.add_rect(x + block_size / 2 * (i // 2), y + 0.7 * block_size / 2 * (i % 2), z, block_size / 2, 0.7 * block_size / 2, block_size, scene.new_actuator())
for i in range(4):
add_leg(i // 2 * block_size * 2, 0.0, i % 2 * block_size * 2)
for i in range(3):
scene.add_rect(block_size * i, 0, block_size, block_size, block_size * 0.7, block_size, -1, 1)
# scene.set_offset(0.1, 0.03, 0.3)
scene.add_rect(0.1, 0.15, 0.1, 0.2, 0.05, 0.2, -1, 0)
# scene.
def main():
tc.set_gdb_trigger()
# initialization
scene = Scene()
# fish(scene)
robot(scene)
# scene.add_rect(0.4, 0.4, 0.2, 0.1, 0.3, 0.1, -1, 1)
scene.finalize()
for i in range(n_actuators):
for j in range(n_sin_waves):
weights[i, j] = np.random.randn() * 0.01
for i in range(scene.n_particles):
x[0, i] = scene.x[i]
F[0, i] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
actuator_id[i] = scene.actuator_id[i]
particle_type[i] = scene.particle_type[i]
fig = plt.figure()
plt.ion()
ax = fig.add_subplot(111, projection='3d')
losses = []
for iter in range(501):
ti.clear_all_gradients()
l = forward()
losses.append(l)
loss.grad[None] = 1
backward()
print('i=', iter, 'loss=', l)
learning_rate = 10
for i in range(n_actuators):
for j in range(n_sin_waves):
# print(weights.grad[i, j])
weights[i, j] -= learning_rate * weights.grad[i, j]
bias[i] -= learning_rate * bias.grad[i]
if iter % 50 == 0:
# visualize
print("Dumping particles...")
for s in range(7, steps, 2):
def to255(x):
return int(max(min(x * 255, 255), 0))
xs, ys, zs = [], [], []
us, vs, ws = [], [], []
cs = []
folder = 'mpm3d/iter{:04d}/'.format(iter)
os.makedirs(folder, exist_ok=True)
for i in range(n_particles):
xs.append(x[s, i][0])
ys.append(x[s, i][1])
zs.append(x[s, i][2])
us.append(v[s, i][0])
vs.append(v[s, i][1])
ws.append(v[s, i][2])
if particle_type[i] == 0:
# fluid
r = 0.3
g = 0.3
b = 1.0
else:
# neohookean
if actuator_id[i] != -1:
# actuated
act = actuation[s, actuator_id[i]] * 0.5
r = 0.5 - act
g = 0.5 - abs(act)
b = 0.5 + act
else:
r, g, b = 0.4, 0.4, 0.4
color = to255(r) * 65536 + 256 * to255(g) + to255(b)
cs.append(color)
data = np.array(xs + ys + zs + us + vs + ws + cs, dtype=np.float32)
data.tofile(open('{}/{:04}.bin'.format(folder, s), 'wb'))
print("Particles dumped")
if __name__ == '__main__':
main()
| 26.364 | 162 | 0.553254 | [
"MIT"
] | AnimatedRNG/taichi | examples/difftaichi/liquid.py | 13,182 | Python |
#!/usr/bin/env python
import sys
last_pkt_num = -1
daystart_pkt_num = -1
daystart_recv_time = -1
daystart_hwrecv_time = -1
dayend_pkt_num = -1
dayend_recv_time = -1
dayend_hwrecv_time = -1
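# A "day" (D) is a run of consecutive packet numbers; when a gap is found, the finished day and the gap ("night", N) are reported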
def process_line(line):
global last_pkt_num
global daystart_pkt_num, daystart_recv_time, daystart_hwrecv_time
global dayend_pkt_num, dayend_recv_time, dayend_hwrecv_time
parts = line.split()
pkt_num = long(parts[1])
sent_time = long(parts[3])
recv_time = long(parts[5])
hw_recv_time = long(parts[7])
# read in the first line
if (daystart_pkt_num == -1):
last_pkt_num = pkt_num
daystart_pkt_num = pkt_num
daystart_recv_time = recv_time
daystart_hwrecv_time = hw_recv_time
dayend_pkt_num = pkt_num
dayend_recv_time = recv_time
dayend_hwrecv_time = hw_recv_time
return
# skip through the day, looking for a gap
if (pkt_num == last_pkt_num + 1):
last_pkt_num = pkt_num
dayend_pkt_num = pkt_num
dayend_recv_time = recv_time
dayend_hwrecv_time = hw_recv_time
return
# we found a gap
dstr = "D {} pkts long, {} us (utime), {} us (hw)".format(
dayend_pkt_num - daystart_pkt_num,
dayend_recv_time - daystart_recv_time,
dayend_hwrecv_time - daystart_hwrecv_time)
print(dstr)
nstr = "\t\t\t\t\t\t\t\tN {} pkts long, {} us (utime), {} us (hw)".format(
pkt_num - dayend_pkt_num,
recv_time - dayend_recv_time,
hw_recv_time - dayend_hwrecv_time)
print(nstr)
last_pkt_num = pkt_num
daystart_pkt_num = pkt_num
daystart_recv_time = recv_time
daystart_hwrecv_time = hw_recv_time
def main(argv):
if (len(argv) == 1):
fin = sys.stdin
else:
fin = open(argv[1])
while 1:
try:
line = fin.readline()
except KeyboardInterrupt:
break
if not line:
break
process_line(line)
if __name__ == "__main__":
main(sys.argv)
| 22.910112 | 78 | 0.635606 | [
"BSD-3-Clause"
] | gmporter/TritonVFN | src/scripts/process-loss-rate-output.py | 2,039 | Python |
class Point:
counter = []
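    # class attribute: a single list shared by every Point instance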
def __init__(self, x=0, y=0):
"""Konstruktor punktu."""
self.x = x
self.y = y
def update(self, n):
self.counter.append(n)
p1 = Point(0,0)
p2 = Point(1,1)
p1.counter.append(1)
p2.counter.append(3)
p1.counter[0] = 2
print(p1.counter)
print(p2.counter)
p1.update(1)
print(p1.counter)
print(p2.counter)
| 16.217391 | 33 | 0.597855 | [
"MIT"
] | wrutkowski1000/wizualizacja-danych | zadanka/l5zad4.py | 373 | Python |
import discord
from discord.ext import commands
import json
gamertags = 'gamer_tags.json'
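# gamer_tags.json maps a Discord user id to a per-platform tag library: { "<user id>": { "<platform>": "<gamertag>", ... } }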
class Gamertag(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def newtag(self, ctx):
with open(gamertags, 'r') as in_file:
data = json.load(in_file)
author_id = str(ctx.message.author.id)
if author_id in list(data.keys()):
await ctx.send('This user already has a gamertag library')
print('command aborted: id already exists')
else:
data[author_id] = {}
with open(gamertags, 'w') as out_file:
json.dump(data, out_file)
await ctx.send('Created new library under this user')
@commands.command()
async def addtag(self, ctx, platform, tag):
with open(gamertags, 'r') as in_file:
data = json.load(in_file)
author_id = str(ctx.message.author.id)
data[author_id][platform.lower()] = tag
with open(gamertags, 'w') as out_file:
json.dump(data, out_file)
@commands.command()
async def viewtag(self, ctx, mention, platform):
with open(gamertags, 'r') as in_file:
data = json.load(in_file)
if platform.lower() == 'all':
tags = f'{mention}\'s' + ' Gamer Tags'
for plat, tag in data[str(ctx.message.mentions[0].id).lower()].items():
tags += f'\n- {plat.capitalize()}: {tag}'
await ctx.send(tags)
else:
await ctx.send(f'{mention}\'s {platform.capitalize()}: {data[str(ctx.message.mentions[0].id).lower()][platform.lower()]}')
def setup(bot):
bot.add_cog(Gamertag(bot))
| 32.75 | 134 | 0.586025 | [
"MIT"
] | GeorgeD88/Insomniac | cogs/gamertag.py | 1,703 | Python |
# -*- coding: utf8 -*-
import json
from activity.womail.womail import WoMail
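# Daily sign-in task for the WoMail (China Unicom mail) activity pages at nyan.mail.wo.cn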
class DailySign(WoMail):
def __init__(self, mobile, openId):
super(DailySign, self).__init__(mobile, openId)
self.session.headers.update({
# 'Origin': 'https://nyan.mail.wo.cn',
'Referer': 'https://nyan.mail.wo.cn/cn/sign/wap/index.html',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; MI 8 SE Build/OPM1.171019.019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.62 XWEB/2797 MMWEBSDK/20210501 Mobile Safari/537.36 MMWEBID/107 MicroMessenger/8.0.6.1900(0x28000635) Process/toolsmp WeChat/arm64 Weixin NetType/4G Language/zh_CN ABI/arm64',
'X-Requested-With': 'com.tencent.mm' # XMLHttpRequest
})
self.message = ''
def login(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/index?mobile={self.mobile}&userName=&openId={self.openId}'
self.session.get(url=url)
print(self.session.cookies.get_dict())
def index(self):
url = 'https://nyan.mail.wo.cn/cn/sign/wap/index.html'
self.session.get(url=url)
def userInfo(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/userinfo.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
try:
print(json.dumps(data, indent=4, ensure_ascii=False))
return str(data['result']['lastDay']), str(data['result']['keepSign'])
except:
print(resp.text)
def isLogin(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/isLoginMail.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def check(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/checkin.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def prizeDetail(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/prizes.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
if len(data['result']) > 3:
data['result'] = data['result'][:3]
print(json.dumps(data, indent=4, ensure_ascii=False))
def doTask(self, task_name):
url = f'https://nyan.mail.wo.cn/cn/sign/user/doTask.do?rand={self.randomNum}'
data = {
'taskName': task_name
}
resp = self.session.post(url=url, data=data)
print(resp.text)
def overTask(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/overtask.do?rand={self.randomNum}'
data = {
'taskLevel': '2'
}
resp = self.session.post(url=url, data=data)
data = resp.json()
print(json.dumps(data, indent=4, ensure_ascii=False))
result = [item['taskName'] for item in data['result']]
# data = {
# 'taskLevel': '1'
# }
return result
def run(self):
if int(self.now_date.replace('-', '')) > 20220228:
return
try:
self.login()
self.index()
result = self.overTask()
for task_name in ["loginmail", "clubactivity", "club"]: # , "download"
if task_name in result:
continue
self.doTask(task_name)
self.flushTime(1)
else:
print("积分签到任务已完成")
lastDay, keepSign = self.userInfo()
if keepSign == '21':
                print('Skipping check-in after day 21')
                self.message = 'Daily sign-in: skipped check-in after day 21'
self.recordLog(self.message)
return
else:
if self.now_date.replace('-', '') == lastDay:
print("今日已打卡")
return
else:
self.check()
self.prizeDetail()
lastDay, _ = self.userInfo()
if self.now_date.replace('-', '') == lastDay:
                self.message = 'Daily sign-in: signed in'
else:
                self.message = 'Daily sign-in: not signed in'
self.recordLog(self.message)
except Exception as e:
print(e)
if __name__ == "__main__":
pass
| 36.273504 | 340 | 0.545476 | [
"MIT"
] | Blessingorz/UnicomDailyTask | activity/womail/dailyTask.py | 4,340 | Python |
'''
References:
- An Outline of Set Theory, Henle
'''
from . import fol
class ElementSymbol(fol.ImproperSymbol):
def __init__(self):
        fol.PrimitiveSymbol.__init__(self, '∈')  # pass the instance explicitly to the unbound __init__
def symbol_type(self) -> str:
return 'element of'
@staticmethod
def new() -> "ElementSymbol":
return ElementSymbol()
| 17.368421 | 41 | 0.636364 | [
"Unlicense"
] | jadnohra/connect | ddq_1/lang/set.py | 332 | Python |
from flask import (Blueprint, Response, request, render_template)
import json
from flask_test import db
from flask_test.schema import FORM_SCHEMA
from flask_test.utils import row_as_json, list_as_json
bp = Blueprint('list', __name__)
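# GET /resource/<doc_type>: return the rows as JSON when the client's Accept header asks for it, otherwise render the HTML list view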
@bp.route('/resource/<doc_type>', methods=['GET'])
def get(doc_type):
data = db.get_list(doc_type)
    if 'application/json' in request.headers.get('accept', ''):  # default to '' so a missing Accept header doesn't raise
response = json.dumps(list_as_json(data))
return Response(response, mimetype="application/json")
return render_template(
'list.html',
items=data,
doc_type=doc_type,
fields=FORM_SCHEMA[doc_type],
get_options=db.get_list
)
| 28.583333 | 65 | 0.704082 | [
"MIT"
] | barredterra/flask-test | flask_test/views/listview.py | 686 | Python |