ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py | 1a473361264af9af33964ef70a91f14022f67f6e | """Demonstrates how to upload and download files to a remote server.
"""
from ixnetwork_restpy import SessionAssistant, Files
session_assistant = SessionAssistant(IpAddress='127.0.0.1',
UserName='admin', Password='admin',
LogLevel=SessionAssistant.LOGLEVEL_INFO,
ClearConfig=True)
ixnetwork = session_assistant.Ixnetwork
# add 4 vport objects
ixnetwork.Vport.add().add().add().add()
# save the configuration on the server
ixnetwork.SaveConfig(Files('sample.ixncfg'))
# get a list of remote files
print(session_assistant.Session.GetFileList())
# download the remote saved configuration as some other local file
session_assistant.Session.DownloadFile('sample.ixncfg', 'local.ixncfg')
# upload the local file
print(session_assistant.Session.UploadFile('local.ixncfg'))
# load the uploaded configuration from the server
print(ixnetwork.LoadConfig(Files('local.ixncfg')))
# verify that the vport objects exist
assert(len(ixnetwork.Vport.find()) == 4)
|
py | 1a4733eb3958f35e8d3f71dd1eb89afe9a4d4b26 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import re
import os
from bomlib.columns import ColumnList
# Check python version to determine which version of ConfigParser to import
if sys.version_info.major >= 3:
import configparser as ConfigParser
else:
import ConfigParser
class BomPref:
SECTION_IGNORE = "IGNORE_COLUMNS"
SECTION_COLUMN_ORDER = "COLUMN_ORDER"
SECTION_GENERAL = "BOM_OPTIONS"
SECTION_ALIASES = "COMPONENT_ALIASES"
SECTION_GROUPING_FIELDS = "GROUP_FIELDS"
SECTION_REGEXCLUDES = "REGEX_EXCLUDE"
SECTION_REGINCLUDES = "REGEX_INCLUDE"
OPT_PCB_CONFIG = "pcb_configuration"
OPT_NUMBER_ROWS = "number_rows"
OPT_GROUP_CONN = "group_connectors"
OPT_USE_REGEX = "test_regex"
OPT_USE_ALT = "use_alt"
OPT_ALT_WRAP = "alt_wrap"
OPT_MERGE_BLANK = "merge_blank_fields"
OPT_IGNORE_DNF = "ignore_dnf"
OPT_BACKUP = "make_backup"
OPT_OUTPUT_FILE_NAME = "output_file_name"
OPT_VARIANT_FILE_NAME_FORMAT = "variant_file_name_format"
OPT_DEFAULT_BOARDS = "number_boards"
OPT_DEFAULT_PCBCONFIG = "board_variant"
OPT_CONFIG_FIELD = "fit_field"
OPT_HIDE_HEADERS = "hide_headers"
OPT_HIDE_PCB_INFO = "hide_pcb_info"
def __init__(self):
# List of headings to ignore in BoM generation
self.ignore = [
ColumnList.COL_PART_LIB,
ColumnList.COL_FP_LIB,
]
self.corder = ColumnList._COLUMNS_DEFAULT
self.useAlt = False # Use alternate reference representation
self.altWrap = None # Wrap to n items when using alt representation
self.ignoreDNF = True # Ignore rows for do-not-fit parts
self.numberRows = True # Add row-numbers to BoM output
self.groupConnectors = True # Group connectors and ignore component value
self.useRegex = True # Test various columns with regex
self.boards = 1 # Quantity of boards to be made
self.mergeBlankFields = True # Blanks fields will be merged when possible
self.hideHeaders = False
self.hidePcbInfo = False
self.verbose = False # By default, is not verbose
self.configField = "Config" # Default field used for part fitting config
self.pcbConfig = ["default"]
self.backup = "%O.tmp"
self.separatorCSV = None
self.outputFileName = "%O_bom_%v%V"
self.variantFileNameFormat = "_(%V)"
self.xlsxwriter_available = False
self.xlsxwriter2_available = False
# Default fields used to group components
self.groups = [
ColumnList.COL_PART,
ColumnList.COL_PART_LIB,
ColumnList.COL_VALUE,
ColumnList.COL_FP,
ColumnList.COL_FP_LIB,
# User can add custom grouping columns in bom.ini
]
self.regIncludes = [] # None by default
self.regExcludes = [
[ColumnList.COL_REFERENCE, '^TP[0-9]*'],
[ColumnList.COL_REFERENCE, '^FID'],
[ColumnList.COL_PART, 'mount.*hole'],
[ColumnList.COL_PART, 'solder.*bridge'],
[ColumnList.COL_PART, 'test.*point'],
[ColumnList.COL_FP, 'test.*point'],
[ColumnList.COL_FP, 'mount.*hole'],
[ColumnList.COL_FP, 'fiducial'],
]
# Default component groupings
self.aliases = [
["c", "c_small", "cap", "capacitor"],
["r", "r_small", "res", "resistor"],
["sw", "switch"],
["l", "l_small", "inductor"],
["zener", "zenersmall"],
["d", "diode", "d_small"]
]
# Check an option within the SECTION_GENERAL group
def checkOption(self, parser, opt, default=False):
if parser.has_option(self.SECTION_GENERAL, opt):
return parser.get(self.SECTION_GENERAL, opt).lower() in ["1", "true", "yes"]
else:
return default
def checkInt(self, parser, opt, default=False):
if parser.has_option(self.SECTION_GENERAL, opt):
return int(parser.get(self.SECTION_GENERAL, opt).lower())
else:
return default
# Read KiBOM preferences from file
def Read(self, file, verbose=False):
file = os.path.abspath(file)
if not os.path.exists(file) or not os.path.isfile(file):
print("{f} is not a valid file!".format(f=file))
return
cf = ConfigParser.RawConfigParser(allow_no_value=True)
cf.optionxform = str
cf.read(file)
# Read general options
if self.SECTION_GENERAL in cf.sections():
self.ignoreDNF = self.checkOption(cf, self.OPT_IGNORE_DNF, default=True)
self.useAlt = self.checkOption(cf, self.OPT_USE_ALT, default=False)
self.altWrap = self.checkInt(cf, self.OPT_ALT_WRAP, default=None)
self.numberRows = self.checkOption(cf, self.OPT_NUMBER_ROWS, default=True)
self.groupConnectors = self.checkOption(cf, self.OPT_GROUP_CONN, default=True)
self.useRegex = self.checkOption(cf, self.OPT_USE_REGEX, default=True)
self.mergeBlankFields = self.checkOption(cf, self.OPT_MERGE_BLANK, default=True)
self.outputFileName = cf.get(self.SECTION_GENERAL, self.OPT_OUTPUT_FILE_NAME)
self.variantFileNameFormat = cf.get(self.SECTION_GENERAL, self.OPT_VARIANT_FILE_NAME_FORMAT)
if cf.has_option(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD):
self.configField = cf.get(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD)
if cf.has_option(self.SECTION_GENERAL, self.OPT_DEFAULT_BOARDS):
self.boards = self.checkInt(cf, self.OPT_DEFAULT_BOARDS, default=None)
if cf.has_option(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG):
self.pcbConfig = cf.get(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG).strip().split(",")
if cf.has_option(self.SECTION_GENERAL, self.OPT_BACKUP):
self.backup = cf.get(self.SECTION_GENERAL, self.OPT_BACKUP)
else:
self.backup = False
if cf.has_option(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS):
self.hideHeaders = cf.get(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS) == '1'
if cf.has_option(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO):
self.hidePcbInfo = cf.get(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO) == '1'
# Read out grouping columns
if self.SECTION_GROUPING_FIELDS in cf.sections():
self.groups = [i for i in cf.options(self.SECTION_GROUPING_FIELDS)]
# Read out ignored-rows
if self.SECTION_IGNORE in cf.sections():
self.ignore = [i for i in cf.options(self.SECTION_IGNORE)]
# Read out column order
if self.SECTION_COLUMN_ORDER in cf.sections():
self.corder = [i for i in cf.options(self.SECTION_COLUMN_ORDER)]
# Read out component aliases
if self.SECTION_ALIASES in cf.sections():
self.aliases = [re.split('[ \t]+', a) for a in cf.options(self.SECTION_ALIASES)]
if self.SECTION_REGEXCLUDES in cf.sections():
self.regExcludes = []
for pair in cf.options(self.SECTION_REGEXCLUDES):
if len(re.split('[ \t]+', pair)) == 2:
self.regExcludes.append(re.split('[ \t]+', pair))
if self.SECTION_REGINCLUDES in cf.sections():
self.regIncludes = []
for pair in cf.options(self.SECTION_REGINCLUDES):
if len(re.split('[ \t]+', pair)) == 2:
self.regIncludes.append(re.split('[ \t]+', pair))
# Add an option to the SECTION_GENERAL group
def addOption(self, parser, opt, value, comment=None):
if comment:
if not comment.startswith(";"):
comment = "; " + comment
parser.set(self.SECTION_GENERAL, comment)
parser.set(self.SECTION_GENERAL, opt, "1" if value else "0")
# Write KiBOM preferences to file
def Write(self, file):
file = os.path.abspath(file)
cf = ConfigParser.RawConfigParser(allow_no_value=True)
cf.optionxform = str
cf.add_section(self.SECTION_GENERAL)
cf.set(self.SECTION_GENERAL, "; General BoM options here")
self.addOption(cf, self.OPT_IGNORE_DNF, self.ignoreDNF, comment="If '{opt}' option is set to 1, rows that are not to be fitted on the PCB will not be written to the BoM file".format(opt=self.OPT_IGNORE_DNF))
self.addOption(cf, self.OPT_USE_ALT, self.useAlt, comment="If '{opt}' option is set to 1, grouped references will be printed in the alternate compressed style eg: R1-R7,R18".format(opt=self.OPT_USE_ALT))
self.addOption(cf, self.OPT_ALT_WRAP, self.altWrap, comment="If '{opt}' option is set to an integer N, the references field will wrap after N entries are printed".format(opt=self.OPT_ALT_WRAP))
self.addOption(cf, self.OPT_NUMBER_ROWS, self.numberRows, comment="If '{opt}' option is set to 1, each row in the BoM will be prepended with an incrementing row number".format(opt=self.OPT_NUMBER_ROWS))
self.addOption(cf, self.OPT_GROUP_CONN, self.groupConnectors, comment="If '{opt}' option is set to 1, connectors with the same footprints will be grouped together, independent of the name of the connector".format(opt=self.OPT_GROUP_CONN))
self.addOption(cf, self.OPT_USE_REGEX, self.useRegex, comment="If '{opt}' option is set to 1, each component group will be tested against a number of regular-expressions (specified, per column, below). If any matches are found, the row is ignored in the output file".format(opt=self.OPT_USE_REGEX))
self.addOption(cf, self.OPT_MERGE_BLANK, self.mergeBlankFields, comment="If '{opt}' option is set to 1, component groups with blank fields will be merged into the most compatible group, where possible".format(opt=self.OPT_MERGE_BLANK))
cf.set(self.SECTION_GENERAL, "; Specify output file name format, %O is the defined output name, %v is the version, %V is the variant name which will be amended according to 'variant_file_name_format'.")
cf.set(self.SECTION_GENERAL, self.OPT_OUTPUT_FILE_NAME, self.outputFileName)
cf.set(self.SECTION_GENERAL, "; Specify the variant file name format, this is a unique field as the variant is not always used/specified. When it is unused you will want to strip all of this.")
cf.set(self.SECTION_GENERAL, self.OPT_VARIANT_FILE_NAME_FORMAT, self.variantFileNameFormat)
cf.set(self.SECTION_GENERAL, '; Field name used to determine if a particular part is to be fitted')
cf.set(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD, self.configField)
cf.set(self.SECTION_GENERAL, '; Make a backup of the bom before generating the new one, using the following template')
cf.set(self.SECTION_GENERAL, self.OPT_BACKUP, self.backup)
cf.set(self.SECTION_GENERAL, '; Default number of boards to produce if none given on CLI with -n')
cf.set(self.SECTION_GENERAL, self.OPT_DEFAULT_BOARDS, self.boards)
cf.set(self.SECTION_GENERAL, '; Default PCB variant if none given on CLI with -r')
cf.set(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG, self.pcbConfig)
cf.set(self.SECTION_GENERAL, '; Whether to hide headers from output file')
cf.set(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS, self.hideHeaders)
cf.set(self.SECTION_GENERAL, '; Whether to hide PCB info from output file')
cf.set(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO, self.hidePcbInfo)
cf.add_section(self.SECTION_IGNORE)
cf.set(self.SECTION_IGNORE, "; Any column heading that appears here will be excluded from the Generated BoM")
cf.set(self.SECTION_IGNORE, "; Titles are case-insensitive")
for i in self.ignore:
cf.set(self.SECTION_IGNORE, i)
cf.add_section(self.SECTION_COLUMN_ORDER)
cf.set(self.SECTION_COLUMN_ORDER, "; Columns will appear in the order they are listed here")
cf.set(self.SECTION_COLUMN_ORDER, "; Titles are case-insensitive")
for i in self.corder:
cf.set(self.SECTION_COLUMN_ORDER, i)
# Write the component grouping fields
cf.add_section(self.SECTION_GROUPING_FIELDS)
cf.set(self.SECTION_GROUPING_FIELDS, '; List of fields used for sorting individual components into groups')
cf.set(self.SECTION_GROUPING_FIELDS, '; Components which match (comparing *all* fields) will be grouped together')
cf.set(self.SECTION_GROUPING_FIELDS, '; Field names are case-insensitive')
for i in self.groups:
cf.set(self.SECTION_GROUPING_FIELDS, i)
cf.add_section(self.SECTION_ALIASES)
cf.set(self.SECTION_ALIASES, "; A series of values which are considered to be equivalent for the part name")
cf.set(self.SECTION_ALIASES, "; Each line represents a list of equivalent component name values separated by white space")
cf.set(self.SECTION_ALIASES, "; e.g. 'c c_small cap' will ensure the equivalent capacitor symbols can be grouped together")
cf.set(self.SECTION_ALIASES, '; Aliases are case-insensitive')
for a in self.aliases:
cf.set(self.SECTION_ALIASES, "\t".join(a))
cf.add_section(self.SECTION_REGINCLUDES)
cf.set(self.SECTION_REGINCLUDES, '; A series of regular expressions used to include parts in the BoM')
cf.set(self.SECTION_REGINCLUDES, '; If there are any regex defined here, only components that match against ANY of them will be included in the BOM')
cf.set(self.SECTION_REGINCLUDES, '; Column names are case-insensitive')
cf.set(self.SECTION_REGINCLUDES, '; Format is: "[ColumnName] [Regex]" (white-space separated)')
for i in self.regIncludes:
if not len(i) == 2:
continue
cf.set(self.SECTION_REGINCLUDES, i[0] + "\t" + i[1])
cf.add_section(self.SECTION_REGEXCLUDES)
cf.set(self.SECTION_REGEXCLUDES, '; A series of regular expressions used to exclude parts from the BoM')
cf.set(self.SECTION_REGEXCLUDES, '; If a component matches ANY of these, it will be excluded from the BoM')
cf.set(self.SECTION_REGEXCLUDES, '; Column names are case-insensitive')
cf.set(self.SECTION_REGEXCLUDES, '; Format is: "[ColumnName] [Regex]" (white-space separated)')
for i in self.regExcludes:
if not len(i) == 2:
continue
cf.set(self.SECTION_REGEXCLUDES, i[0] + "\t" + i[1])
with open(file, 'w') as configfile:  # text mode so ConfigParser.write works on Python 3
cf.write(configfile)
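# --- Hedged usage sketch (illustrative only, not part of KiBOM itself; the
# file name "bom.ini" below is an assumption) ---
#
#   pref = BomPref()
#   pref.Write("bom.ini")   # writes BOM_OPTIONS, IGNORE_COLUMNS, etc. with comments
#   pref.Read("bom.ini")    # reads the same sections back into the preference object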
|
py | 1a473434ea6dd616318ce76c3d7c80235544abbb | import numpy as np
from collections import OrderedDict
from matplotlib import pyplot as plt
class GesturesVisualizer():
def __init__(self, gestures, deviceWidth=360, deviceHeight=640):
self.gestures = gestures
self.width = deviceWidth
self.height = deviceHeight
def plot_gestures(self):
fig = plt.figure(figsize=(3.75, 2.5 * (self.height / self.width)))
ax = fig.add_axes([0.15, 0.05, 0.55, 0.85])
labels = OrderedDict()
for i, _ind in enumerate(self.gestures.index):
labels["gesture_" + str(i)] = np.random.rand(1, 3)
x_data = []
y_data = []
if(len(self.gestures.iloc[i]["data"]) == 0):
continue
x_data.append(self.gestures.iloc[i]["data"][0]["x0"])
y_data.append(self.gestures.iloc[i]["data"][0]["y0"])
if(self.gestures.iloc[i]["type"] == "swipe"):
for d in self.gestures.iloc[i]["data"]:
x_data.append(d["moveX"])
y_data.append(d["moveY"])
keys = list(labels.keys())
if(self.gestures.iloc[i]["type"] == "tap"):
plt.scatter(x_data, y_data, label=keys[i], color = labels[keys[i]][0])
else:
plt.plot(x_data, y_data, label=keys[i], color = labels[keys[i]][0])
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.xlim(0, self.width)
plt.ylim(0, self.height)
plt.xlabel('X - Dimension')
plt.ylabel('Y - Dimension')
plt.gca().invert_yaxis()
plt.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1.01, 0.5), loc="center left")
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
plt.show() |
py | 1a4734a2353edf5059be0588d032c74d23742d7e | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_QCOM_texture_foveated_subsampled_layout'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_QCOM_texture_foveated_subsampled_layout',error_checker=_errors._error_checker)
GL_FOVEATION_SUBSAMPLED_LAYOUT_METHOD_BIT_QCOM=_C('GL_FOVEATION_SUBSAMPLED_LAYOUT_METHOD_BIT_QCOM',0x00000004)
GL_MAX_SHADER_SUBSAMPLED_IMAGE_UNITS_QCOM=_C('GL_MAX_SHADER_SUBSAMPLED_IMAGE_UNITS_QCOM',0x8FA1)
|
py | 1a47356f2116661592af87303463334fc83a403e | # Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from delfin import db
from delfin.api import api_utils
from delfin.api.common import wsgi
from delfin.api.views import storage_host_initiators as \
storage_host_initiator_view
class StorageHostInitiatorController(wsgi.Controller):
def __init__(self):
super(StorageHostInitiatorController, self).__init__()
self.search_options = ['name', 'status', 'wwn', 'id', 'storage_id',
'native_storage_host_id',
'native_storage_host_initiator_id']
def _get_storage_host_initiator_search_options(self):
"""Return storage host initiator search options allowed ."""
return self.search_options
def show(self, req, id):
ctxt = req.environ['delfin.context']
query_params = {"storage_id": id}
query_params.update(req.GET)
# Update options other than filters
sort_keys, sort_dirs = api_utils.get_sort_params(query_params)
marker, limit, offset = api_utils.get_pagination_params(query_params)
# Strip out options except supported search options
api_utils.remove_invalid_options(
ctxt, query_params,
self._get_storage_host_initiator_search_options())
storage_host_initiators = db.storage_host_initiators_get_all(
ctxt, marker, limit, sort_keys, sort_dirs, query_params, offset)
return storage_host_initiator_view.build_storage_host_initiators(
storage_host_initiators)
def create_resource():
return wsgi.Resource(StorageHostInitiatorController())
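# --- Hedged request sketch (the exact route prefix is an assumption, not taken
# from this module) ---
# A request such as
#   GET /v1/storages/<storage_id>/storage-host-initiators?limit=10&sort=name:asc
# would be dispatched to StorageHostInitiatorController.show() above, which parses
# the sort/pagination parameters via api_utils and strips unsupported filters
# before querying db.storage_host_initiators_get_all().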
|
py | 1a4735afad465e31e383e7b23521f6b17a250989 | import soundfile as sf
import math
from uuid import uuid4
from typing import List
from .exceptions import ShellError
from pathlib import Path
def fftsanitise(fftsettings) -> List[int]:
return [
int(fftsettings[0]),
int(fftsettings[1]),
int(fftsettings[2])
]
def get_buffer(audio_file_path: str, output: str = "list"):
"""Returns an audio files fp32 values as a numpy array"""
data, _ = sf.read(audio_file_path)
data = data.transpose()
if output == "list":
return data.tolist()
if output == "numpy":
return data
def odd_snap(number: int) -> int:
"""snaps a number to the next odd number"""
if (number % 2) == 0:
return number + 1
else:
return number
def fftformat(fftsettings: List[int]) -> int:
"""Handles the FFT size so you can pass maxfftsize"""
fftsize = fftsettings[2]
if fftsize == -1:
fftsize = fftsettings[0]
return math.floor(2 ** math.ceil(math.log(fftsize)/math.log(2)))
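# Hedged worked examples (assumed fftsettings values, for illustration only);
# these hold because fftformat rounds the chosen size up to the next power of two:
#   assert fftformat([1024, 512, 1000]) == 1024   # 1000 -> next power of two
#   assert fftformat([1024, 512, -1]) == 1024     # -1 falls back to fftsettings[0]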
def handle_ret(retval: int):
"""Handle return value and raise exceptions if necessary"""
if retval != 0:
raise ShellError(retval)
def make_temp() -> str:
"""Create temporary files in local hidden directory"""
tempfiles = Path.home() / ".python-flucoma"
if not tempfiles.exists():
tempfiles.mkdir()
uuid = str(uuid4().hex)
full_path = tempfiles / f"{uuid}.wav"
return str(full_path)
def cleanup():
tempfiles = Path.home() / ".python-flucoma"
if tempfiles.exists():
for x in tempfiles.iterdir():
x.unlink()
|
py | 1a47362d2e2bff1150677debe4e10ee19a5e2f3f | from typing import Optional, List, Literal, Union, Any
from enum import Enum
class Scalars:
ID = Union[str]
String = Union[str]
Boolean = Union[bool]
Int = Union[int]
Float = Union[float]
class User:
__typename: Optional[Literal["User"]]
id: Scalars.Int
name: Scalars.String
email: Scalars.String
__GQL_CODEGEN_User__ = User
class Query:
__typename: Optional[Literal["Query"]]
allUsers: List[Optional["__GQL_CODEGEN_User__"]]
userById: Optional["__GQL_CODEGEN_User__"]
"""
Generates a new answer for the
guessing game
"""
answer: List[Scalars.Int]
testArr1: Optional[List[Optional[Scalars.String]]]
testArr2: List[Optional[Scalars.String]]
testArr3: List[Scalars.String]
__GQL_CODEGEN_Query__ = Query
class QueryUserByIdArgs:
id: Scalars.Int
__GQL_CODEGEN_QueryUserByIdArgs__ = QueryUserByIdArgs
|
py | 1a4737ea4b3871714ad67f91ab5c7e2781de9ccd | # coding=utf-8
from requests import HTTPError
from ..base import BitbucketCloudBase
class DefaultReviewers(BitbucketCloudBase):
def __init__(self, url, *args, **kwargs):
super(DefaultReviewers, self).__init__(url, *args, **kwargs)
def _get_object(self, data):
if "errors" in data:
return
return DefaultReviewer(data, **self._new_session_args)
def add(self, user):
"""
Adds the specified user to the repository's list of default reviewers.
This method is idempotent. Adding a user a second time has no effect.
:param user: string: The user to add
:return: The added DefaultReviewer object
"""
# the mention_id parameter is undocumented but, if omitted, leads to 400 responses
return self._get_object(self.put(user, data={"mention_id": user}))
def each(self, q=None, sort=None):
"""
Returns the repository's default reviewers.
These are the users that are automatically added as reviewers on every new pull request
that is created.
:param q: string: Query string to narrow down the response.
See https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering for details.
:param sort: string: Name of a response property to sort results.
See https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering for details.
:return: A generator for the DefaultReviewer objects
"""
params = {}
if sort is not None:
params["sort"] = sort
if q is not None:
params["q"] = q
for default_reviewer in self._get_paged(None, params=params):
yield self._get_object(default_reviewer)
return
def get(self, user):
"""
Returns the default reviewer in this repository.
:param user: string: The requested user name
:return: The requested DefaultReviewer object, None if not a default reviewer
"""
default_reviewer = None
try:
default_reviewer = self._get_object(super(DefaultReviewers, self).get(user))
except HTTPError as e:
# A 404 indicates that the specified user is not a default reviewer.
if not e.response.status_code == 404:
# Rethrow the exception
raise
return default_reviewer
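# --- Hedged usage sketch (assumes `default_reviewers` is an already-constructed
# DefaultReviewers instance obtained from a repository object) ---
#
#   default_reviewers.add("some_user")           # idempotent
#   for reviewer in default_reviewers.each(sort="display_name"):
#       print(reviewer.display_name, reviewer.uuid)
#   print(default_reviewers.get("some_user"))    # None if not a default reviewer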
class DefaultReviewer(BitbucketCloudBase):
def __init__(self, data, *args, **kwargs):
super(DefaultReviewer, self).__init__(None, *args, data=data, expected_type="user", **kwargs)
@property
def display_name(self):
return str(self.get_data("display_name"))
@property
def nickname(self):
return self.get_data("nickname")
@property
def account_id(self):
return self.get_data("account_id")
@property
def uuid(self):
return self.get_data("uuid")
def delete(self):
"""
Deletes the default reviewer
"""
return super(DefaultReviewer, self).delete(self.url, absolute=True)
|
py | 1a4738fc766047d3b18c131d0961f81c6212a3ae | import logging
import os.path as osp
import pathlib
from typing import Optional, Sequence, Type, Union
import gym
import torch as th
from sacred.observers import FileStorageObserver
from stable_baselines3.common import vec_env
from torch.utils import data as th_data
from imitation.algorithms import bc
from imitation.data import rollout, types
from imitation.scripts.config.train_bc import train_bc_ex
from imitation.util import logger
from imitation.util import sacred as sacred_util
@train_bc_ex.main
def train_bc(
_run,
expert_data_src: Union[types.AnyPath, Sequence[types.Trajectory]],
expert_data_src_format: str,
n_expert_demos: Optional[int],
observation_space: gym.Space,
action_space: gym.Space,
batch_size: int,
# TODO(shwang): Doesn't currently accept Iterable[Mapping] or
# types.TransitionsMinimal, unlike BC.__init__ or BC.set_expert_data_loader().
n_epochs: Optional[int],
n_batches: Optional[int],
l2_weight: float,
optimizer_cls: Type[th.optim.Optimizer],
optimizer_kwargs: dict,
log_dir: types.AnyPath,
venv: Optional[vec_env.VecEnv],
log_interval: int,
log_rollouts_n_episodes: int,
n_episodes_eval: int,
) -> dict:
"""Sacred interface to Behavioral Cloning.
Args:
expert_data_src: Either a path to pickled `Sequence[Trajectory]` or
`Sequence[Trajectory]`.
expert_data_src_format: Either "path" if `expert_data_src` is a path, or
"trajectory" if `expert_data_src` if `Sequence[Trajectory]`.
n_expert_demos: If not None, then a positive number used to truncate the number
of expert demonstrations used from `expert_data_src`. If this number is larger
than the total number of demonstrations available, then a ValueError is
raised.
observation_space: The observation space corresponding to the expert data.
action_space: The action space corresponding to the expert data.
batch_size: Number of observation-action samples used in each BC update.
n_epochs: The total number of training epochs. Set exactly one of n_epochs and
n_batches.
n_batches: The total number of training batches. Set exactly one of n_epochs and
n_batches.
l2_weight: L2 regularization weight.
optimizer_cls: The Torch optimizer class used for BC updates.
optimizer_kwargs: keyword arguments, excluding learning rate and
weight decay, for optimizer construction.
log_dir: Log output directory. Final policy is also saved in this directory as
"{log_dir}/final.pkl"
venv: If not None, then this VecEnv is used to generate rollout episodes for
evaluating policy performance during and after training.
log_interval: The number of updates in between logging various training
statistics to stdout and Tensorboard.
log_rollouts_n_episodes: The number of rollout episodes generated for
training statistics every `log_interval` updates. If `venv` is None or
this argument is nonpositive, then no rollouts are generated.
n_episodes_eval: The number of final evaluation rollout episodes, if `venv` is
provided. These rollouts are used to generate final statistics saved into
Sacred results, which can be compiled into a table by
`imitation.scripts.analyze.analyze_imitation`.
"""
if action_space is None:
raise ValueError("action_space cannot be None")
if observation_space is None:
raise ValueError("observation_space cannot be None")
log_dir = pathlib.Path(log_dir)
log_dir.mkdir(parents=True, exist_ok=True)
logging.info("Logging to %s", log_dir)
custom_logger = logger.configure(log_dir, ["tensorboard", "stdout"])
sacred_util.build_sacred_symlink(log_dir, _run)
if expert_data_src_format == "path":
expert_trajs = types.load(expert_data_src)
elif expert_data_src_format == "trajectory":
# Convenience option for launching experiment from Python script with
# in-memory trajectories.
expert_trajs = expert_data_src
else:
raise ValueError(f"Invalid expert_data_src_format={expert_data_src_format}")
# TODO(shwang): Copied from scripts/train_adversarial -- refactor with "auto",
# or combine all train_*.py into a single script?
if n_expert_demos is not None:
if not len(expert_trajs) >= n_expert_demos:
raise ValueError(
f"Want to use n_expert_demos={n_expert_demos} trajectories, but only "
f"{len(expert_trajs)} are available."
)
expert_trajs = expert_trajs[:n_expert_demos]
expert_data_trans = rollout.flatten_trajectories(expert_trajs)
expert_data = th_data.DataLoader(
expert_data_trans,
batch_size=batch_size,
shuffle=True,
collate_fn=types.transitions_collate_fn,
)
model = bc.BC(
observation_space,
action_space,
expert_data=expert_data,
l2_weight=l2_weight,
optimizer_cls=optimizer_cls,
optimizer_kwargs=optimizer_kwargs,
custom_logger=custom_logger,
)
model.train(
n_epochs=n_epochs,
n_batches=n_batches,
log_interval=log_interval,
log_rollouts_venv=venv,
log_rollouts_n_episodes=log_rollouts_n_episodes,
)
model.save_policy(policy_path=pathlib.Path(log_dir, "final.th"))
print(f"Visualize results with: tensorboard --logdir '{log_dir}'")
# TODO(shwang): Use auto env, auto stats thing with shared `env` and stats
# ingredient, or something like that.
sample_until = rollout.make_sample_until(
min_timesteps=None, min_episodes=n_episodes_eval
)
trajs = rollout.generate_trajectories(
model.policy,
venv,
sample_until=sample_until,
)
results = {}
results["expert_stats"] = rollout.rollout_stats(expert_trajs)
results["imit_stats"] = rollout.rollout_stats(trajs)
return results
def main_console():
observer = FileStorageObserver(osp.join("output", "sacred", "train_bc"))
train_bc_ex.observers.append(observer)
train_bc_ex.run_commandline()
if __name__ == "__main__":
main_console()
|
py | 1a4739154a420371c6546e7aa1c769581d6da058 | import pytest
from sklearn.utils.estimator_checks import check_estimator
from skltemplate import TemplateEstimator
from skltemplate import TemplateClassifier
from skltemplate import TemplateTransformer
@pytest.mark.parametrize(
"Estimator", [TemplateEstimator, TemplateTransformer, TemplateClassifier]
)
def test_all_estimators(Estimator):
return check_estimator(Estimator)
|
py | 1a473997540ea1ef9f05ac51b8004d0b03c8d094 | """Named tuples and enumerated types.
Defines enums and other schemas for `vectorbt.portfolio`."""
import numpy as np
from vectorbt import _typing as tp
from vectorbt.utils.docs import to_doc
__all__ = [
'RejectedOrderError',
'InitCashMode',
'CallSeqType',
'ConflictMode',
'SizeType',
'Direction',
'OrderStatus',
'OrderSide',
'StatusInfo',
'TradeDirection',
'TradeStatus',
'TradeType',
'ProcessOrderState',
'ExecuteOrderState',
'SimulationContext',
'GroupContext',
'RowContext',
'SegmentContext',
'OrderContext',
'PostOrderContext',
'Order',
'NoOrder',
'OrderResult',
'order_dt',
'trade_dt',
'position_dt',
'log_dt'
]
__pdoc__ = {}
# ############# Errors ############# #
class RejectedOrderError(Exception):
"""Rejected order error."""
pass
# ############# Enums ############# #
class InitCashModeT(tp.NamedTuple):
Auto: int
AutoAlign: int
InitCashMode = InitCashModeT(*range(2))
"""_"""
__pdoc__['InitCashMode'] = f"""Initial cash mode.
```json
{to_doc(InitCashMode)}
```
Attributes:
Auto: Initial cash is infinite within simulation, and then set to the total cash spent.
AutoAlign: Initial cash is set to the total cash spent across all columns.
"""
class CallSeqTypeT(tp.NamedTuple):
Default: int
Reversed: int
Random: int
Auto: int
CallSeqType = CallSeqTypeT(*range(4))
"""_"""
__pdoc__['CallSeqType'] = f"""Call sequence type.
```json
{to_doc(CallSeqType)}
```
Attributes:
Default: Place calls from left to right.
Reversed: Place calls from right to left.
Random: Place calls randomly.
Auto: Place calls dynamically based on order value.
"""
class ConflictModeT(tp.NamedTuple):
Ignore: int
Entry: int
Exit: int
Opposite: int
ConflictMode = ConflictModeT(*range(4))
"""_"""
__pdoc__['ConflictMode'] = f"""Conflict mode.
```json
{to_doc(ConflictMode)}
```
What should happen if both entry and exit signals occur simultaneously?
Attributes:
Ignore: Ignore both signals.
Entry: Execute entry signal.
Exit: Execute exit signal.
Opposite: Execute opposite signal. Takes effect only when in position.
"""
class SizeTypeT(tp.NamedTuple):
Amount: int
Value: int
Percent: int
TargetAmount: int
TargetValue: int
TargetPercent: int
SizeType = SizeTypeT(*range(6))
"""_"""
__pdoc__['SizeType'] = f"""Size type.
```json
{to_doc(SizeType)}
```
Attributes:
Amount: Amount of assets to trade.
Value: Asset value to trade.
Gets converted into `SizeType.Amount` using `OrderContext.val_price_now`.
Percent: Percentage of available resources to use in either direction (not to be confused with
the percentage of position value!)
* When buying, it's the percentage of `OrderContext.cash_now`.
* When selling, it's the percentage of `OrderContext.position_now`.
* When short selling, it's the percentage of `OrderContext.free_cash_now`.
* When selling and short selling (i.e. reversing position), it's the percentage of
`OrderContext.position_now` and `OrderContext.free_cash_now`.
!!! note
Takes into account fees and slippage to find the limit.
In reality, slippage and fees are not known beforehand.
TargetAmount: Target amount of assets to hold (= target position).
Uses `OrderContext.position_now` to get the current position.
Gets converted into `SizeType.Amount`.
TargetValue: Target asset value.
Uses `OrderContext.val_price_now` to get the current asset value.
Gets converted into `SizeType.TargetAmount`.
TargetPercent: Target percentage of total value.
Uses `OrderContext.value_now` to get the current total value.
Gets converted into `SizeType.TargetValue`.
"""
class DirectionT(tp.NamedTuple):
LongOnly: int
ShortOnly: int
All: int
Direction = DirectionT(*range(3))
"""_"""
__pdoc__['Direction'] = f"""Position direction.
```json
{to_doc(Direction)}
```
Attributes:
LongOnly: Only long positions.
ShortOnly: Only short positions.
All: Both long and short positions.
"""
class OrderStatusT(tp.NamedTuple):
Filled: int
Ignored: int
Rejected: int
OrderStatus = OrderStatusT(*range(3))
"""_"""
__pdoc__['OrderStatus'] = f"""Order status.
```json
{to_doc(OrderStatus)}
```
Attributes:
Filled: Order has been filled.
Ignored: Order has been ignored.
Rejected: Order has been rejected.
"""
class OrderSideT(tp.NamedTuple):
Buy: int
Sell: int
OrderSide = OrderSideT(*range(2))
"""_"""
__pdoc__['OrderSide'] = f"""Order side.
```json
{to_doc(OrderSide)}
```
"""
class StatusInfoT(tp.NamedTuple):
SizeNaN: int
PriceNaN: int
ValPriceNaN: int
ValueNaN: int
ValueZeroNeg: int
SizeZero: int
NoCashShort: int
NoCashLong: int
NoOpenPosition: int
MaxSizeExceeded: int
RandomEvent: int
CantCoverFees: int
MinSizeNotReached: int
PartialFill: int
StatusInfo = StatusInfoT(*range(14))
"""_"""
__pdoc__['StatusInfo'] = f"""Order status information.
```json
{to_doc(StatusInfo)}
```
"""
status_info_desc = [
"Size is NaN",
"Price is NaN",
"Asset valuation price is NaN",
"Asset/group value is NaN",
"Asset/group value is zero or negative",
"Size is zero",
"Not enough cash to short",
"Not enough cash to long",
"No open position to reduce/close",
"Size is greater than maximum allowed",
"Random event happened",
"Not enough cash to cover fees",
"Final size is less than minimum allowed",
"Final size is less than requested"
]
"""_"""
__pdoc__['status_info_desc'] = f"""Order status description.
```json
{to_doc(status_info_desc)}
```
"""
class TradeDirectionT(tp.NamedTuple):
Long: int
Short: int
TradeDirection = TradeDirectionT(*range(2))
"""_"""
__pdoc__['TradeDirection'] = f"""Event direction.
```json
{to_doc(TradeDirection)}
```
"""
class TradeStatusT(tp.NamedTuple):
Open: int
Closed: int
TradeStatus = TradeStatusT(*range(2))
"""_"""
__pdoc__['TradeStatus'] = f"""Event status.
```json
{to_doc(TradeStatus)}
```
"""
class TradeTypeT(tp.NamedTuple):
Trade: int
Position: int
TradeType = TradeTypeT(*range(2))
"""_"""
__pdoc__['TradeType'] = f"""Trade type.
```json
{to_doc(TradeType)}
```
"""
# ############# Named tuples ############# #
class ProcessOrderState(tp.NamedTuple):
cash: float
position: float
debt: float
free_cash: float
val_price: float
value: float
oidx: int
lidx: int
__pdoc__['ProcessOrderState'] = "State before or after order processing."
__pdoc__['ProcessOrderState.cash'] = "Cash in the current column or group with cash sharing."
__pdoc__['ProcessOrderState.position'] = "Position in the current column."
__pdoc__['ProcessOrderState.debt'] = "Debt from shorting in the current column."
__pdoc__['ProcessOrderState.free_cash'] = "Free cash in the current column or group with cash sharing."
__pdoc__['ProcessOrderState.val_price'] = "Valuation price in the current column."
__pdoc__['ProcessOrderState.value'] = "Value in the current column or group with cash sharing."
__pdoc__['ProcessOrderState.oidx'] = "Index of order record."
__pdoc__['ProcessOrderState.lidx'] = "Index of log record."
class ExecuteOrderState(tp.NamedTuple):
cash: float
position: float
debt: float
free_cash: float
__pdoc__['ExecuteOrderState'] = "State after order execution."
__pdoc__['ExecuteOrderState.cash'] = "See `ProcessOrderState.cash`."
__pdoc__['ExecuteOrderState.position'] = "See `ProcessOrderState.position`."
__pdoc__['ExecuteOrderState.debt'] = "See `ProcessOrderState.debt`."
__pdoc__['ExecuteOrderState.free_cash'] = "See `ProcessOrderState.free_cash`."
class SimulationContext(tp.NamedTuple):
target_shape: tp.Shape
close: tp.Array2d
group_lens: tp.Array1d
init_cash: tp.Array1d
cash_sharing: bool
call_seq: tp.Array2d
segment_mask: tp.Array2d
ffill_val_price: bool
update_value: bool
order_records: tp.RecordArray
log_records: tp.RecordArray
last_cash: tp.Array1d
last_position: tp.Array1d
last_debt: tp.Array1d
last_free_cash: tp.Array1d
last_val_price: tp.Array1d
last_value: tp.Array1d
second_last_value: tp.Array1d
last_return: tp.Array1d
last_oidx: tp.Array1d
last_lidx: tp.Array1d
last_pos_record: tp.RecordArray
__pdoc__['SimulationContext'] = """A named tuple representing the context of a simulation.
Contains general information available to all other contexts.
Passed to `pre_sim_func_nb` and `post_sim_func_nb`."""
__pdoc__['SimulationContext.target_shape'] = """Target shape of the simulation.
A tuple with exactly two elements: the number of rows and columns.
## Example
One day of minute data for three assets would yield a `target_shape` of `(1440, 3)`,
where the first axis are rows (minutes) and the second axis are columns (assets).
"""
__pdoc__['SimulationContext.close'] = """Last asset price at each time step.
Has shape `SimulationContext.target_shape`.
"""
__pdoc__['SimulationContext.group_lens'] = """Number of columns per each group.
Even if columns are not grouped, `group_lens` contains ones - one column per group.
## Example
In pairs trading, `group_lens` would be `np.array([2])`, while three independent
columns would require `group_lens` of `np.array([1, 1, 1])`.
"""
__pdoc__['SimulationContext.init_cash'] = """Initial capital per column or group with cash sharing.
If `SimulationContext.cash_sharing`, has shape `(group_lens.shape[0],)`,
otherwise has shape `(target_shape[1],)`.
## Example
Consider three columns, each having $100 of starting capital. If we built one group of two columns
with cash sharing and one (imaginary) group with the last column, the `init_cash` would be
`np.array([200, 100])`. Without cash sharing, the `init_cash` would be `np.array([100, 100, 100])`.
"""
__pdoc__['SimulationContext.cash_sharing'] = "Whether cash sharing is enabled."
__pdoc__['SimulationContext.call_seq'] = """Default sequence of calls per segment.
Controls the sequence in which `order_func_nb` is executed within each segment.
Has shape `SimulationContext.target_shape` and each value must exist in the range `[0, group_len)`.
!!! note
To change the call sequence dynamically, better change `SegmentContext.call_seq_now` in-place.
## Example
The default call sequence for three data points and two groups with three columns each:
```python
np.array([
[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]
])
```
"""
__pdoc__['SimulationContext.segment_mask'] = """Mask of whether order functions of a particular segment
should be executed.
A segment is simply a sequence of `order_func_nb` calls under the same group and row.
The segment pre- and postprocessing functions are executed regardless of the mask.
You can change this mask in-place to dynamically disable future segments.
Has shape `(target_shape[0], group_lens.shape[0])`.
## Example
Consider two groups with two columns each and the following activity mask:
```python
np.array([[ True, False],
[False, True]])
```
Only the first group is executed in the first row and only the second group is executed
in the second row.
"""
__pdoc__['SimulationContext.ffill_val_price'] = """Whether to track valuation price only if it's known.
Otherwise, unknown `SimulationContext.close` will lead to NaN in valuation price at the next timestamp."""
__pdoc__['SimulationContext.update_value'] = "Whether to update group value after each filled order."
__pdoc__['SimulationContext.order_records'] = """Order records.
It's a 1-dimensional array with records of type `order_dt`.
The array is initialized with empty records first (they contain random data), and then
gradually filled with order data. The number of initialized records depends upon `max_orders`,
but usually it's `target_shape[0] * target_shape[1]`, meaning there is at most one order record per element.
`max_orders` can be chosen lower if not every `order_func_nb` leads to a filled order, to save memory.
You can use `SimulationContext.last_oidx` to get the index of the last filled order of each column.
## Example
Before filling, each order record looks like this:
```python
np.array([(-8070450532247928832, -8070450532247928832, 4, 0., 0., 0., 5764616306889786413)]
```
After filling, it becomes like this:
```python
np.array([(0, 0, 1, 50., 1., 0., 1)]
```
"""
__pdoc__['SimulationContext.log_records'] = """Log records.
Similar to `SimulationContext.order_records` but of type `log_dt` and index `SimulationContext.last_lidx`."""
__pdoc__['SimulationContext.last_cash'] = """Last cash per column or group with cash sharing.
Has the same shape as `SimulationContext.init_cash`.
Gets updated right after `order_func_nb`.
"""
__pdoc__['SimulationContext.last_position'] = """Last position per column.
Has shape `(target_shape[1],)`.
Gets updated right after `order_func_nb`.
"""
__pdoc__['SimulationContext.last_debt'] = """Last debt from shorting per column.
Debt is the total value from shorting that hasn't been covered yet. Used to update `OrderContext.free_cash_now`.
Has shape `(target_shape[1],)`.
Gets updated right after `order_func_nb`.
"""
__pdoc__['SimulationContext.last_free_cash'] = """Last free cash per column or group with cash sharing.
Free cash never goes above the initial level, because an operation always costs money.
Has shape `(target_shape[1],)`.
Gets updated right after `order_func_nb`.
"""
__pdoc__['SimulationContext.last_val_price'] = """Last valuation price per column.
Has shape `(target_shape[1],)`.
Enables `SizeType.Value`, `SizeType.TargetValue`, and `SizeType.TargetPercent`.
Gets multiplied by the current position to get the value of the column (see `SimulationContext.last_value`).
Defaults to the `SimulationContext.close` before `post_segment_func_nb`.
If `SimulationContext.ffill_val_price`, gets updated only if `SimulationContext.close` is not NaN.
For example, close of `[1, 2, np.nan, np.nan, 5]` yields valuation price of `[1, 2, 2, 2, 5]`.
Also gets updated right after `pre_segment_func_nb` - you can use `pre_segment_func_nb` to
override `last_val_price` in-place, such that `order_func_nb` can use the new group value.
You are not allowed to use `-np.inf` or `np.inf` - only finite values.
If `SimulationContext.update_value`, gets also updated right after `order_func_nb` using
filled order price as the latest known price.
!!! note
Since the previous `SimulationContext.close` is NaN in the first row, the first `last_val_price` is also NaN.
Overriding `last_val_price` with NaN won't apply `SimulationContext.ffill_val_price`,
so your entire group will become NaN.
## Example
Consider 10 units in column 1 and 20 units in column 2. The previous close of them is
$40 and $50 respectively, which is also the default valuation price in the current row,
available as `last_val_price` in `pre_segment_func_nb`. If both columns are in the same group
with cash sharing, the group is valued at $1400 before any `order_func_nb` is called, and can
be later accessed via `OrderContext.value_now`.
"""
__pdoc__['SimulationContext.last_value'] = """Last value per column or group with cash sharing.
Has the same shape as `SimulationContext.init_cash`.
Calculated by multiplying valuation price by the current position.
The value of each column in a group with cash sharing is summed to get the value of the entire group.
Gets updated using `SimulationContext.last_val_price` after `pre_segment_func_nb` and
before `post_segment_func_nb`. If `SimulationContext.update_value`, gets also updated right after
`order_func_nb` using filled order price as the latest known price (the difference will be minimal,
only affected by costs).
"""
__pdoc__['SimulationContext.second_last_value'] = """Second-last value per column or group with cash sharing.
Has the same shape as `SimulationContext.last_value`.
Contains the latest known value two rows before (`i - 2`) to be compared either with the latest known value
one row before (`i - 1`) or now (`i`).
Gets updated at the end of each segment/row.
"""
__pdoc__['SimulationContext.last_return'] = """Last return per column or group with cash sharing.
Has the same shape as `SimulationContext.last_value`.
Calculated by comparing `SimulationContext.last_value` to `SimulationContext.second_last_value`.
Gets updated each time `SimulationContext.last_value` is updated.
"""
__pdoc__['SimulationContext.last_oidx'] = """Index of the last order record of each column.
Points to `SimulationContext.order_records` and has shape `(target_shape[1],)`.
## Example
`last_oidx` of `np.array([1, 100, -1])` means the last filled order is `order_records[1]` for the
first column, `order_records[100]` for the second column, and no orders have been filled yet
for the third column.
"""
__pdoc__['SimulationContext.last_lidx'] = """Index of the last log record of each column.
Similar to `SimulationContext.last_oidx` but for log records.
"""
__pdoc__['SimulationContext.last_pos_record'] = """Last position record of each column.
It's a 1-dimensional array with records of type `position_dt`.
Has shape `(target_shape[1],)`.
The array is initialized with empty records first (they contain random data)
and the field `id` is set to -1. Once the first position is entered in a column,
the `id` becomes 0 and the record materializes. Once the position is closed, the record
fixes its identifier and other data until the next position is entered.
The fields `entry_price` and `exit_price` are average entry and exit price respectively.
The fields `pnl` and `return` contain statistics as if the position has been closed and are
re-calculated using `SimulationContext.last_val_price` after `pre_segment_func_nb`
(in case `SimulationContext.last_val_price` has been overridden) and before `post_segment_func_nb`.
!!! note
In an open position record, the field `exit_price` doesn't reflect the latest valuation price,
but keeps the average price at which the position has been reduced.
The position record is updated after successfully filling an order (after `order_func_nb` and
before `post_order_func_nb`).
## Example
Consider a simulation that orders `order_size` for `order_price` and $1 fixed fees.
Here's order info from `order_func_nb` and the updated position info from `post_order_func_nb`:
```plaintext
order_size order_price id col size entry_idx entry_price \\
0 NaN 1 -1 0 1.0 13 14.000000
1 0.5 2 0 0 0.5 1 2.000000
2 1.0 3 0 0 1.5 1 2.666667
3 NaN 4 0 0 1.5 1 2.666667
4 -1.0 5 0 0 1.5 1 2.666667
5 -0.5 6 0 0 1.5 1 2.666667
6 NaN 7 0 0 1.5 1 2.666667
7 -0.5 8 1 0 0.5 7 8.000000
8 -1.0 9 1 0 1.5 7 8.666667
9 1.0 10 1 0 1.5 7 8.666667
10 0.5 11 1 0 1.5 7 8.666667
11 1.0 12 2 0 1.0 11 12.000000
12 -2.0 13 3 0 1.0 12 13.000000
13 2.0 14 4 0 1.0 13 14.000000
entry_fees exit_idx exit_price exit_fees pnl return direction status
0 0.5 -1 NaN 0.0 -0.50 -0.035714 0 0
1 1.0 -1 NaN 0.0 -1.00 -1.000000 0 0
2 2.0 -1 NaN 0.0 -1.50 -0.375000 0 0
3 2.0 -1 NaN 0.0 -0.75 -0.187500 0 0
4 2.0 -1 5.000000 1.0 0.50 0.125000 0 0
5 2.0 5 5.333333 2.0 0.00 0.000000 0 1
6 2.0 5 5.333333 2.0 0.00 0.000000 0 1
7 1.0 -1 NaN 0.0 -1.00 -0.250000 1 0
8 2.0 -1 NaN 0.0 -2.50 -0.192308 1 0
9 2.0 -1 10.000000 1.0 -5.00 -0.384615 1 0
10 2.0 10 10.333333 2.0 -6.50 -0.500000 1 1
11 1.0 -1 NaN 0.0 -1.00 -0.083333 0 0
12 0.5 -1 NaN 0.0 -0.50 -0.038462 1 0
13 0.5 -1 NaN 0.0 -0.50 -0.035714 0 0
```
"""
class GroupContext(tp.NamedTuple):
target_shape: tp.Shape
close: tp.Array2d
group_lens: tp.Array1d
init_cash: tp.Array1d
cash_sharing: bool
call_seq: tp.Array2d
segment_mask: tp.Array2d
ffill_val_price: bool
update_value: bool
order_records: tp.RecordArray
log_records: tp.RecordArray
last_cash: tp.Array1d
last_position: tp.Array1d
last_debt: tp.Array1d
last_free_cash: tp.Array1d
last_val_price: tp.Array1d
last_value: tp.Array1d
second_last_value: tp.Array1d
last_return: tp.Array1d
last_oidx: tp.Array1d
last_lidx: tp.Array1d
last_pos_record: tp.RecordArray
group: int
group_len: int
from_col: int
to_col: int
__pdoc__['GroupContext'] = """A named tuple representing the context of a group.
A group is a set of nearby columns that are somehow related (for example, by sharing the same capital).
In each row, the columns under the same group are bound to the same segment.
Contains all fields from `SimulationContext` plus fields describing the current group.
Passed to `pre_group_func_nb` and `post_group_func_nb`.
## Example
Consider a group of three columns, a group of two columns, and one more column:
| group | group_len | from_col | to_col |
| ----- | --------- | -------- | ------ |
| 0 | 3 | 0 | 3 |
| 1 | 2 | 3 | 5 |
| 2 | 1 | 5 | 6 |
"""
for field in GroupContext._fields:
if field in SimulationContext._fields:
__pdoc__['GroupContext.' + field] = f"See `SimulationContext.{field}`."
__pdoc__['GroupContext.group'] = """Index of the current group.
Has range `[0, group_lens.shape[0])`.
"""
__pdoc__['GroupContext.group_len'] = """Number of columns in the current group.
Scalar value. Same as `group_lens[group]`.
"""
__pdoc__['GroupContext.from_col'] = """Index of the first column in the current group.
Has range `[0, target_shape[1])`.
"""
__pdoc__['GroupContext.to_col'] = """Index of the last column in the current group plus one.
Has range `[1, target_shape[1] + 1)`.
If columns are not grouped, equals to `from_col + 1`.
!!! warning
In the last group, `to_col` points at a column that doesn't exist.
"""
class RowContext(tp.NamedTuple):
target_shape: tp.Shape
close: tp.Array2d
group_lens: tp.Array1d
init_cash: tp.Array1d
cash_sharing: bool
call_seq: tp.Array2d
segment_mask: tp.Array2d
ffill_val_price: bool
update_value: bool
order_records: tp.RecordArray
log_records: tp.RecordArray
last_cash: tp.Array1d
last_position: tp.Array1d
last_debt: tp.Array1d
last_free_cash: tp.Array1d
last_val_price: tp.Array1d
last_value: tp.Array1d
second_last_value: tp.Array1d
last_return: tp.Array1d
last_oidx: tp.Array1d
last_lidx: tp.Array1d
last_pos_record: tp.RecordArray
i: int
__pdoc__['RowContext'] = """A named tuple representing the context of a row.
A row is a time step in which segments are executed.
Contains all fields from `SimulationContext` plus fields describing the current row.
Passed to `pre_row_func_nb` and `post_row_func_nb`.
"""
for field in RowContext._fields:
if field in SimulationContext._fields:
__pdoc__['RowContext.' + field] = f"See `SimulationContext.{field}`."
__pdoc__['RowContext.i'] = """Index of the current row.
Has range `[0, target_shape[0])`.
"""
class SegmentContext(tp.NamedTuple):
target_shape: tp.Shape
close: tp.Array2d
group_lens: tp.Array1d
init_cash: tp.Array1d
cash_sharing: bool
call_seq: tp.Array2d
segment_mask: tp.Array2d
ffill_val_price: bool
update_value: bool
order_records: tp.RecordArray
log_records: tp.RecordArray
last_cash: tp.Array1d
last_position: tp.Array1d
last_debt: tp.Array1d
last_free_cash: tp.Array1d
last_val_price: tp.Array1d
last_value: tp.Array1d
second_last_value: tp.Array1d
last_return: tp.Array1d
last_oidx: tp.Array1d
last_lidx: tp.Array1d
last_pos_record: tp.RecordArray
group: int
group_len: int
from_col: int
to_col: int
i: int
call_seq_now: tp.Array1d
__pdoc__['SegmentContext'] = """A named tuple representing the context of a segment.
A segment is an intersection between groups and rows. It's an entity that defines
how and in which order elements within the same group and row are processed.
Contains all fields from `SimulationContext`, `GroupContext`, and `RowContext`, plus fields
describing the current segment.
Passed to `pre_segment_func_nb` and `post_segment_func_nb`.
"""
for field in SegmentContext._fields:
if field in SimulationContext._fields:
__pdoc__['SegmentContext.' + field] = f"See `SimulationContext.{field}`."
elif field in GroupContext._fields:
__pdoc__['SegmentContext.' + field] = f"See `GroupContext.{field}`."
elif field in RowContext._fields:
__pdoc__['SegmentContext.' + field] = f"See `RowContext.{field}`."
__pdoc__['SegmentContext.call_seq_now'] = """Sequence of calls within the current segment.
Has shape `(group_len,)`.
Each value in this sequence should indicate the position of column in the group to
call next. Processing goes always from left to right.
You can use `pre_segment_func_nb` to override `call_seq_now`.
## Example
`[2, 0, 1]` would first call column 2, then 0, and finally 1.
"""
class OrderContext(tp.NamedTuple):
target_shape: tp.Shape
close: tp.Array2d
group_lens: tp.Array1d
init_cash: tp.Array1d
cash_sharing: bool
call_seq: tp.Array2d
segment_mask: tp.Array2d
ffill_val_price: bool
update_value: bool
order_records: tp.RecordArray
log_records: tp.RecordArray
last_cash: tp.Array1d
last_position: tp.Array1d
last_debt: tp.Array1d
last_free_cash: tp.Array1d
last_val_price: tp.Array1d
last_value: tp.Array1d
second_last_value: tp.Array1d
last_return: tp.Array1d
last_oidx: tp.Array1d
last_lidx: tp.Array1d
last_pos_record: tp.RecordArray
group: int
group_len: int
from_col: int
to_col: int
i: int
call_seq_now: tp.Array1d
col: int
call_idx: int
cash_now: float
position_now: float
debt_now: float
free_cash_now: float
val_price_now: float
value_now: float
return_now: float
pos_record_now: tp.Record
__pdoc__['OrderContext'] = """A named tuple representing the context of an order.
Contains all fields from `SegmentContext` plus fields describing the current state.
Passed to `order_func_nb`.
"""
for field in OrderContext._fields:
if field in SimulationContext._fields:
__pdoc__['OrderContext.' + field] = f"See `SimulationContext.{field}`."
elif field in GroupContext._fields:
__pdoc__['OrderContext.' + field] = f"See `GroupContext.{field}`."
elif field in RowContext._fields:
__pdoc__['OrderContext.' + field] = f"See `RowContext.{field}`."
elif field in SegmentContext._fields:
__pdoc__['OrderContext.' + field] = f"See `SegmentContext.{field}`."
__pdoc__['OrderContext.col'] = """Current column.
Has range `[0, target_shape[1])` and is always within `[from_col, to_col)`.
"""
__pdoc__['OrderContext.call_idx'] = """Index of the current call in `SegmentContext.call_seq_now`.
Has range `[0, group_len)`.
"""
__pdoc__['OrderContext.cash_now'] = "`SimulationContext.last_cash` for the current column/group."
__pdoc__['OrderContext.position_now'] = "`SimulationContext.last_position` for the current column."
__pdoc__['OrderContext.debt_now'] = "`SimulationContext.last_debt` for the current column."
__pdoc__['OrderContext.free_cash_now'] = "`SimulationContext.last_free_cash` for the current column/group."
__pdoc__['OrderContext.val_price_now'] = "`SimulationContext.last_val_price` for the current column."
__pdoc__['OrderContext.value_now'] = "`SimulationContext.last_value` for the current column/group."
__pdoc__['OrderContext.return_now'] = "`SimulationContext.last_return` for the current column/group."
__pdoc__['OrderContext.pos_record_now'] = "`SimulationContext.last_pos_record` for the current column."
class PostOrderContext(tp.NamedTuple):
target_shape: tp.Shape
close: tp.Array2d
group_lens: tp.Array1d
init_cash: tp.Array1d
cash_sharing: bool
call_seq: tp.Array2d
segment_mask: tp.Array2d
ffill_val_price: bool
update_value: bool
order_records: tp.RecordArray
log_records: tp.RecordArray
last_cash: tp.Array1d
last_position: tp.Array1d
last_debt: tp.Array1d
last_free_cash: tp.Array1d
last_val_price: tp.Array1d
last_value: tp.Array1d
second_last_value: tp.Array1d
last_return: tp.Array1d
last_oidx: tp.Array1d
last_lidx: tp.Array1d
last_pos_record: tp.RecordArray
group: int
group_len: int
from_col: int
to_col: int
i: int
call_seq_now: tp.Array1d
col: int
call_idx: int
cash_before: float
position_before: float
debt_before: float
free_cash_before: float
val_price_before: float
value_before: float
order_result: "OrderResult"
cash_now: float
position_now: float
debt_now: float
free_cash_now: float
val_price_now: float
value_now: float
return_now: float
pos_record_now: tp.Record
__pdoc__['PostOrderContext'] = """A named tuple representing the context after an order has been processed.
Contains all fields from `OrderContext` plus fields describing the order result and the previous state.
Passed to `post_order_func_nb`.
"""
for field in PostOrderContext._fields:
if field in SimulationContext._fields:
__pdoc__['PostOrderContext.' + field] = f"See `SimulationContext.{field}`."
elif field in GroupContext._fields:
__pdoc__['PostOrderContext.' + field] = f"See `GroupContext.{field}`."
elif field in RowContext._fields:
__pdoc__['PostOrderContext.' + field] = f"See `RowContext.{field}`."
elif field in SegmentContext._fields:
__pdoc__['PostOrderContext.' + field] = f"See `SegmentContext.{field}`."
elif field in OrderContext._fields:
__pdoc__['PostOrderContext.' + field] = f"See `OrderContext.{field}`."
__pdoc__['PostOrderContext.cash_before'] = "`OrderContext.cash_now` before execution."
__pdoc__['PostOrderContext.position_before'] = "`OrderContext.position_now` before execution."
__pdoc__['PostOrderContext.debt_before'] = "`OrderContext.debt_now` before execution."
__pdoc__['PostOrderContext.free_cash_before'] = "`OrderContext.free_cash_now` before execution."
__pdoc__['PostOrderContext.val_price_before'] = "`OrderContext.val_price_now` before execution."
__pdoc__['PostOrderContext.value_before'] = "`OrderContext.value_now` before execution."
__pdoc__['PostOrderContext.order_result'] = """Order result of type `OrderResult`.
Can be used to check whether the order has been filled, ignored, or rejected.
"""
__pdoc__['PostOrderContext.cash_now'] = "`OrderContext.cash_now` after execution."
__pdoc__['PostOrderContext.position_now'] = "`OrderContext.position_now` after execution."
__pdoc__['PostOrderContext.debt_now'] = "`OrderContext.debt_now` after execution."
__pdoc__['PostOrderContext.free_cash_now'] = "`OrderContext.free_cash_now` after execution."
__pdoc__['PostOrderContext.val_price_now'] = """`OrderContext.val_price_now` after execution.
If `SimulationContext.update_value`, gets replaced with the fill price,
as it becomes the most recently known price. Otherwise, stays the same.
"""
__pdoc__['PostOrderContext.value_now'] = """`OrderContext.value_now` after execution.
If `SimulationContext.update_value`, gets updated with the new cash and value of the column. Otherwise, stays the same.
"""
__pdoc__['PostOrderContext.return_now'] = "`OrderContext.return_now` after execution."
__pdoc__['PostOrderContext.pos_record_now'] = "`OrderContext.pos_record_now` after execution."
class Order(tp.NamedTuple):
size: float = np.inf
price: float = np.inf
size_type: int = SizeType.Amount
direction: int = Direction.All
fees: float = 0.0
fixed_fees: float = 0.0
slippage: float = 0.0
min_size: float = 0.0
max_size: float = np.inf
reject_prob: float = 0.0
lock_cash: bool = False
allow_partial: bool = True
raise_reject: bool = False
log: bool = False
__pdoc__['Order'] = """A named tuple representing an order.
!!! note
Currently, Numba has issues with using defaults when filling named tuples.
Use `vectorbt.portfolio.nb.order_nb` to create an order."""
__pdoc__['Order.size'] = """Size in units.
Behavior depends upon `Order.size_type` and `Order.direction`.
For any fixed size:
* Set to any number to buy/sell some fixed amount or value.
Longs are limited by the current cash balance, while shorts are only limited if `Order.lock_cash`.
* Set to `np.inf` to buy for all cash, or `-np.inf` to sell for all free cash.
If `Order.direction` is not `Direction.All`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
For any target size:
* Set to any number to buy/sell an amount relative to the current position or value.
* Set to 0 to close the current position.
* Set to `np.nan` to skip.
"""
__pdoc__['Order.price'] = """Price per unit.
Final price will depend upon slippage.
* If `-np.inf`, replaced by the previous close (~ the current open).
* If `np.inf`, replaced by the current close.
!!! note
Make sure to use timestamps that come between (and ideally not including) the current open and close."""
__pdoc__['Order.size_type'] = "See `SizeType`."
__pdoc__['Order.direction'] = "See `Direction`."
__pdoc__['Order.fees'] = """Fees in percentage of the order value.
Note that 0.01 = 1%."""
__pdoc__['Order.fixed_fees'] = "Fixed amount of fees to pay for this order."
__pdoc__['Order.slippage'] = """Slippage in percentage of `Order.price`.
Note that 0.01 = 1%."""
__pdoc__['Order.min_size'] = """Minimum size in both directions.
Lower than that will be rejected."""
__pdoc__['Order.max_size'] = """Maximum size in both directions.
Higher than that will be partly filled."""
__pdoc__['Order.reject_prob'] = """Probability of rejecting this order to simulate a random rejection event.
Not everything goes smoothly in real life. Use random rejections to test your order management for robustness."""
__pdoc__['Order.lock_cash'] = """Whether to lock cash when shorting.
Keeps free cash from turning negative."""
__pdoc__['Order.allow_partial'] = """Whether to allow partial fill.
Otherwise, the order gets rejected.
Does not apply when `Order.size` is `np.inf`."""
__pdoc__['Order.raise_reject'] = """Whether to raise exception if order has been rejected.
Terminates the simulation."""
__pdoc__['Order.log'] = """Whether to log this order by filling a log record.
Remember to increase `max_logs`."""
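# A minimal usage sketch for constructing an order, following the note above that
# `vectorbt.portfolio.nb.order_nb` should be used instead of filling the tuple directly.
# The exact keyword names are an assumption shown for illustration only:
#
#     from vectorbt.portfolio.nb import order_nb
#     order = order_nb(size=1.0, price=100.0, fees=0.001)  # buy 1 unit at 100 with 0.1% fees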
NoOrder = Order(
np.nan,
np.nan,
-1,
-1,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
False,
False,
False,
False
)
"""_"""
__pdoc__['NoOrder'] = "Order that should not be processed."
class OrderResult(tp.NamedTuple):
size: float
price: float
fees: float
side: int
status: int
status_info: int
__pdoc__['OrderResult'] = "A named tuple representing an order result."
__pdoc__['OrderResult.size'] = "Filled size."
__pdoc__['OrderResult.price'] = "Filled price per unit, adjusted with slippage."
__pdoc__['OrderResult.fees'] = "Total fees paid for this order."
__pdoc__['OrderResult.side'] = "See `OrderSide`."
__pdoc__['OrderResult.status'] = "See `OrderStatus`."
__pdoc__['OrderResult.status_info'] = "See `StatusInfo`."
# ############# Records ############# #
order_dt = np.dtype([
('id', np.int_),
('idx', np.int_),
('col', np.int_),
('size', np.float_),
('price', np.float_),
('fees', np.float_),
('side', np.int_),
], align=True)
"""_"""
__pdoc__['order_dt'] = f"""`np.dtype` of order records.
```json
{to_doc(order_dt)}
```
"""
_trade_fields = [
('id', np.int_),
('col', np.int_),
('size', np.float_),
('entry_idx', np.int_),
('entry_price', np.float_),
('entry_fees', np.float_),
('exit_idx', np.int_),
('exit_price', np.float_),
('exit_fees', np.float_),
('pnl', np.float_),
('return', np.float_),
('direction', np.int_),
('status', np.int_),
('position_id', np.int_)
]
trade_dt = np.dtype(_trade_fields, align=True)
"""_"""
__pdoc__['trade_dt'] = f"""`np.dtype` of trade records.
```json
{to_doc(trade_dt)}
```
"""
_position_fields = _trade_fields[:-1]
position_dt = np.dtype(_position_fields, align=True)
"""_"""
__pdoc__['position_dt'] = f"""`np.dtype` of position records.
```json
{to_doc(position_dt)}
```
"""
_log_fields = [
('id', np.int_),
('idx', np.int_),
('col', np.int_),
('group', np.int_),
('cash', np.float_),
('position', np.float_),
('debt', np.float_),
('free_cash', np.float_),
('val_price', np.float_),
('value', np.float_),
('size', np.float_),
('price', np.float_),
('size_type', np.int_),
('direction', np.int_),
('fees', np.float_),
('fixed_fees', np.float_),
('slippage', np.float_),
('min_size', np.float_),
('max_size', np.float_),
('reject_prob', np.float_),
('lock_cash', np.bool_),
('allow_partial', np.bool_),
('raise_reject', np.bool_),
('log', np.bool_),
('new_cash', np.float_),
('new_position', np.float_),
('new_debt', np.float_),
('new_free_cash', np.float_),
('new_val_price', np.float_),
('new_value', np.float_),
('res_size', np.float_),
('res_price', np.float_),
('res_fees', np.float_),
('res_side', np.int_),
('res_status', np.int_),
('res_status_info', np.int_),
('order_id', np.int_)
]
log_dt = np.dtype(_log_fields, align=True)
"""_"""
__pdoc__['log_dt'] = f"""`np.dtype` of log records.
```json
{to_doc(log_dt)}
```
"""
|
py | 1a4739a32073dddf44fbaac97ba5f3b729878c65 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import ImageStats
def test_ImageStats_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
mask_file=dict(argstr='',
),
op_string=dict(argstr='%s',
mandatory=True,
position=3,
),
output_type=dict(),
split_4d=dict(argstr='-t',
position=1,
),
terminal_output=dict(nohash=True,
),
)
inputs = ImageStats.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ImageStats_outputs():
output_map = dict(out_stat=dict(),
)
outputs = ImageStats.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | 1a473a0ed0f7c970440ce310e4f062e75bc9e1a0 | import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras import layers
from tensorflow.keras import models
from models.backbone.resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from models.backbone.resnext import ResNeXt50, ResNeXt101
from models.backbone.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4
from models.backbone.efficientnet import EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetL2
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications import VGG19
from models.backbone.senet import SENet154, SEResNet50, SEResNet101, SEResNet152, SEResNeXt50, SEResNeXt101
DEFAULT_SKIP_CONNECTIONS = {
'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2'),
'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4', 'block2_conv2'),
'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
'inceptionv3': (228, 86, 16, 9),
'inceptionresnetv2': (594, 260, 16, 9),
'densenet121': (311, 139, 51, 4),
'densenet169': (367, 139, 51, 4),
'densenet201': (479, 139, 51, 4),
'efficientnetb0': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb1': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb2': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb3': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb4': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb5': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb6': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetb7': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'efficientnetl2': ('block6a_expand_activation', 'block4a_expand_activation',
'block3a_expand_activation', 'block2a_expand_activation'),
'seresnext50': ('activation_65', 'activation_35', 'activation_15', 'activation'),
'seresnext101': ('activation_150', 'activation_35', 'activation_15', 'activation'),
'seresnet50': ('activation_65', 'activation_35', 'activation_15', 'activation'),
'seresnet101': ('activation_150', 'activation_35', 'activation_15', 'activation'),
'seresnet152': ('activation_235', 'activation_55', 'activation_15', 'activation'),
'senet154': ('activation_237', 'activation_55', 'activation_17', 'activation_2'),
}
# ---------------------------------------------------------------------
# PSP Model
# ---------------------------------------------------------------------
def PSPNet(
backbone_name='vgg16',
input_shape=(384, 384, 3),
classes=21,
activation='softmax',
weights=None,
encoder_weights='imagenet',
encoder_freeze=False,
downsample_factor=8,
psp_conv_filters=512,
psp_pooling_type='avg',
psp_use_batchnorm=True,
psp_dropout=None,
**kwargs
):
"""PSPNet_ is a fully convolution neural network for image semantic segmentation
Args:
backbone_name: name of classification model used as feature
extractor to build segmentation model.
input_shape: shape of input data/image ``(H, W, C)``.
``H`` and ``W`` should be divisible by ``6 * downsample_factor`` and **NOT** ``None``!
classes: a number of classes for output (output shape - ``(h, w, classes)``).
activation: name of one of ``keras.activations`` for last model layer
(e.g. ``sigmoid``, ``softmax``, ``linear``).
weights: optional, path to model weights.
encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
encoder_freeze: if ``True`` set all layers of encoder (backbone model) as non-trainable.
downsample_factor: one of 4, 8 and 16. Downsampling rate or in other words backbone depth
to construct PSP module on it.
psp_conv_filters: number of filters in ``Conv2D`` layer in each PSP block.
psp_pooling_type: one of 'avg', 'max'. PSP block pooling type (maximum or average).
psp_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
is used.
psp_dropout: dropout rate between 0 and 1.
Returns:
``keras.models.Model``: **PSPNet**
.. _PSPNet:
https://arxiv.org/pdf/1612.01105.pdf
"""
# control image input shape
check_input_shape(input_shape, downsample_factor)
backbone = get_backbone(backbone_name,
input_shape=input_shape,
weights=encoder_weights,
include_top=False)
feature_layers = DEFAULT_SKIP_CONNECTIONS[backbone_name]
if downsample_factor == 16:
psp_layer_idx = feature_layers[0]
elif downsample_factor == 8:
psp_layer_idx = feature_layers[1]
elif downsample_factor == 4:
psp_layer_idx = feature_layers[2]
else:
raise ValueError('Unsupported factor - `{}`, Use 4, 8 or 16.'.format(downsample_factor))
model = build_psp(
backbone,
psp_layer_idx,
pooling_type=psp_pooling_type,
conv_filters=psp_conv_filters,
use_batchnorm=psp_use_batchnorm,
final_upsampling_factor=downsample_factor,
classes=classes,
activation=activation,
dropout=psp_dropout,
)
# lock encoder weights for fine-tuning
if encoder_freeze:
freeze_model(backbone, **kwargs)
# loading model weights
if weights is not None:
model.load_weights(weights)
return model
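# A small worked example of the shape constraint documented above (shapes are illustrative):
# with downsample_factor=8 the minimum unit is 6 * 8 = 48, so H and W must be multiples of 48.
#
#     # PSPNet('resnet34', input_shape=(384, 384, 3))   # 384 = 8 * 48 -> accepted
#     # PSPNet('resnet34', input_shape=(400, 400, 3))   # 400 % 48 != 0 -> check_input_shape raises ValueError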
# ---------------------------------------------------------------------
# PSP Decoder
# ---------------------------------------------------------------------
def build_psp(
backbone,
psp_layer_idx,
pooling_type='avg',
conv_filters=512,
use_batchnorm=True,
final_upsampling_factor=8,
classes=21,
activation='softmax',
dropout=None,
):
input_ = backbone.input
x = (backbone.get_layer(name=psp_layer_idx).output if isinstance(psp_layer_idx, str)
else backbone.get_layer(index=psp_layer_idx).output)
# x = (get_layer_number(backbone, psp_layer_idx) if isinstance(psp_layer_idx, str) else psp_layer_idx)
# build spatial pyramid
x1 = SpatialContextBlock(1, conv_filters, pooling_type, use_batchnorm)(x)
x2 = SpatialContextBlock(2, conv_filters, pooling_type, use_batchnorm)(x)
x3 = SpatialContextBlock(3, conv_filters, pooling_type, use_batchnorm)(x)
x6 = SpatialContextBlock(6, conv_filters, pooling_type, use_batchnorm)(x)
# aggregate spatial pyramid
concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1
x = layers.Concatenate(axis=concat_axis, name='psp_concat')([x, x1, x2, x3, x6])
x = Conv1x1BnReLU(conv_filters, use_batchnorm, name='aggregation')(x)
# model regularization
if dropout is not None:
x = layers.SpatialDropout2D(dropout, name='spatial_dropout')(x)
# model head
x = layers.Conv2D(
filters=classes,
kernel_size=(3, 3),
padding='same',
kernel_initializer='glorot_uniform',
name='final_conv',
)(x)
x = layers.UpSampling2D(final_upsampling_factor, name='final_upsampling', interpolation='bilinear')(x)
if activation in {'softmax', 'sigmoid'}:
x = layers.Activation(activation, name=activation)(x)
model = models.Model(input_, x)
return model
# ---------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------
def Conv2dBn(
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_batchnorm=False,
**kwargs
):
"""Extension of Conv2D layer with batchnorm"""
conv_name, act_name, bn_name = None, None, None
block_name = kwargs.pop('name', None)
if block_name is not None:
conv_name = block_name + '_conv'
if block_name is not None and activation is not None:
act_str = activation.__name__ if callable(activation) else str(activation)
act_name = block_name + '_' + act_str
if block_name is not None and use_batchnorm:
bn_name = block_name + '_bn'
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def wrapper(input_tensor):
x = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=None,
use_bias=not use_batchnorm,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
name=conv_name,
)(input_tensor)
if use_batchnorm:
x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)
if activation:
x = layers.Activation(activation, name=act_name)(x)
return x
return wrapper
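# Usage sketch for the wrapper above (names are illustrative): Conv2dBn returns a callable,
# so a block such as
#
#     # x = Conv2dBn(64, 3, padding='same', activation='relu', use_batchnorm=True, name='blk')(x)
#
# expands into Conv2D -> BatchNormalization -> Activation with layers named
# 'blk_conv', 'blk_bn' and 'blk_relu'.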
def check_input_shape(input_shape, factor):
if input_shape is None:
raise ValueError("Input shape should be a tuple of 3 integers, not None!")
h, w = input_shape[:2] if backend.image_data_format() == 'channels_last' else input_shape[1:]
min_size = factor * 6
is_wrong_shape = (
h % min_size != 0 or w % min_size != 0 or
h < min_size or w < min_size
)
if is_wrong_shape:
raise ValueError('Wrong shape {}, input H and W should '.format(input_shape) +
'be divisible by `{}`'.format(min_size))
# ---------------------------------------------------------------------
# Blocks
# ---------------------------------------------------------------------
def Conv1x1BnReLU(filters, use_batchnorm, name=None):
def wrapper(input_tensor):
return Conv2dBn(
filters,
kernel_size=1,
activation='relu',
kernel_initializer='he_uniform',
padding='same',
use_batchnorm=use_batchnorm,
name=name
)(input_tensor)
return wrapper
def SpatialContextBlock(
level,
conv_filters=512,
pooling_type='avg',
use_batchnorm=True,
):
if pooling_type not in ('max', 'avg'):
raise ValueError('Unsupported pooling type - `{}`.'.format(pooling_type) +
'Use `avg` or `max`.')
Pooling2D = layers.MaxPool2D if pooling_type == 'max' else layers.AveragePooling2D
pooling_name = 'psp_level{}_pooling'.format(level)
conv_block_name = 'psp_level{}'.format(level)
upsampling_name = 'psp_level{}_upsampling'.format(level)
def wrapper(input_tensor):
# extract input feature maps size (h, and w dimensions)
input_shape = backend.int_shape(input_tensor)
spatial_size = input_shape[1:3] if backend.image_data_format() == 'channels_last' else input_shape[2:]
# Compute the kernel and stride sizes according to how large the final feature map will be
# When the kernel factor and strides are equal, then we can compute the final feature map factor
# by simply dividing the current factor by the kernel or stride factor
# The final feature map sizes are 1x1, 2x2, 3x3, and 6x6.
pool_size = up_size = [spatial_size[0] // level, spatial_size[1] // level]
x = Pooling2D(pool_size, strides=pool_size, padding='same', name=pooling_name)(input_tensor)
x = Conv1x1BnReLU(conv_filters, use_batchnorm, name=conv_block_name)(x)
x = layers.UpSampling2D(up_size, interpolation='bilinear', name=upsampling_name)(x)
return x
return wrapper
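# Worked example of the pooling arithmetic above (numbers are illustrative): for a 48x48
# input feature map and level=6, pool_size = 48 // 6 = 8, so an 8x8 kernel with stride 8
# produces a 6x6 map, which the final UpSampling2D(8) stretches back to 48x48 before
# concatenation with the other pyramid levels.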
def freeze_model(model, **kwargs):
"""Set all layers non trainable, excluding BatchNormalization layers"""
for layer in model.layers:
if not isinstance(layer, layers.BatchNormalization):
layer.trainable = False
return
def filter_keras_submodules(kwargs):
"""Selects only arguments that define keras_application submodules. """
submodule_keys = kwargs.keys() & {'backend', 'layers', 'models', 'utils'}
return {key: kwargs[key] for key in submodule_keys}
def get_layer_number(model, layer_name):
"""
Help find layer in Keras model by name
Args:
model: Keras `Model`
layer_name: str, name of layer
Returns:
index of layer
Raises:
ValueError: if model does not contains layer with such name
"""
for i, l in enumerate(model.layers):
if l.name == layer_name:
return i
raise ValueError('No layer with name {} in model {}.'.format(layer_name, model.name))
backbones = {
"vgg16": VGG16,
"vgg19": VGG19,
"resnet18": ResNet18,
"resnet34": ResNet34,
"resnet50": ResNet50,
"resnet101": ResNet101,
"resnet152": ResNet152,
"resnext50": ResNeXt50,
"resnext101": ResNeXt101,
"inceptionresnetv2": InceptionResNetV2,
"inceptionv3": InceptionV3,
"densenet121": DenseNet121,
"densenet169": DenseNet169,
"densenet201": DenseNet201,
"efficientnetb0": EfficientNetB0,
"efficientnetb1": EfficientNetB1,
"efficientnetb2": EfficientNetB2,
"efficientnetb3": EfficientNetB3,
"efficientnetb4": EfficientNetB4,
"efficientnetb5": EfficientNetB5,
"efficientnetb6": EfficientNetB6,
"efficientnetb7": EfficientNetB7,
"efficientnetl2": EfficientNetL2,
"seresnext50": SEResNeXt50,
"seresnext101": SEResNeXt101,
"seresnet50": SEResNet50,
"seresnet101": SEResNet101,
"seresnet152": SEResNet152,
'senet154': SENet154
}
def get_backbone(name, *args, **kwargs):
return backbones[name](*args, **kwargs)
if __name__ == "__main__":
model1 = PSPNet('efficientnetb4', (1200, 1200, 3), encoder_weights='imagenet')
model1.summary() |
py | 1a473b729e9008be2be6b0fdac26a483a381bf06 | from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns =[
url(r'^profile_edit/',views.profile_edit,name='edit_profile'),
url(r'^signup/$', views.signup, name='signup'),
url(r'^profile/', views.profile, name='profile'),
url(r'^home/', views.home, name='home'),
    url(r'^follow/(?P<operation>.+)/(?P<pk>\d+)/$', views.follower, name='follow'),
url(r'^comment/(?P<image_id>\d+)', views.add_comment, name='comment'),
]
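# Example resolution (the operation value is hypothetical): a request to /follow/follow/3/
# is routed to views.follower(request, operation='follow', pk='3'), and /comment/7 to
# views.add_comment(request, image_id='7'); captured groups arrive as strings.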
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) |
py | 1a473bdbc01d3ec4cafdd5584afbad16a9d0fc71 | # --Requires--:
# game.get_moves()
# game.execute_move()
# game.undo_move()
# game.is_final()
# game.get_score()
# game.get_states()
# get_board()
# get_turn()
# TODO: update find moves
import numpy as np
import time
class TicTacToe:
def __init__(self):
self.board = np.array([[[0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]]])
self.history = []
def get_moves(self):
return [x for x in range(9) if self.board[x // 3, x % 3, 0] == self.board[x // 3, x % 3, 1] == 0]
def get_legal_NN_output(self):
return [1 if self.board[x // 3, x % 3, 0] == self.board[x // 3, x % 3, 1] == 0 else 0 for x in range(9)]
# moves = []
# for x in range(9):
# if self.board[x // 3, x % 3, 0] == self.board[x // 3, x % 3, 1] == 0:
# moves.append(x)
# return moves
def execute_move(self, move):
self.board[move // 3, move % 3, len(self.history) % 2] = 1
self.history.append(move)
# poss_moves = self.get_moves()
# if move in poss_moves:
# self.board[move // 3, move % 3, len(self.history) % 2] = 1
# self.history.append(move)
# else:
# print('illegal move')
def undo_move(self):
if len(self.history) > 0:
move = self.history[-1]
self.board[move // 3, move % 3, (len(self.history) - 1) % 2] = 0
self.history.pop()
else:
print('could not undo move')
def _won(self):
player = 1 * (len(self.history) % 2 == 0)
for x in range(3):
# Horizontal
if self.board[x, 0, player] == self.board[x, 1, player] == self.board[x, 2, player] != 0:
return True
# Vertical
if self.board[0, x, player] == self.board[1, x, player] == self.board[2, x, player] != 0:
return True
# Diagonal
if self.board[0, 0, player] == self.board[1, 1, player] == self.board[2, 2, player] != 0:
return True
if self.board[0, 2, player] == self.board[1, 1, player] == self.board[2, 0, player] != 0:
                return True
        return False
def is_final(self):
if self._won():
return True
if len(self.history) == 9:
return True
return False
def get_score(self):
if self.is_final():
if self._won():
return 2
else:
return 1
else:
print('not final')
def get_outcome(self):
if self.is_final():
if self._won():
return [1, -1] if len(self.history) % 2 == 1 else [-1, 1]
else:
return [0, 0]
else:
print("not finished")
def get_state(self):
# return [str(self.get_board())]
return str(self.history)
def get_turn(self):
return len(self.history) % 2 if not self.is_final() else None
def get_board(self):
return self.board if len(self.history) % 2 == 0 else np.flip(self.board, -1)
def print_board(self):
for x in range(3):
string = '|'
for y in range(3):
string += 'X' * int(self.board[x, y, 0] == 1)
string += 'O' * int(self.board[x, y, 1] == 1)
string += ' ' * int(self.board[x, y, 0] == self.board[x, y, 1] == 0)
string += '|'
print(string)
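# Move encoding used above: a move m in [0, 8] maps to row = m // 3 and column = m % 3,
# so m = 4 marks the centre cell and m = 8 the bottom-right cell; get_moves() returns the
# indices of cells that neither player has claimed yet.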
# game = TicTacToe()
# game.print_board()
# while True:
# inp = int(input("Number:"))
# game.execute_move(inp)
# game.print_board()
# game.undo_move()
|
py | 1a473c2cf10d3dd1891fd8f02f4767baf1d2da5a | di = float(input('Money: R$'))
o = di / 3.27
print('With R${:.2f} you can buy US${:.2f}!'.format(di, o)) |
py | 1a473c6068b845a10394096d6a32ea83d404692c | from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ExponentialLR, StepLR
import torch.nn.functional as F
from sklearn import metrics
from sklearn.model_selection import KFold, StratifiedKFold
from torch.autograd import Variable
import os
import warnings
import math
import numpy as np
from tqdm import tqdm, trange
import time
import random
import csv
from sklearn.ensemble import RandomForestRegressor as RFR
import rdkit
from rdkit import Chem, DataStructs
from rdkit.Chem import QED
from joblib import dump, load
import threading
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
import joblib
import pickle
def normalize_desc(desc_array, desc_mean=None):
desc_array = np.array(desc_array).reshape(len(desc_array), -1)
ind = np.zeros(desc_array.shape)
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
try:
if np.isfinite(desc_array[i, j]):
ind[i, j] = 1
except:
pass
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
if ind[i, j] == 0:
desc_array[i, j] = 0
if desc_mean is None:
desc_mean = np.mean(desc_array, axis=0)
for i in range(desc_array.shape[0]):
for j in range(desc_array.shape[1]):
if ind[i, j] == 0:
desc_array[i, j] = desc_mean[j]
return desc_array, desc_mean
class Iterator(object):
"""Abstract base class for data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(n, batch_size, shuffle, seed)
if n < batch_size:
raise ValueError('Input data length is shorter than batch_size\nAdjust batch_size')
def reset(self):
self.batch_index = 0
def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
if self.batch_index == 0:
index_array = np.arange(n)
if shuffle:
index_array = np.random.permutation(n)
current_index = (self.batch_index * batch_size) % n
if n > current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = n - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class SmilesIterator(Iterator):
"""Iterator yielding data from a SMILES array.
# Arguments
x: Numpy array of SMILES input data.
y: Numpy array of targets data.
smiles_data_generator: Instance of `SmilesEnumerator`
to use for random SMILES generation.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
dtype: dtype to use for returned batch. Set to keras.backend.floatx if using Keras
"""
def __init__(self, x, y, smiles_data_generator,
batch_size=32, shuffle=False, seed=None,
dtype=np.float32
):
if y is not None and len(x) != len(y):
raise ValueError('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
self.x = np.asarray(x)
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.smiles_data_generator = smiles_data_generator
self.dtype = dtype
super(SmilesIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros(
tuple([current_batch_size] + [self.smiles_data_generator.pad, self.smiles_data_generator._charlen]),
dtype=self.dtype)
for i, j in enumerate(index_array):
smiles = self.x[j:j + 1]
x = self.smiles_data_generator.transform(smiles)
batch_x[i] = x
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
def get_desc(smiles, calc):
desc = []
processed_indices = []
invalid_indices = []
for i in range(len(smiles)):
sm = smiles[i]
try:
mol = Chem.MolFromSmiles(sm)
tmp = np.array(calc(mol))
desc.append(tmp)
processed_indices.append(i)
except:
invalid_indices.append(i)
desc_array = np.array(desc)
return desc_array, processed_indices, invalid_indices
def sanitize_smiles(smiles, canonical=True, throw_warning=False):
"""
Takes list of SMILES strings and returns list of their sanitized versions.
For definition of sanitized SMILES check
http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol
Parameters
----------
smiles: list
list of SMILES strings
canonical: bool (default True)
parameter specifying whether SMILES will be converted to canonical
format
throw_warning: bool (default False)
parameter specifying whether warnings will be thrown if a SMILES is
invalid
Returns
-------
new_smiles: list
list of SMILES and NaNs if SMILES string is invalid or unsanitized.
If canonical is True, returns list of canonical SMILES.
When canonical is True this function is analogous to:
canonical_smiles(smiles, sanitize=True).
"""
new_smiles = []
for sm in smiles:
try:
if canonical:
new_smiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(sm, sanitize=True)))
else:
new_smiles.append(sm)
except:
if throw_warning:
warnings.warn('Unsanitized SMILES string: ' + sm, UserWarning)
new_smiles.append('')
return new_smiles
def canonical_smiles(smiles, sanitize=True, throw_warning=False):
"""
Takes list of SMILES strings and returns list of their canonical SMILES.
Parameters
----------
smiles: list
list of SMILES strings to convert into canonical format
sanitize: bool (default True)
parameter specifying whether to sanitize SMILES or not.
For definition of sanitized SMILES check
http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol
throw_warning: bool (default False)
parameter specifying whether warnings will be thrown if a SMILES is
invalid
Returns
-------
new_smiles: list
list of canonical SMILES and NaNs if SMILES string is invalid or
unsanitized (when sanitize is True)
When sanitize is True the function is analogous to:
sanitize_smiles(smiles, canonical=True).
"""
new_smiles = []
for sm in smiles:
try:
mol = Chem.MolFromSmiles(sm, sanitize=sanitize)
new_smiles.append(Chem.MolToSmiles(mol))
except:
if throw_warning:
warnings.warn(sm + ' can not be canonized: invalid '
'SMILES string!', UserWarning)
new_smiles.append('')
return new_smiles
def save_smi_to_file(filename, smiles, unique=True):
"""
Takes path to file and list of SMILES strings and writes SMILES to the specified file.
Args:
filename (str): path to the file
smiles (list): list of SMILES strings
unique (bool): parameter specifying whether to write only unique copies or not.
Output:
success (bool): defines whether operation was successfully completed or not.
"""
if unique:
smiles = list(set(smiles))
else:
smiles = list(smiles)
f = open(filename, 'w')
for mol in smiles:
f.writelines([mol, '\n'])
f.close()
return f.closed
def read_smi_file(filename, unique=True, add_start_end_tokens=False):
"""
Reads SMILES from file. File must contain one SMILES string per line
    with a \n token at the end of each line.
Args:
filename (str): path to the file
unique (bool): return only unique SMILES
Returns:
        smiles (list): list of SMILES strings from specified file.
            If 'unique=True' this list contains only unique copies.
        success (bool): defines whether operation was successfully completed or not.
"""
f = open(filename, 'r')
molecules = []
for line in f:
if add_start_end_tokens:
molecules.append('<' + line[:-1] + '>')
else:
molecules.append(line[:-1])
if unique:
molecules = list(set(molecules))
else:
molecules = list(molecules)
f.close()
return molecules, f.closed
def tokenize(smiles, tokens=None):
"""
Returns list of unique tokens, token-2-index dictionary and number of
unique tokens from the list of SMILES
Parameters
----------
smiles: list
list of SMILES strings to tokenize.
tokens: list, str (default None)
list of unique tokens
Returns
-------
tokens: list
list of unique tokens/SMILES alphabet.
token2idx: dict
dictionary mapping token to its index.
num_tokens: int
number of unique tokens.
"""
if tokens is None:
tokens = list(set(''.join(smiles)))
tokens = list(np.sort(tokens))
tokens = ''.join(tokens)
token2idx = dict((token, i) for i, token in enumerate(tokens))
num_tokens = len(tokens)
return tokens, token2idx, num_tokens
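# Worked example (illustrative input): tokenize(['<CC>', '<CO>']) returns
# tokens = '<>CO', token2idx = {'<': 0, '>': 1, 'C': 2, 'O': 3} and num_tokens = 4,
# i.e. the sorted set of characters appearing in the SMILES strings.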
def time_since(since):
s = time.time() - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
class VanillaQSAR(object):
def __init__(self, model_instance=None, model_params=None,
model_type='classifier', ensemble_size=5, normalization=False):
super(VanillaQSAR, self).__init__()
self.model_instance = model_instance
self.model_params = model_params
self.ensemble_size = ensemble_size
self.model = []
self.normalization = normalization
if model_type not in ['classifier', 'regressor']:
raise InvalidArgumentError("model type must be either"
"classifier or regressor")
self.model_type = model_type
if isinstance(self.model_instance, list):
assert(len(self.model_instance) == self.ensemble_size)
assert(isinstance(self.model_params, list))
assert(len(self.model_params) == self.ensemble_size)
for i in range(self.ensemble_size):
self.model.append(self.model_instance[i](**model_params[i]))
else:
for _ in range(self.ensemble_size):
self.model.append(self.model_instance(**model_params))
if self.normalization:
self.desc_mean = [0]*self.ensemble_size
self.metrics_type = None
def fit_model(self, data, cv_split='stratified'):
eval_metrics = []
x = data.x
if self.model_type == 'classifier' and data.binary_y is not None:
y = data.binary_y
else:
y = data.y
cross_val_data, cross_val_labels = cross_validation_split(x=x, y=y,
split=cv_split,
n_folds=self.ensemble_size)
for i in range(self.ensemble_size):
train_x = np.concatenate(cross_val_data[:i] +
cross_val_data[(i + 1):])
test_x = cross_val_data[i]
train_y = np.concatenate(cross_val_labels[:i] +
cross_val_labels[(i + 1):])
test_y = cross_val_labels[i]
if self.normalization:
train_x, desc_mean = normalize_desc(train_x)
self.desc_mean[i] = desc_mean
test_x, _ = normalize_desc(test_x, desc_mean)
self.model[i].fit(train_x, train_y.ravel())
predicted = self.model[i].predict(test_x)
if self.model_type == 'classifier':
eval_metrics.append(metrics.f1_score(test_y, predicted))
self.metrics_type = 'F1 score'
elif self.model_type == 'regressor':
r2 = metrics.r2_score(test_y, predicted)
eval_metrics.append(r2)
self.metrics_type = 'R^2 score'
else:
raise RuntimeError()
return eval_metrics, self.metrics_type
def load_model(self, path):
# TODO: add iterable path object instead of static path
self.model = joblib.load(path)
if self.normalization:
arr = np.load(path + 'desc_mean.npy')
self.desc_mean = arr
def save_model(self, path):
joblib.dump(self.model, path + '.joblib')
if self.normalization:
np.save(path + 'desc_mean.npy', self.desc_mean)
def predict(self, objects=None, average=True, get_features=None,
**kwargs):
objects = np.array(objects)
invalid_objects = []
processed_objects = []
if get_features is not None:
x, processed_indices, invalid_indices = get_features(objects,
**kwargs)
processed_objects = objects[processed_indices]
invalid_objects = objects[invalid_indices]
else:
x = objects
if len(x) == 0:
processed_objects = []
prediction = []
invalid_objects = objects
else:
prediction = []
for i in range(self.ensemble_size):
m = self.model[i]
if self.normalization:
x, _ = normalize_desc(x, self.desc_mean[i])
prediction.append(m.predict(x))
prediction = np.array(prediction)
if average:
prediction = prediction.mean(axis=0)
return processed_objects, prediction, invalid_objects
class StackAugmentedRNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, layer_type='GRU',
n_layers=1, is_bidirectional=False, has_stack=False,
stack_width=None, stack_depth=None, use_cuda=None,
optimizer_instance=torch.optim.Adadelta, lr=0.01):
"""
Constructor for the StackAugmentedRNN object.
Parameters
----------
input_size: int
number of characters in the alphabet
hidden_size: int
size of the RNN layer(s)
output_size: int
again number of characters in the alphabet
layer_type: str (default 'GRU')
type of the RNN layer to be used. Could be either 'LSTM' or 'GRU'.
n_layers: int (default 1)
number of RNN layers
is_bidirectional: bool (default False)
parameter specifying if RNN is bidirectional
has_stack: bool (default False)
parameter specifying if augmented memory stack is used
stack_width: int (default None)
if has_stack is True then this parameter defines width of the
augmented stack memory
stack_depth: int (default None)
            if has_stack is True then this parameter defines depth of the augmented
            stack memory. Hint: no need for the stack depth to be larger than the
length of the longest sequence you plan to generate
use_cuda: bool (default None)
parameter specifying if GPU is used for computations. If left
unspecified, GPU will be used if available
optimizer_instance: torch.optim object (default torch.optim.Adadelta)
optimizer to be used for training
lr: float (default 0.01)
learning rate for the optimizer
"""
super(StackAugmentedRNN, self).__init__()
if layer_type not in ['GRU', 'LSTM']:
            raise ValueError('Layer type must be GRU or LSTM')
self.layer_type = layer_type
self.is_bidirectional = is_bidirectional
if self.is_bidirectional:
self.num_dir = 2
else:
self.num_dir = 1
if layer_type == 'LSTM':
self.has_cell = True
else:
self.has_cell = False
self.has_stack = has_stack
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
if self.has_stack:
self.stack_width = stack_width
self.stack_depth = stack_depth
self.use_cuda = use_cuda
if self.use_cuda is None:
self.use_cuda = torch.cuda.is_available()
self.n_layers = n_layers
if self.has_stack:
self.stack_controls_layer = nn.Linear(in_features=self.hidden_size *
self.num_dir,
out_features=3)
self.stack_input_layer = nn.Linear(in_features=self.hidden_size *
self.num_dir,
out_features=self.stack_width)
self.encoder = nn.Embedding(input_size, hidden_size)
if self.has_stack:
rnn_input_size = hidden_size + stack_width
else:
rnn_input_size = hidden_size
if self.layer_type == 'LSTM':
self.rnn = nn.LSTM(rnn_input_size, hidden_size, n_layers,
bidirectional=self.is_bidirectional)
self.decoder = nn.Linear(hidden_size * self.num_dir, output_size)
elif self.layer_type == 'GRU':
self.rnn = nn.GRU(rnn_input_size, hidden_size, n_layers,
bidirectional=self.is_bidirectional)
self.decoder = nn.Linear(hidden_size * self.num_dir, output_size)
self.log_softmax = torch.nn.LogSoftmax(dim=1)
if self.use_cuda:
self = self.cuda()
self.criterion = nn.CrossEntropyLoss()
self.lr = lr
self.optimizer_instance = optimizer_instance
self.optimizer = self.optimizer_instance(self.parameters(), lr=lr,
weight_decay=0.00001)
def load_model(self, path):
"""
Loads pretrained parameters from the checkpoint into the model.
Parameters
----------
path: str
path to the checkpoint file model will be loaded from.
"""
weights = torch.load(path, map_location=lambda storage, loc: storage)
self.load_state_dict(weights)
def save_model(self, path):
"""
Saves model parameters into the checkpoint file.
Parameters
----------
path: str
path to the checkpoint file model will be saved to.
"""
torch.save(self.state_dict(), path)
def change_lr(self, new_lr):
"""
Updates learning rate of the optimizer.
Parameters
----------
new_lr: float
new learning rate value
"""
self.optimizer = self.optimizer_instance(self.parameters(), lr=new_lr)
self.lr = new_lr
def forward(self, inp, hidden, stack):
"""
Forward step of the model. Generates probability of the next character
given the prefix.
Parameters
----------
inp: torch.tensor
input tensor that contains prefix string indices
hidden: torch.tensor or tuple(torch.tensor, torch.tensor)
previous hidden state of the model. If layer_type is 'LSTM',
then hidden is a tuple of hidden state and cell state, otherwise
hidden is torch.tensor
stack: torch.tensor
previous state of the augmented memory stack
Returns
-------
output: torch.tensor
tensor with non-normalized probabilities of the next character
next_hidden: torch.tensor or tuple(torch.tensor, torch.tensor)
next hidden state of the model. If layer_type is 'LSTM',
then next_hidden is a tuple of hidden state and cell state,
otherwise next_hidden is torch.tensor
next_stack: torch.tensor
next state of the augmented memory stack
"""
inp = self.encoder(inp.view(1, -1))
if self.has_stack:
if self.has_cell:
hidden_ = hidden[0]
else:
hidden_ = hidden
if self.is_bidirectional:
hidden_2_stack = torch.cat((hidden_[0], hidden_[1]), dim=1)
else:
hidden_2_stack = hidden_.squeeze(0)
stack_controls = self.stack_controls_layer(hidden_2_stack)
stack_controls = F.softmax(stack_controls, dim=1)
stack_input = self.stack_input_layer(hidden_2_stack.unsqueeze(0))
stack_input = torch.tanh(stack_input)
stack = self.stack_augmentation(stack_input.permute(1, 0, 2),
stack, stack_controls)
stack_top = stack[:, 0, :].unsqueeze(0)
inp = torch.cat((inp, stack_top), dim=2)
output, next_hidden = self.rnn(inp.view(1, 1, -1), hidden)
output = self.decoder(output.view(1, -1))
return output, next_hidden, stack
def stack_augmentation(self, input_val, prev_stack, controls):
"""
Augmentation of the tensor into the stack. For more details see
https://arxiv.org/abs/1503.01007
Parameters
----------
input_val: torch.tensor
tensor to be added to stack
prev_stack: torch.tensor
previous stack state
controls: torch.tensor
predicted probabilities for each operation in the stack, i.e
PUSH, POP and NO_OP. Again, see https://arxiv.org/abs/1503.01007
Returns
-------
new_stack: torch.tensor
new stack state
"""
batch_size = prev_stack.size(0)
controls = controls.view(-1, 3, 1, 1)
zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width)
if self.use_cuda:
zeros_at_the_bottom = Variable(zeros_at_the_bottom.cuda())
else:
zeros_at_the_bottom = Variable(zeros_at_the_bottom)
a_push, a_pop, a_no_op = controls[:, 0], controls[:, 1], controls[:, 2]
stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1)
stack_up = torch.cat((input_val, prev_stack[:, :-1]), dim=1)
new_stack = a_no_op * prev_stack + a_push * stack_up + a_pop * stack_down
return new_stack
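    # Shape sketch for the stack update above (batch size B is 1 during generation here):
    # prev_stack is (B, stack_depth, stack_width), input_val is (B, 1, stack_width) and
    # controls is reshaped to (B, 3, 1, 1). stack_up pushes input_val on top and drops the
    # bottom row, stack_down pops the top row and pads zeros at the bottom, and the new
    # stack is their blend weighted by the predicted PUSH/POP/NO_OP probabilities.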
def init_hidden(self):
"""
Initialization of the hidden state of RNN.
Returns
-------
hidden: torch.tensor
tensor filled with zeros of an appropriate size (taking into
account number of RNN layers and directions)
"""
if self.use_cuda:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size).cuda())
else:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size))
def init_cell(self):
"""
Initialization of the cell state of LSTM. Only used when layers_type is
'LSTM'
Returns
-------
cell: torch.tensor
tensor filled with zeros of an appropriate size (taking into
account number of RNN layers and directions)
"""
if self.use_cuda:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size).cuda())
else:
return Variable(torch.zeros(self.n_layers * self.num_dir, 1,
self.hidden_size))
def init_stack(self):
"""
Initialization of the stack state. Only used when has_stack is True
Returns
-------
stack: torch.tensor
tensor filled with zeros
"""
result = torch.zeros(1, self.stack_depth, self.stack_width)
if self.use_cuda:
return Variable(result.cuda())
else:
return Variable(result)
def train_step(self, inp, target):
"""
One train step, i.e. forward-backward and parameters update, for
a single training example.
Parameters
----------
inp: torch.tensor
tokenized training string from position 0 to position (seq_len - 1)
target:
tokenized training string from position 1 to position seq_len
Returns
-------
loss: float
mean value of the loss function (averaged through the sequence
length)
"""
hidden = self.init_hidden()
if self.has_cell:
cell = self.init_cell()
hidden = (hidden, cell)
if self.has_stack:
stack = self.init_stack()
else:
stack = None
self.optimizer.zero_grad()
loss = 0
for c in range(len(inp)):
output, hidden, stack = self(inp[c], hidden, stack)
loss += self.criterion(output, target[c].unsqueeze(0))
loss.backward()
self.optimizer.step()
return loss.item() / len(inp)
def evaluate(self, data, prime_str='<', end_token='>', predict_len=100):
"""
Generates new string from the model distribution.
Parameters
----------
data: object of type GeneratorData
            stores information about the generator data format, such as the alphabet, etc.
prime_str: str (default '<')
            prime string that will be used as a prefix. Default value is just the
START_TOKEN
end_token: str (default '>')
when end_token is sampled from the model distribution,
the generation of a new example is finished
predict_len: int (default 100)
maximum length of the string to be generated. If the end_token is
not sampled, the generation will be aborted when the length of the
generated sequence is equal to predict_len
Returns
-------
new_sample: str
Newly generated sample from the model distribution.
"""
hidden = self.init_hidden()
if self.has_cell:
cell = self.init_cell()
hidden = (hidden, cell)
if self.has_stack:
stack = self.init_stack()
else:
stack = None
prime_input = data.char_tensor(prime_str)
new_sample = prime_str
# Use priming string to "build up" hidden state
for p in range(len(prime_str)-1):
_, hidden, stack = self.forward(prime_input[p], hidden, stack)
inp = prime_input[-1]
for p in range(predict_len):
output, hidden, stack = self.forward(inp, hidden, stack)
# Sample from the network as a multinomial distribution
probs = torch.softmax(output, dim=1)
top_i = torch.multinomial(probs.view(-1), 1)[0].cpu().numpy()
# Add predicted character to string and use as next input
predicted_char = data.all_characters[top_i]
new_sample += predicted_char
inp = data.char_tensor(predicted_char)
if predicted_char == end_token:
break
return new_sample
def fit(self, data, n_iterations, all_losses=[], print_every=100,
plot_every=10, augment=False):
"""
This methods fits the parameters of the model. Training is performed to
minimize the cross-entropy loss when predicting the next character
given the prefix.
Parameters
----------
data: object of type GeneratorData
            stores information about the generator data format, such as the alphabet, etc.
n_iterations: int
how many iterations of training will be performed
all_losses: list (default [])
list to store the values of the loss function
print_every: int (default 100)
feedback will be printed to std_out once every print_every
iterations of training
plot_every: int (default 10)
value of the loss function will be appended to all_losses once every
plot_every iterations of training
augment: bool (default False)
            parameter specifying if SMILES enumeration will be used. For more
details on SMILES enumeration see https://arxiv.org/abs/1703.07076
Returns
-------
all_losses: list
list that stores the values of the loss function (learning curve)
"""
start = time.time()
loss_avg = 0
if augment:
smiles_augmentation = SmilesEnumerator()
else:
smiles_augmentation = None
for epoch in trange(1, n_iterations + 1, desc='Training in progress...'):
inp, target = data.random_training_set(smiles_augmentation)
loss = self.train_step(inp, target)
loss_avg += loss
if epoch % print_every == 0:
print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch,
epoch / n_iterations * 100, loss)
)
print(self.evaluate(data=data, prime_str = '<',
predict_len=100), '\n')
if epoch % plot_every == 0:
all_losses.append(loss_avg / plot_every)
loss_avg = 0
return all_losses
class SmilesEnumerator(object):
"""SMILES Enumerator, vectorizer and devectorizer
#Arguments
charset: string containing the characters for the vectorization
can also be generated via the .fit() method
pad: Length of the vectorization
leftpad: Add spaces to the left of the SMILES
isomericSmiles: Generate SMILES containing information about stereogenic centers
enum: Enumerate the SMILES during transform
canonical: use canonical SMILES during transform (overrides enum)
"""
def __init__(self, charset='@C)(=cOn1S2/H[N]\\', pad=120, leftpad=True, isomericSmiles=True, enum=True,
canonical=False):
self._charset = None
self.charset = charset
self.pad = pad
self.leftpad = leftpad
self.isomericSmiles = isomericSmiles
self.enumerate = enum
self.canonical = canonical
@property
def charset(self):
return self._charset
@charset.setter
def charset(self, charset):
self._charset = charset
self._charlen = len(charset)
self._char_to_int = dict((c, i) for i, c in enumerate(charset))
self._int_to_char = dict((i, c) for i, c in enumerate(charset))
def fit(self, smiles, extra_chars=[], extra_pad=5):
"""Performs extraction of the charset and length of a SMILES datasets and sets self.pad and self.charset
#Arguments
smiles: Numpy array or Pandas series containing smiles as strings
extra_chars: List of extra chars to add to the charset (e.g. "\\\\" when "/" is present)
extra_pad: Extra padding to add before or after the SMILES vectorization
"""
charset = set("".join(list(smiles)))
self.charset = "".join(charset.union(set(extra_chars)))
self.pad = max([len(smile) for smile in smiles]) + extra_pad
def randomize_smiles(self, smiles):
"""Perform a randomization of a SMILES string
must be RDKit sanitizable"""
m = Chem.MolFromSmiles(smiles)
ans = list(range(m.GetNumAtoms()))
np.random.shuffle(ans)
nm = Chem.RenumberAtoms(m, ans)
return Chem.MolToSmiles(nm, canonical=self.canonical, isomericSmiles=self.isomericSmiles)
def transform(self, smiles):
"""Perform an enumeration (randomization) and vectorization of a Numpy array of smiles strings
#Arguments
smiles: Numpy array or Pandas series containing smiles as strings
"""
one_hot = np.zeros((smiles.shape[0], self.pad, self._charlen), dtype=np.int8)
for i, ss in enumerate(smiles):
if self.enumerate: ss = self.randomize_smiles(ss)
for j, c in enumerate(ss):
one_hot[i, j, self._char_to_int[c]] = 1
return one_hot
def reverse_transform(self, vect):
""" Performs a conversion of a vectorized SMILES to a smiles strings
charset must be the same as used for vectorization.
#Arguments
vect: Numpy array of vectorized SMILES.
"""
smiles = []
for v in vect:
# mask v
v = v[v.sum(axis=1) == 1]
# Find one hot encoded index with argmax, translate to char and join to string
smile = "".join(self._int_to_char[i] for i in v.argmax(axis=1))
smiles.append(smile)
return np.array(smiles)
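# A minimal round-trip sketch (values are illustrative; inputs must be numpy arrays or
# pandas Series of SMILES strings, as the docstrings above note):
#
#     # sme = SmilesEnumerator(canonical=True, enum=False)
#     # sme.fit(np.array(['CCO', 'c1ccccc1']))
#     # one_hot = sme.transform(np.array(['CCO']))   # shape (1, sme.pad, len(sme.charset))
#     # assert sme.reverse_transform(one_hot)[0] == 'CCO'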
def cross_validation_split(x, y, n_folds=5, split='random', folds=None):
assert(len(x) == len(y))
x = np.array(x)
y = np.array(y)
if split not in ['random', 'stratified', 'fixed']:
raise ValueError('Invalid value for argument \'split\': '
'must be either \'random\', \'stratified\' '
'or \'fixed\'')
if split == 'random':
cv_split = KFold(n_splits=n_folds, shuffle=True)
folds = list(cv_split.split(x, y))
elif split == 'stratified':
cv_split = StratifiedKFold(n_splits=n_folds, shuffle=True)
folds = list(cv_split.split(x, y))
elif split == 'fixed' and folds is None:
raise TypeError(
'Invalid type for argument \'folds\': found None, but must be list')
cross_val_data = []
cross_val_labels = []
if len(folds) == n_folds:
for fold in folds:
cross_val_data.append(x[fold[1]])
cross_val_labels.append(y[fold[1]])
elif len(folds) == len(x) and np.max(folds) == n_folds:
for f in range(n_folds):
left = np.where(folds == f)[0].min()
right = np.where(folds == f)[0].max()
cross_val_data.append(x[left:right + 1])
cross_val_labels.append(y[left:right + 1])
return cross_val_data, cross_val_labels
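# Usage sketch (arrays are illustrative): with split='stratified' and n_folds=5 the data is
# partitioned into five label-balanced folds, and the i-th entries of the two returned lists
# hold the features and labels of fold i; VanillaQSAR.fit_model above trains each ensemble
# member on the other four folds and evaluates it on fold i.
#
#     # folds_x, folds_y = cross_validation_split(x, y, n_folds=5, split='stratified')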
class PredictorData(object):
def __init__(self, path, delimiter=',', cols=[0, 1], get_features=None,
has_label=True, labels_start=1, **kwargs):
super(PredictorData, self).__init__()
data = read_object_property_file(path, delimiter, cols_to_read=cols)
if has_label:
self.objects = np.array(data[:labels_start]).reshape(-1)
self.y = np.array(data[labels_start:], dtype='float32')
self.y = self.y.reshape(-1, len(cols) - labels_start)
if self.y.shape[1] == 1:
self.y = self.y.reshape(-1)
else:
self.objects = np.array(data[:labels_start]).reshape(-1)
self.y = [None]*len(self.objects)
assert len(self.objects) == len(self.y)
if get_features is not None:
self.x, processed_indices, invalid_indices = \
get_features(self.objects, **kwargs)
self.invalid_objects = self.objects[invalid_indices]
self.objects = self.objects[processed_indices]
self.invalid_y = self.y[invalid_indices]
self.y = self.y[processed_indices]
else:
self.x = self.objects
self.invalid_objects = None
self.invalid_y = None
self.binary_y = None
def binarize(self, threshold):
self.binary_y = np.array(self.y >= threshold, dtype='int32')
class GeneratorData(object):
def __init__(self, training_data_path, tokens=None, start_token='<',
end_token='>', max_len=120, use_cuda=None, **kwargs):
super(GeneratorData, self).__init__()
if 'cols_to_read' not in kwargs:
kwargs['cols_to_read'] = []
data = read_object_property_file(training_data_path,
**kwargs)
self.start_token = start_token
self.end_token = end_token
self.file = []
for i in range(len(data)):
if len(data[i]) <= max_len:
self.file.append(self.start_token + data[i] + self.end_token)
self.file_len = len(self.file)
self.all_characters, self.char2idx, \
self.n_characters = tokenize(self.file, tokens)
self.use_cuda = use_cuda
if self.use_cuda is None:
self.use_cuda = torch.cuda.is_available()
def load_dictionary(self, tokens, char2idx):
self.all_characters = tokens
self.char2idx = char2idx
self.n_characters = len(tokens)
def random_chunk(self):
index = random.randint(0, self.file_len-1)
return self.file[index]
def char_tensor(self, string):
tensor = torch.zeros(len(string)).long()
for c in range(len(string)):
tensor[c] = self.all_characters.index(string[c])
if self.use_cuda:
            return tensor.cuda()
        else:
            return tensor
def random_training_set(self, smiles_augmentation):
chunk = self.random_chunk()
if smiles_augmentation is not None:
chunk = '<' + smiles_augmentation.randomize_smiles(chunk[1:-1]) + '>'
inp = self.char_tensor(chunk[:-1])
target = self.char_tensor(chunk[1:])
return inp, target
def read_sdf_file(self, path, fields_to_read):
raise NotImplementedError
def update_data(self, path):
self.file, success = read_smi_file(path, unique=True)
self.file_len = len(self.file)
assert success
def read_object_property_file(path, delimiter=',', cols_to_read=[0, 1],
keep_header=False):
f = open(path, 'r')
reader = csv.reader(f, delimiter=delimiter)
data_full = np.array(list(reader))
if keep_header:
start_position = 0
else:
start_position = 1
assert len(data_full) > start_position
data = [[] for _ in range(len(cols_to_read))]
for i in range(len(cols_to_read)):
col = cols_to_read[i]
data[i] = data_full[start_position:, col]
f.close()
if len(cols_to_read) == 1:
data = data[0]
return data
def estimate_and_update(generator, predictor, n_to_generate, **kwargs):
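    """Sample ``n_to_generate`` SMILES from the generator, keep the unique
    valid ones and return them with the predictor's property estimates.
    Relies on the module-level ``gen_data`` prepared in ``init()``."""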
generated = []
pbar = tqdm(range(n_to_generate))
for i in pbar:
pbar.set_description("Generating molecules...")
generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1])
sanitized = canonical_smiles(generated, sanitize=False, throw_warning=False)[:-1]
unique_smiles = list(np.unique(sanitized))[1:]
smiles, prediction, nan_smiles = predictor.predict(unique_smiles, get_features=get_fp)
return smiles, prediction
def get_fp(smiles):
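    """Compute 2048-bit RDKit fingerprints for a list of SMILES and return the
    fingerprint array together with the indices of successfully processed and
    invalid molecules."""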
fp = []
processed_indices = []
invalid_indices = []
for i in range(len(smiles)):
mol = smiles[i]
tmp = np.array(mol2image(mol, n=2048))
if np.isnan(tmp[0]):
invalid_indices.append(i)
else:
fp.append(tmp)
processed_indices.append(i)
return np.array(fp), processed_indices, invalid_indices
def mol2image(x, n=2048):
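    """Convert a SMILES string into an RDKit path-based fingerprint of length
    ``n``; return ``[np.nan]`` if the molecule cannot be parsed."""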
try:
m = Chem.MolFromSmiles(x)
fp = Chem.RDKFingerprint(m, maxPath=4, fpSize=n)
res = np.zeros(len(fp))
DataStructs.ConvertToNumpyArray(fp, res)
return res
    except Exception:
        # invalid SMILES: return NaN so get_fp can mark this molecule as invalid
        return [np.nan]
def init():
global use_cuda
global my_generator
global gen_data
global my_predictor
hidden_size = 1500
stack_width = 1500
stack_depth = 200
layer_type = 'GRU'
n_characters = 45
lr = 0.001
optimizer_instance = torch.optim.Adadelta
# model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), './deploy_files')
use_cuda = False
# gen_data_path = model_path +'/6000smiles.csv'
gen_data_path = './deploy_files/6000smiles.csv'
tokens = ['<', '>', '#', '%', ')', '(', '+', '-', '/', '.', '1', '0', '3', '2', '5', '4', '7',
'6', '9', '8', '=', 'A', '@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'P', 'S', '[', ']',
'\\', 'c', 'e', 'i', 'l', 'o', 'n', 'p', 's', 'r', '\n']
gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\t',
cols_to_read=[0], keep_header=True, tokens=tokens)
my_generator = StackAugmentedRNN(input_size=gen_data.n_characters, hidden_size=hidden_size,
output_size=gen_data.n_characters, layer_type=layer_type,
n_layers=1, is_bidirectional=False, has_stack=True,
stack_width=stack_width, stack_depth=stack_depth,
use_cuda=use_cuda,
optimizer_instance=optimizer_instance, lr=lr)
# gen_model_path = model_path +'/generative_model_max.pth'
gen_model_path = "./deploy_files/generative_model_max.pth"
my_generator.load_model(gen_model_path)
pred_data = PredictorData(path='./deploy_files/jak2_data.csv', get_features=get_fp)
    # my_predictor: QSAR model used to score the generated molecules
model_instance = RFR
model_params = {'n_estimators': 175, 'n_jobs': 10}
my_predictor = VanillaQSAR(model_instance=model_instance,
model_params=model_params,
model_type='regressor')
my_predictor.fit_model(pred_data, cv_split='random')
my_predictor.save_model('./deploy_files/vanillaqsar')
my_predictor.load_model('./deploy_files/vanillaqsar.joblib')
# @input_schema('n_to_generate', NumpyParameterType(input_sample))
# @output_schema(NumpyParameterType(output_sample))
def run(n_to_generate):
try:
smiles, pic50 = estimate_and_update(my_generator,my_predictor,n_to_generate=n_to_generate)
molecules = [Chem.MolFromSmiles(x) for x in smiles]
qed_list = []
for x in molecules:
try:
qed_list.append(QED.qed(x))
except Exception as e:
print("early error")
print(e)
# pass
# return smiles_biased_max.tolist()
return smiles.tolist(), pic50.tolist(), qed_list
except Exception as e:
print("oof error time")
error = str(e)
return error
if __name__ == "__main__":
init()
print(run(5)) |
py | 1a473d459cced168a905250dc9e2f5981f3c8d40 | # Generated by Django 4.0.3 on 2022-03-23 17:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=50)),
('description', models.CharField(max_length=250)),
('quantity', models.IntegerField()),
('price', models.IntegerField()),
('info', models.CharField(blank=True, max_length=500, null=True)),
('pimage', models.ImageField(blank=True, null=True, upload_to='images')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'Product',
},
),
]
|
py | 1a473d7a5c933b64ee5342b471f3f0bc2d1005ec | # Generated by Django 2.2.6 on 2020-12-13 22:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0019_auto_20201211_1903'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='created',
field=models.DateTimeField(auto_now_add=True, help_text='Дата публикации', verbose_name='Дата публикации'),
),
migrations.AlterField(
model_name='comment',
name='post',
field=models.ForeignKey(help_text='Пост', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='posts.Post', verbose_name='Пост'),
),
migrations.AlterField(
model_name='follow',
name='author',
field=models.ForeignKey(help_text='Подписаться на автора', on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
),
]
|
py | 1a473df1170045e015390f2394c516f27d3d40b5 | import json
with open('EVAL_OUTPUT') as f:
    d = json.load(f)
print('easy', d['total_scores']['easy']['exact'])
print('medium', d['total_scores']['medium']['exact'])
print('hard', d['total_scores']['hard']['exact'])
print('extra', d['total_scores']['extra']['exact'])
print('all', d['total_scores']['all']['exact']) # should be ~0.42
print()
# print(d['total_scores']['all'].keys())
# print(d['total_scores']['easy']) |
py | 1a473e8abbd46b5d63525e206ef0af6a2685197b | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from blueapps.account import models
from data_migration.account.mixins import UserCompatibleMixin, UserManagerMixin
def patch():
models.User.__bases__ = (
models.AbstractBaseUser,
models.PermissionsMixin,
UserCompatibleMixin
)
models.UserManager.__bases__ = (
UserManagerMixin,
models.BaseUserManager,
)
|
py | 1a473fde18ad7a28aa5ce9744336b2666b90cd5d | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = "3.0.0"
|
py | 1a474010817f06696dabcd835ab8ada14e599955 | from app.announces.models import Announce
from tests.equipments.fakes import equipment1, equipment2, equipment3, get_equipment
from tests.shops.fakes import shop1, shop2, shop3, get_shop
shop1_equipment1_announce1 = Announce(1, shop1.id, shop1.name, equipment1.id, equipment1.name,
'New', 199.99)
shop1_equipment2_announce1 = Announce(2, shop1.id, shop1.name, equipment2.id, equipment2.name,
'Used', 149.99)
shop2_equipment2_announce1 = Announce(3, shop2.id, shop2.name, equipment2.id, equipment2.name,
'New', 400.00)
shop2_equipment2_announce2 = Announce(4, shop2.id, shop2.name, equipment2.id, equipment2.name,
'Needs repair',
300.00)
shop3_equipment1_announce1 = Announce(5, shop3.id, shop3.name, equipment1.id, equipment1.name,
'Used', 49.99)
shop3_equipment3_announce1 = Announce(6, shop3.id, shop3.name, equipment3.id, equipment3.name,
'Used', 99.99)
shop1.announces = [shop1_equipment1_announce1, shop1_equipment2_announce1]
shop2.announces = [shop2_equipment2_announce1, shop2_equipment2_announce2]
shop3.announces = [shop3_equipment1_announce1, shop3_equipment3_announce1]
equipment1.announces = [shop1_equipment1_announce1, shop3_equipment1_announce1]
equipment2.announces = [shop1_equipment2_announce1, shop2_equipment2_announce1,
shop2_equipment2_announce2]
equipment3.announces = [shop3_equipment3_announce1]
def get_announces_for_shop(shop_id):
return get_shop(shop_id).announces
def get_announces_for_equipment(equipment_id):
return get_equipment(equipment_id).announces
|
py | 1a47403bc60c7fc255b1b7da02c649975a9d49c3 | #!/usr/bin/env python
from time import sleep, ctime
def loop0():
print('start loop 0 at:', ctime())
sleep(4)
print('loop 0 done at:', ctime())
def loop1():
print('start loop 1 at:', ctime())
sleep(2)
print('loop 1 done at:', ctime())
def main():
print('starting at:', ctime())
loop0()
loop1()
print('all DONE at:', ctime())
if __name__ == '__main__':
main() |
py | 1a4740ec95c49ee04c754a38eb05424af26e1c53 | import os
import secrets
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from flaskblog import app, db, bcrypt
from flaskblog.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm, CommentForm
from flaskblog.models import User, Post, Comment
from flask_login import login_user, current_user, logout_user, login_required
from flaskblog import request as quote_request  # aliased so it does not shadow flask's request
@app.route("/")
@app.route("/home")
def home():
#page = request.args.get('page', 1, type=int)
    quote = quote_request.get_quote()
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(per_page=5)
return render_template('home.html', posts=posts, quote=quote)
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created! You are now able to log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
# next_page = request.args.get('next')
return redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/dp', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='dp/' + current_user.image_file)
return render_template('account.html', title='Account',
image_file=image_file, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post',
form=form, legend='New Post')
@app.route("/post/<int:post_id>", methods=['GET', 'POST'])
def post(post_id):
post = Post.query.get_or_404(post_id)
comments = Comment.query.all()
form = CommentForm()
if form.validate_on_submit():
comment = Comment(content=form.content.data, author=current_user)
db.session.add(comment)
db.session.commit()
flash('Your comment has been created!', 'success')
return redirect(url_for('home'))
return render_template('post.html', post=post, form=form, comments=comments)
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post',
form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('home'))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author=user)\
.order_by(Post.date_posted.desc())\
.paginate(page=page, per_page=5)
return render_template('user_posts.html', posts=posts, user=user)
|
py | 1a4740f1cbda4d30dbebc31c7767045899fffffd | import discord
# Imports permissions from discord.commands
from discord.commands import permissions
bot = discord.Bot()
# Note: If you want you can use commands.Bot instead of discord.Bot
# Use discord.Bot if you don't want prefixed message commands
# With discord.Bot you can use @bot.command as an alias
# of @bot.slash_command but this is overridden by commands.Bot
# by default, default_permission is set to True, you can use
# default_permission=False to disable the command for everyone.
# You can add up to 10 permissions per Command for a guild.
# You can either use the following decorators:
# --------------------------------------------
# @permissions.permission(role_id/user_id, permission)
# @permissions.has_role("ROLE_NAME") <-- can use either a name or id
# @permissions.has_any_role("ROLE_NAME", "ROLE_NAME_2") <-- can use either a name or id
# @permissions.is_user(USER_ID) <-- id only
# @permissions.is_owner()
# Note: you can supply "guild_id" to limit it to 1 guild.
# Ex: @permissions.has_role("Admin", guild_id=GUILD_ID)
# --------------------------------------------
# or supply permissions directly in @bot.slash_command
# @bot.slash_command(default_permission=False,
# permissions=[permissions.Permission(id=ID, type=TYPE, permission=True, guild_id=GUILD_ID)])
# Note: Please replace token, GUILD_ID, USER_ID and ROLE_NAME.
# Guild Slash Command Example with User Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.is_user(USER_ID)
async def user(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Owner Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.is_owner()
async def owner(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Role Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.has_role("ROLE_NAME")
async def role(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Any Specified Role Permissions
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.has_any_role("ROLE_NAME", "ROLE_NAME2")
async def multirole(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Permission Decorator
@bot.slash_command(guild_ids=[GUILD_ID], default_permission=False)
@permissions.permission(user_id=USER_ID, permission=True)
async def permission_decorator(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# Guild Slash Command Example with Permissions Kwarg
@bot.slash_command(
guild_ids=[GUILD_ID],
default_permission=False,
permissions=[permissions.Permission(id=USER_ID, type=2, permission=True)],
)
async def permission_kwarg(ctx):
"""Say hello to the author""" # the command description can be supplied as the docstring
await ctx.respond(f"Hello {ctx.author}!")
# To learn how to add descriptions, choices to options check slash_options.py
bot.run("token")
|
py | 1a47412058e858bbe90d5320549799a3718c2d8d | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import pytest
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1ResourceRequirements
from ..common.utils import predict
from ..common.utils import KFSERVING_TEST_NAMESPACE
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
KFServing = KFServingClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
def test_pytorch():
service_name = 'isvc-pytorch'
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas=1,
parallelism=1,
pytorch=V1alpha2PyTorchSpec(
storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
model_class_name="Net",
resources=V1ResourceRequirements(
requests={'cpu': '100m', 'memory': '2Gi'},
limits={'cpu': '100m', 'memory': '2Gi'}))))
isvc = V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name=service_name, namespace=KFSERVING_TEST_NAMESPACE),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))
KFServing.create(isvc)
try:
KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)
except RuntimeError as e:
print(KFServing.api_instance.get_namespaced_custom_object("serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
"services", service_name + "-predictor-default"))
pods = KFServing.core_api.list_namespaced_pod(KFSERVING_TEST_NAMESPACE,
label_selector='serving.kubeflow.org/inferenceservice={}'.
format(service_name))
for pod in pods.items:
print(pod)
raise e
res = predict(service_name, './data/cifar_input.json')
assert(np.argmax(res["predictions"]) == 3)
KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
|
py | 1a4741480d4540e6341ae68df8a26bec1fc20da3 | __title__ = "PyMatting"
__version__ = "1.0.6"
__author__ = "The PyMatting Developers"
__email__ = "[email protected]"
__license__ = "MIT"
__uri__ = "https://pymatting.github.io"
__summary__ = "Python package for alpha matting."
|
py | 1a4741aeaa182b55c6468561b2c9f7356a4a74e7 | from django.forms.widgets import CheckboxSelectMultiple
class BootstrapedCheckboxSelectMultiple(CheckboxSelectMultiple):
template_name = 'imoveis/forms/widgets/multiple_input.html'
option_template_name = 'imoveis/forms/widgets/input_option.html'
|
py | 1a4741c458d0892a4ec242cd07c5f0445af1a48a | #!/usr/bin/env python3
'''
Check:
-Individual files are valid
-No overlap between any tile
TODO:
Can we use prjxray?
Relies on 074, which is too far into the process
'''
from prjxray import util
from prjxray import db as prjxraydb
import os
import parsedb
#from prjxray import db as prjxraydb
import glob
def gen_tile_bits(tile_segbits, tile_bits):
'''
For given tile and corresponding db_file structure yield
(absolute address, absolute FDRI bit offset, tag)
For each tag bit in the corresponding block_type entry, calculate absolute address and bit offsets
'''
for block_type in tile_segbits:
assert block_type in tile_bits, "block type %s is not present in current tile" % block_type
block = tile_bits[block_type]
baseaddr = block.base_address
bitbase = 32 * block.offset
frames = block.frames
for tag in tile_segbits[block_type]:
for bit in tile_segbits[block_type][tag]:
# 31_06
word_column = bit.word_column
word_bit = bit.word_bit
assert word_column <= frames, "ERROR: bit out of bound --> tag: %s; word_column = %s; frames = %s" % (
tag, word_column, frames)
yield word_column + baseaddr, word_bit + bitbase, tag
def make_tile_mask(tile_segbits, tile_name, tile_bits):
'''
Return dict
key: (address, bit index)
val: sample description of where it came from (there may be multiple, only one)
'''
# FIXME: fix mask files https://github.com/SymbiFlow/prjxray/issues/301
# in the meantime build them on the fly
# We may want this to build them anyway
ret = dict()
for absaddr, bitaddr, tag in gen_tile_bits(tile_segbits, tile_bits):
name = "%s.%s" % (tile_name, tag)
ret.setdefault((absaddr, bitaddr), name)
return ret
def parsedb_all(db_root, verbose=False):
'''Verify .db files are individually valid'''
files = 0
for bit_fn in glob.glob('%s/segbits_*.db' % db_root):
verbose and print("Checking %s" % bit_fn)
parsedb.run(bit_fn, fnout=None, strict=True, verbose=verbose)
files += 1
print("segbits_*.db: %d okay" % files)
files = 0
for bit_fn in glob.glob('%s/mask_*.db' % db_root):
verbose and print("Checking %s" % bit_fn)
parsedb.run(bit_fn, fnout=None, strict=True, verbose=verbose)
files += 1
print("mask_*.db: %d okay" % files)
def check_tile_overlap(db, verbose=False):
'''
Verifies that no two tiles use the same bit
Assume .db files are individually valid
Create a mask for all the bits the tile type uses
For each tile, create bitmasks over the entire bitstream for current part
Throw an exception if two tiles share an address
'''
mall = dict()
tiles_type_done = dict()
tile_segbits = dict()
grid = db.grid()
tiles_checked = 0
for tile_name in grid.tiles():
tile_info = grid.gridinfo_at_tilename(tile_name)
tile_type = tile_info.tile_type
tile_bits = tile_info.bits
if tile_type not in tiles_type_done:
segbits = db.get_tile_segbits(tile_type).segbits
tile_segbits[tile_type] = segbits
# If segbits has zero length the tile_type is marked True in order to be skipped
if len(segbits) == 0:
tiles_type_done[tile_type] = True
else:
tiles_type_done[tile_type] = False
if tiles_type_done[tile_type]:
continue
mtile = make_tile_mask(tile_segbits[tile_type], tile_name, tile_bits)
verbose and print(
"Checking %s, type %s, bits: %s" %
(tile_name, tile_type, len(mtile)))
if len(mtile) == 0:
continue
collisions = set()
for bits in mtile.keys():
if bits in mall.keys():
collisions.add(bits)
if collisions:
print("ERROR: %s collisions" % len(collisions))
for ck in sorted(collisions):
addr, bitaddr = ck
word, bit = util.addr_bit2word(bitaddr)
print(
" %s: had %s, got %s" %
(util.addr2str(addr, word, bit), mall[ck], mtile[ck]))
raise ValueError("%s collisions" % len(collisions))
mall.update(mtile)
tiles_checked += 1
print("Checked %s tiles, %s bits" % (tiles_checked, len(mall)))
def run(db_root, verbose=False):
# Start by running a basic check on db files
print("Checking individual .db...")
parsedb_all(db_root, verbose=verbose)
# Now load and verify tile consistency
db = prjxraydb.Database(db_root)
db._read_tilegrid()
'''
these don't load properly without .json files
See: https://github.com/SymbiFlow/prjxray/issues/303
db._read_tile_types()
print(db.tile_types.keys())
'''
verbose and print("")
print("Checking aggregate dir...")
check_tile_overlap(db, verbose=verbose)
def main():
import argparse
parser = argparse.ArgumentParser(
description="Parse a db repository, checking for consistency")
util.db_root_arg(parser)
parser.add_argument('--verbose', action='store_true', help='')
args = parser.parse_args()
run(args.db_root, verbose=args.verbose)
if __name__ == '__main__':
main()
|
py | 1a474256206e0575720efe421fa3ec2d3a891ec3 | import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
from model.unet import UNet
# download the dataset and get info
dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
# see the possible keys we can access in the dataset dict.
# this contains the test and train splits.
print(dataset.keys())
# see information about the dataset
print(info)
# Preprocessing Utilities
def random_flip(input_image, input_mask):
"""does a random flip of the image and mask"""
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
return input_image, input_mask
def normalize(input_image, input_mask):
"""
normalizes the input image pixel values to be from [0,1].
subtracts 1 from the mask labels to have a range from [0,2]
"""
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask -= 1
return input_image, input_mask
@tf.function
def load_image_train(datapoint):
"""resizes, normalizes, and flips the training data"""
input_image = tf.image.resize(datapoint["image"], (128, 128), method="nearest")
input_mask = tf.image.resize(
datapoint["segmentation_mask"], (128, 128), method="nearest"
)
input_image, input_mask = random_flip(input_image, input_mask)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def load_image_test(datapoint):
"""resizes and normalizes the test data"""
input_image = tf.image.resize(datapoint["image"], (128, 128), method="nearest")
input_mask = tf.image.resize(
datapoint["segmentation_mask"], (128, 128), method="nearest"
)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
# preprocess the train and test sets
train = dataset["train"].map(
load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
test = dataset["test"].map(load_image_test)
BATCH_SIZE = 64
BUFFER_SIZE = 1000
# shuffle and group the train set into batches
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
# do a prefetch to optimize processing
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# group the test set into batches
test_dataset = test.batch(BATCH_SIZE)
# class list of the mask pixels
class_names = ["pet", "background", "outline"]
def display_with_metrics(display_list, iou_list, dice_score_list):
"""displays a list of images/masks and overlays a list of IOU and Dice Scores"""
metrics_by_id = [
(idx, iou, dice_score)
for idx, (iou, dice_score) in enumerate(zip(iou_list, dice_score_list))
if iou > 0.0
]
metrics_by_id.sort(key=lambda tup: tup[1], reverse=True) # sorts in place
display_string_list = [
"{}: IOU: {} Dice Score: {}".format(class_names[idx], iou, dice_score)
for idx, iou, dice_score in metrics_by_id
]
display_string = "\n\n".join(display_string_list)
display(
display_list,
["Image", "Predicted Mask", "True Mask"],
display_string=display_string,
)
def display(display_list, titles=[], display_string=None):
"""displays a list of images/masks"""
plt.figure(figsize=(15, 15))
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
if display_string and i == 1:
plt.xlabel(display_string, fontsize=12)
img_arr = tf.keras.preprocessing.image.array_to_img(display_list[i])
plt.imshow(img_arr)
plt.show()
def show_image_from_dataset(dataset):
"""displays the first image and its mask from a dataset"""
for image, mask in dataset.take(1):
sample_image, sample_mask = image, mask
display([sample_image, sample_mask], titles=["Image", "True Mask"])
def plot_metrics(metric_name, title, ylim=5):
"""plots a given metric from the model history"""
plt.title(title)
plt.ylim(0, ylim)
plt.plot(model_history.history[metric_name], color="blue", label=metric_name)
plt.plot(
model_history.history["val_" + metric_name],
color="green",
label="val_" + metric_name,
)
# NOTE: assuming a channels-last subclassed Keras model, build() expects the
# batch dimension first, i.e. (None, 128, 128, 3)
input_shape = (None, 128, 128, 3)
model = UNet()
model.build(input_shape)
model.summary()
# breakpoint()  # leftover debug stop, commented out so the script runs end-to-end
# configure the optimizer, loss and metrics for training
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# configure the training parameters and train the model
TRAIN_LENGTH = info.splits["train"].num_examples
EPOCHS = 10
VAL_SUBSPLITS = 5
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
VALIDATION_STEPS = info.splits["test"].num_examples // BATCH_SIZE // VAL_SUBSPLITS
# this will take around 20 minutes to run
model_history = model.fit(
train_dataset,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
validation_steps=VALIDATION_STEPS,
validation_data=test_dataset,
)
# Prediction Utilities
def get_test_image_and_annotation_arrays():
"""
Unpacks the test dataset and returns the input images and segmentation masks
"""
ds = test_dataset.unbatch()
ds = ds.batch(info.splits["test"].num_examples)
images = []
y_true_segments = []
for image, annotation in ds.take(1):
y_true_segments = annotation.numpy()
images = image.numpy()
y_true_segments = y_true_segments[
: (
info.splits["test"].num_examples
- (info.splits["test"].num_examples % BATCH_SIZE)
)
]
return (
images[
: (
info.splits["test"].num_examples
- (info.splits["test"].num_examples % BATCH_SIZE)
)
],
y_true_segments,
)
def create_mask(pred_mask):
"""
Creates the segmentation mask by getting the channel with the highest probability. Remember that we
have 3 channels in the output of the UNet. For each pixel, the predicition will be the channel with the
highest probability.
"""
pred_mask = tf.argmax(pred_mask, axis=-1)
pred_mask = pred_mask[..., tf.newaxis]
return pred_mask[0].numpy()
def make_predictions(image, mask, num=1):
"""
Feeds an image to a model and returns the predicted mask.
"""
image = np.reshape(image, (1, image.shape[0], image.shape[1], image.shape[2]))
pred_mask = model.predict(image)
pred_mask = create_mask(pred_mask)
return pred_mask
def class_wise_metrics(y_true, y_pred):
class_wise_iou = []
class_wise_dice_score = []
smoothening_factor = 0.00001
for i in range(3):
intersection = np.sum((y_pred == i) * (y_true == i))
y_true_area = np.sum((y_true == i))
y_pred_area = np.sum((y_pred == i))
combined_area = y_true_area + y_pred_area
iou = (intersection + smoothening_factor) / (
combined_area - intersection + smoothening_factor
)
class_wise_iou.append(iou)
dice_score = 2 * (
(intersection + smoothening_factor) / (combined_area + smoothening_factor)
)
class_wise_dice_score.append(dice_score)
return class_wise_iou, class_wise_dice_score
# Setup the ground truth and predictions.
# get the ground truth from the test set
y_true_images, y_true_segments = get_test_image_and_annotation_arrays()
# feed the test set to the model to get the predicted masks
results = model.predict(
test_dataset, steps=info.splits["test"].num_examples // BATCH_SIZE
)
results = np.argmax(results, axis=3)
results = results[..., tf.newaxis]
# compute the class wise metrics
cls_wise_iou, cls_wise_dice_score = class_wise_metrics(y_true_segments, results)
# show the IOU for each class
for idx, iou in enumerate(cls_wise_iou):
spaces = " " * (10 - len(class_names[idx]) + 2)
print("{}{}{} ".format(class_names[idx], spaces, iou))
# show the Dice Score for each class
for idx, dice_score in enumerate(cls_wise_dice_score):
spaces = " " * (10 - len(class_names[idx]) + 2)
print("{}{}{} ".format(class_names[idx], spaces, dice_score))
|
py | 1a4744d411dbaa1480fd26e4e990bb2887b8a8a9 | """
This problem was asked by LinkedIn.
Given a list of points, a central point, and an integer k, find the nearest k points from the central point.
For example, given the list of points [(0, 0), (5, 4), (3, 1)], the central point (1, 2), and k = 2,
return [(0, 0), (3, 1)].
"""
def k_nearest(points, central, k):
squared_l2_norm = lambda p1, p2 : (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2
distances = {p: squared_l2_norm(p, central) for p in points}
# return the k nearest points after sorting based on distances of points
return [p for p,_ in sorted(distances.items(), key=lambda item: item[1])][:k]
if __name__ == '__main__':
print(k_nearest([(0, 0), (5, 4), (3, 1)], (1, 2), 2)) |
py | 1a474526d667610da2500e99aaffad28bc8c2aae | # This is a polyfill for dataclasses
# https://docs.python.org/3/library/dataclasses.html
# Original PEP proposal: PEP 557
# https://www.python.org/dev/peps/pep-0557/
import re
import sys
import copy
import types
import inspect
import keyword
__all__ = [
"dataclass",
"field",
"Field",
"FrozenInstanceError",
"InitVar",
"MISSING",
# Helper functions.
"fields",
"asdict",
"astuple",
"make_dataclass",
"replace",
"is_dataclass",
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an attribute
# is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
#
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
#
# See _hash_action (below) for a coded version of this table.
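# Illustrative sketch of the table above (not part of the original polyfill
# logic): with the defaults, @dataclass sets __hash__ to None, so instances
# are unhashable; adding frozen=True (or unsafe_hash=True) makes the
# decorator generate __hash__ from the compared fields. For example:
#
#     @dataclass(frozen=True)
#     class Point:
#         x: int
#         y: int
#
#     hash(Point(1, 2))   # works: computed as hash((self.x, self.y))
#
#     @dataclass
#     class Mutable:
#         x: int
#
#     hash(Mutable(1))    # TypeError: unhashable type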
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError):
pass
# A sentinel object for default values to signal that a default
# factory will be used. This is given a nice repr() which will appear
# in the function signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return "<factory>"
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
class _FIELD_BASE:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
_FIELD = _FIELD_BASE("_FIELD")
_FIELD_CLASSVAR = _FIELD_BASE("_FIELD_CLASSVAR")
_FIELD_INITVAR = _FIELD_BASE("_FIELD_INITVAR")
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = "__dataclass_fields__"
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = "__dataclass_params__"
# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = "__post_init__"
# String regex that string annotations for ClassVar or InitVar must match.
# Allows "identifier.identifier[" or "identifier[".
# https://bugs.python.org/issue33453 for details.
_MODULE_IDENTIFIER_RE = re.compile(r"^(?:\s*(\w+)\s*\.)?\s*(\w+)")
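# Illustrative examples of what the regex accepts (assuming the annotation is
# written as a string):
#   _MODULE_IDENTIFIER_RE.match("typing.ClassVar[int]").groups() -> ("typing", "ClassVar")
#   _MODULE_IDENTIFIER_RE.match("ClassVar[int]").groups()        -> (None, "ClassVar")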
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
#
# name and type are filled in after the fact, not in __init__.
# They're not known at the time this class is instantiated, but it's
# convenient if they're available later.
#
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = (
"name",
"type",
"default",
"default_factory",
"repr",
"hash",
"init",
"compare",
"metadata",
"_field_type", # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare, metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (
_EMPTY_METADATA
if metadata is None or len(metadata) == 0
else types.MappingProxyType(metadata)
)
self._field_type = None
def __repr__(self):
return (
"Field("
f"name={self.name!r},"
f"type={self.type!r},"
f"default={self.default!r},"
f"default_factory={self.default_factory!r},"
f"init={self.init!r},"
f"repr={self.repr!r},"
f"hash={self.hash!r},"
f"compare={self.compare!r},"
f"metadata={self.metadata!r},"
f"_field_type={self._field_type}"
")"
)
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
    # default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
#
# Note that in _process_class, this Field object is overwritten
# with the default value, so the end result is a descriptor that
# had __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), "__set_name__", None)
if func:
# There is a __set_name__ method on the descriptor, call
# it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ("init", "repr", "eq", "order", "unsafe_hash", "frozen")
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return (
"_DataclassParams("
f"init={self.init!r},"
f"repr={self.repr!r},"
f"eq={self.eq!r},"
f"order={self.order!r},"
f"unsafe_hash={self.unsafe_hash!r},"
f"frozen={self.frozen!r}"
")"
)
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(
*,
default=MISSING,
default_factory=MISSING,
init=True,
repr=True,
hash=None,
compare=True,
metadata=None,
):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used
in comparison functions. metadata, if specified, must be a
mapping which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError("cannot specify both default and default_factory")
return Field(default, default_factory, init, repr, hash, compare, metadata)
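# Illustrative usage sketch (not part of the polyfill itself): default_factory
# is how a field gets a mutable default, and per-field options such as
# compare/repr can be tuned, e.g.
#
#     @dataclass
#     class Bag:
#         items: list = field(default_factory=list)
#         tag: str = field(default="misc", compare=False)
#
#     Bag().items.append(1)   # each instance gets its own list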
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return "()"
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
return_annotation = ""
if return_type is not MISSING:
locals["_return_type"] = return_type
return_annotation = "->_return_type"
args = ",".join(args)
body = "\n".join(f" {b}" for b in body)
# Compute the text of the entire function.
txt = f"def {name}({args}){return_annotation}:\n{body}"
exec(txt, globals, locals)
return locals[name]
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
#
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f"object.__setattr__({self_name},{name!r},{value})"
return f"{self_name}.{name}={value}"
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f"_dflt_{f.name}"
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = (
f"{default_name}() "
f"if {f.name} is _HAS_DEFAULT_FACTORY "
f"else {f.name}"
)
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f"{default_name}()"
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that
# to the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type == _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field. For
# example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just output the
# variable name and type.
default = ""
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look
# it up.
default = f"=_dflt_{f.name}"
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = "=_HAS_DEFAULT_FACTORY"
return f"{f.name}:_type_{f.name}{default}"
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(
f"non-default argument {f.name!r} " "follows default argument"
)
globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR)
body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})")
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ["pass"]
locals = {f"_type_{f.name}": f.type for f in fields}
return _create_fn(
"__init__",
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None,
)
def _repr_fn(fields):
return _create_fn(
"__repr__",
("self",),
[
'return self.__class__.__qualname__ + f"('
+ ", ".join([f"{f.name}={{self.{f.name}!r}}" for f in fields])
+ ')"'
],
)
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then
# the modified version is used in the second call. Is this okay?
globals = {"cls": cls, "FrozenInstanceError": FrozenInstanceError}
if fields:
fields_str = "(" + ",".join(repr(f.name) for f in fields) + ",)"
else:
# Special case for the zero-length tuple.
fields_str = "()"
return (
_create_fn(
"__setattr__",
("self", "name", "value"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f"super(cls, self).__setattr__(name, value)",
),
globals=globals,
),
_create_fn(
"__delattr__",
("self", "name"),
(
f"if type(self) is cls or name in {fields_str}:",
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f"super(cls, self).__delattr__(name)",
),
globals=globals,
),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(
name,
("self", "other"),
[
"if other.__class__ is self.__class__:",
f" return {self_tuple}{op}{other_tuple}",
"return NotImplemented",
],
)
def _hash_fn(fields):
self_tuple = _tuple_str("self", fields)
return _create_fn("__hash__", ("self",), [f"return hash({self_tuple})"])
def _is_classvar(a_type, typing):
# This test uses a typing internal class, but it's the best way to
# test if this is a ClassVar.
return type(a_type) is typing._ClassVar
def _is_initvar(a_type, dataclasses):
# The module we're checking against is the module we're
# currently in (dataclasses.py).
return a_type is dataclasses.InitVar
def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
# Given a type annotation string, does it refer to a_type in
# a_module? For example, when checking that annotation denotes a
# ClassVar, then a_module is typing, and a_type is
# typing.ClassVar.
# It's possible to look up a_module given a_type, but it involves
# looking in sys.modules (again!), and seems like a waste since
# the caller already knows a_module.
# - annotation is a string type annotation
# - cls is the class that this annotation was found in
# - a_module is the module we want to match
# - a_type is the type in that module we want to match
# - is_type_predicate is a function called with (obj, a_module)
# that determines if obj is of the desired type.
# Since this test does not do a local namespace lookup (and
# instead only a module (global) lookup), there are some things it
# gets wrong.
# With string annotations, cv0 will be detected as a ClassVar:
# CV = ClassVar
# @dataclass
# class C0:
# cv0: CV
# But in this example cv1 will not be detected as a ClassVar:
# @dataclass
# class C1:
# CV = ClassVar
# cv1: CV
# In C1, the code in this function (_is_type) will look up "CV" in
# the module and not find it, so it will not consider cv1 as a
# ClassVar. This is a fairly obscure corner case, and the best
# way to fix it would be to eval() the string "CV" with the
# correct global and local namespaces. However that would involve
# a eval() penalty for every single field of every dataclass
# that's defined. It was judged not worth it.
match = _MODULE_IDENTIFIER_RE.match(annotation)
if match:
ns = None
module_name = match.group(1)
if not module_name:
# No module name, assume the class's module did
# "from dataclasses import InitVar".
ns = sys.modules.get(cls.__module__).__dict__
else:
# Look up module_name in the class's module.
module = sys.modules.get(cls.__module__)
if module and module.__dict__.get(module_name) is a_module:
ns = sys.modules.get(a_type.__module__).__dict__
if ns and is_type_predicate(ns.get(match.group(2)), a_module):
return True
return False
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's only a
# normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# Assume it's a normal field until proven otherwise. We're next
# going to decide if it's a ClassVar or InitVar, everything else
# is just a normal field.
f._field_type = _FIELD
# In addition to checking for actual types here, also check for
# string annotations. get_type_hints() won't always work for us
# (see https://github.com/python/typing/issues/508 for example),
    # plus it's expensive and would require an eval for every string
# annotation. So, make a best effort to see if this is a ClassVar
# or InitVar using regex's and checking that the thing referenced
# is actually of the correct type.
# For the complete discussion, see https://bugs.python.org/issue33453
# If typing has not been imported, then it's impossible for any
# annotation to be a ClassVar. So, only look for ClassVar if
# typing has been imported by any module (not necessarily cls's
# module).
typing = sys.modules.get("typing")
if typing:
if _is_classvar(a_type, typing) or (
isinstance(f.type, str)
and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar)
):
f._field_type = _FIELD_CLASSVAR
# If the type is InitVar, or if it's a matching string annotation,
# then it's an InitVar.
if f._field_type is _FIELD:
# The module we're checking against is the module we're
# currently in (dataclasses.py).
dataclasses = sys.modules[__name__]
if _is_initvar(a_type, dataclasses) or (
isinstance(f.type, str)
and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar)
):
f._field_type = _FIELD_INITVAR
# Validations for individual fields. This is delayed until now,
# instead of in the Field() constructor, since only here do we
# know the field name, which allows for better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f"field {f.name} cannot have a " "default factory")
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others. For
# example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(
f"mutable default {type(f.default)} for field "
f"{f.name} is not allowed: use default_factory"
)
return f
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f"Cannot overwrite attribute __hash__ " f"in class {cls.__name__}")
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {
(False, False, False, False): None,
(False, False, False, True): None,
(False, False, True, False): None,
(False, False, True, True): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True): None,
(False, True, True, False): _hash_add,
(False, True, True, True): None,
(True, False, False, False): _hash_add,
(True, False, False, True): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
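# For example, _hash_action[(False, True, True, False)] is _hash_add: a
# frozen, eq dataclass with no explicit __hash__ gets a generated __hash__.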
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes. As long as
# we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that are
# added by this class.
#
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields. That's
# dealt with later.
cls_annotations = cls.__dict__.get("__annotations__", {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes) where
# we can.
cls_fields = [
_get_field(cls, name, type_) for name, type_ in cls_annotations.items()
]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for this
# field) exists and is of type 'Field', replace it with the
# real default. This is so that normal class introspection
# sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
        if isinstance(value, Field) and name not in cls_annotations:
raise TypeError(f"{name!r} is a field but has no type annotation")
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one")
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one")
# Remember all of the fields on our class (including bases). This
# also marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
    # that such a __hash__ == None was not auto-generated, but it's
    # close enough.
class_hash = cls.__dict__.get("__hash__", MISSING)
has_explicit_hash = not (
class_hash is MISSING or (class_hash is None and "__eq__" in cls.__dict__)
)
# If we're generating ordering methods, we must be generating the
# eq methods.
if order and not eq:
raise ValueError("eq must be true if order is true")
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values() if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(
cls,
"__init__",
_init_fn(
flds,
frozen,
has_post_init,
# The name to use for the "self"
# param in __init__. Use "self"
# if possible.
"__dataclass_self__" if "self" in fields else "self",
),
)
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, "__repr__", _repr_fn(flds))
if eq:
        # Create __eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
_set_new_attribute(
cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple)
)
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str("self", flds)
other_tuple = _tuple_str("other", flds)
for name, op in [
("__lt__", "<"),
("__le__", "<="),
("__gt__", ">"),
("__ge__", ">="),
]:
if _set_new_attribute(
cls, name, _cmp_fn(name, op, self_tuple, other_tuple)
):
raise TypeError(
f"Cannot overwrite attribute {name} "
f"in class {cls.__name__}. Consider using "
"functools.total_ordering"
)
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(
f"Cannot overwrite attribute {fn.__name__} "
f"in class {cls.__name__}"
)
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[
bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash
]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, "__doc__"):
# Create a class doc-string.
cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "")
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls=None,
*,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
    __hash__() method is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
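# Illustrative usage of the decorator (a minimal sketch; ``Point`` is a
# hypothetical example class, not defined elsewhere in this module):
#
#     @dataclass
#     class Point:
#         x: int
#         y: int = 0
#
#     p = Point(1)          # generated __init__ fills in y=0
#     repr(p)               # 'Point(x=1, y=0)' from the generated __repr__
#     p == Point(1, 0)      # True -- generated __eq__ compares field tuples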
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError("must be called with a dataclass type or instance")
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
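# Illustrative usage of fields() (a minimal sketch; ``Point`` is the same
# hypothetical example class as above):
#
#     [f.name for f in fields(Point)]       # ['x', 'y']
#     fields(Point(1, 2)) == fields(Point)  # True -- accepts a class or an instance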
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory))
for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)(
(_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items()
)
else:
return copy.deepcopy(obj)
def make_dataclass(
cls_name,
fields,
*,
bases=(),
namespace=None,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = "typing.Any"
elif len(item) == 2:
(name, tp) = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f"Invalid field: {item!r}")
if not isinstance(name, str) or not name.isidentifier():
            raise TypeError(f"Field names must be valid identifiers: {name!r}")
if keyword.iskeyword(name):
raise TypeError(f"Field names must not be keywords: {name!r}")
if name in seen:
raise TypeError(f"Field name duplicated: {name!r}")
seen.add(name)
anns[name] = tp
namespace["__annotations__"] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(
cls,
init=init,
repr=repr,
eq=eq,
order=order,
unsafe_hash=unsafe_hash,
frozen=frozen,
)
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(
f"field {f.name} is declared with "
"init=False, it cannot be specified with "
"replace()"
)
continue
if f.name not in changes:
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields we've
# added and/or left in 'changes'. If there are values supplied in
# changes that aren't fields, this will correctly raise a
# TypeError.
return obj.__class__(**changes)
|
py | 1a47458723ec908664a388421536a8ff27bf4cec | import numpy as np
import matplotlibex as plx
import ml.gptheano.kernels as krn
import ml.gptheano.gplvmfullfit as gplvm
if __name__ == "__main__":
t = np.linspace(0.0, 3*2*np.pi, num=300)
y = np.vstack((3*np.sin(1*t+0.0), 3*np.sin(2*t+1.5),
1*np.sin(1*t+0.4), 1*np.sin(3*t+1.8),
1*np.sin(1*t+0.8), 1*np.sin(4*t+2.0),
1*np.sin(1*t+1.0), 1*np.sin(5*t+2.2))).T
y = y + 0.1*np.reshape(np.random.normal(size=y.size), y.shape)
XVar = krn.MatrixVariable("X", np.identity(3))
krbfnoise = krn.SumKernel([krn.RBFKernel(XVar, XVar), krn.NoiseKernel(XVar, XVar)])
gp = gplvm.GPLVM(y, 2, krbfnoise)
print("##")
plx.plot_sequence_variance_2d(gp.XVar.val, gp.predict)
|
py | 1a4745faee1c4499db1916db7ad59cb1fca521b5 | import scipy.sparse as sps
from . import register_class
from ..container import Container
from ..utils import docval, getargs, call_docval_func, to_uint_array, get_data_shape
@register_class('CSRMatrix')
class CSRMatrix(Container):
@docval({'name': 'data', 'type': (sps.csr_matrix, 'array_data'),
             'doc': 'the data to use for this CSRMatrix or CSR data array. '
'If passing CSR data array, *indices*, *indptr*, and *shape* must also be provided'},
{'name': 'indices', 'type': 'array_data', 'doc': 'CSR index array', 'default': None},
{'name': 'indptr', 'type': 'array_data', 'doc': 'CSR index pointer array', 'default': None},
{'name': 'shape', 'type': 'array_data', 'doc': 'the shape of the matrix', 'default': None},
{'name': 'name', 'type': str, 'doc': 'the name to use for this when storing', 'default': 'csr_matrix'})
def __init__(self, **kwargs):
call_docval_func(super().__init__, kwargs)
data = getargs('data', kwargs)
if not isinstance(data, sps.csr_matrix):
temp_shape = get_data_shape(data)
temp_ndim = len(temp_shape)
if temp_ndim == 2:
data = sps.csr_matrix(data)
elif temp_ndim == 1:
indptr, indices, shape = getargs('indptr', 'indices', 'shape', kwargs)
if any(_ is None for _ in (indptr, indices, shape)):
raise ValueError("Must specify 'indptr', 'indices', and 'shape' arguments when passing data array.")
indptr = self.__check_arr(indptr, 'indptr')
indices = self.__check_arr(indices, 'indices')
shape = self.__check_arr(shape, 'shape')
if len(shape) != 2:
raise ValueError("'shape' argument must specify two and only two dimensions.")
data = sps.csr_matrix((data, indices, indptr), shape=shape)
else:
raise ValueError("'data' argument cannot be ndarray of dimensionality > 2.")
self.__data = data
@staticmethod
def __check_arr(ar, arg):
try:
ar = to_uint_array(ar)
except ValueError as ve:
raise ValueError("Cannot convert '%s' to an array of unsigned integers." % arg) from ve
if ar.ndim != 1:
raise ValueError("'%s' must be a 1D array of unsigned integers." % arg)
return ar
def __getattr__(self, val):
# NOTE: this provides access to self.data, self.indices, self.indptr, self.shape
attr = getattr(self.__data, val)
if val in ('indices', 'indptr', 'shape'): # needed because sps.csr_matrix may contain int arrays for these
attr = to_uint_array(attr)
return attr
def to_spmat(self):
return self.__data
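# Illustrative construction (a minimal sketch using small example arrays):
#
#     import numpy as np
#     m1 = CSRMatrix(data=np.array([[1, 0], [0, 2]]))   # a 2-D array is converted directly
#
#     # ...or from raw CSR components; indices, indptr and shape are then required
#     m2 = CSRMatrix(data=np.array([1, 2]),
#                    indices=np.array([0, 1]),
#                    indptr=np.array([0, 1, 2]),
#                    shape=np.array([2, 2]))
#     m2.to_spmat().toarray()                           # -> [[1, 0], [0, 2]]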
|
py | 1a474649a1fcc8a57650007570a95c87ee75b161 | # Author: Simon Blanke
# Email: [email protected]
# License: MIT License
from hyperactive_insight.streamlit_setup import (
create_streamlit_setup,
)
def open_insight(search_data):
create_streamlit_setup(search_data, plots=[])
|
py | 1a47466f02d40c0b616e7a8c2bd2f1bea2b42bc6 | from typing import List
from td.session import TdAmeritradeSession
class Quotes():
"""
## Overview
----
Allows the user to query real-time quotes from the TD
    API if they have an authorization token; otherwise quotes
    will be delayed by 5 minutes.
"""
def __init__(self, session: TdAmeritradeSession) -> None:
"""Initializes the `Quotes` services.
### Parameters
----
session : TdAmeritradeSession
            An authenticated `TdAmeritradeSession`
            object.
"""
self.session = session
    def get_quote(self, instrument: str) -> dict:
"""Grabs real-time quotes for an instrument.
### Overview
----
Serves as the mechanism to make a request to the Get
Quote and Get Quotes Endpoint. If one item is provided
a Get Quote request will be made and if more than one
item is provided then a Get Quotes request will be made.
### Documentation
----
https://developer.tdameritrade.com/quotes/apis
### Parameters
----
        instrument: str
            The symbol of the financial instrument to quote.
### Usage
----
>>> quote_service = td_client.quotes()
>>> quote_service.get_quote(instrument='AAPL')
"""
params = {
'symbol': instrument
}
content = self.session.make_request(
method='get',
endpoint='marketdata/quotes',
params=params
)
return content
    def get_quotes(self, instruments: List[str]) -> dict:
"""Grabs real-time quotes for multiple instruments.
### Overview
----
Serves as the mechanism to make a request to the Get
Quote and Get Quotes Endpoint. If one item is provided
a Get Quote request will be made and if more than one
item is provided then a Get Quotes request will be made.
Only 500 symbols can be sent at a single time.
### Documentation
----
https://developer.tdameritrade.com/quotes/apis
### Parameters
----
        instruments: List[str]
A list of different financial instruments.
### Usage
----
>>> quote_service = td_client.quotes()
>>> quote_service.get_quotes(instruments=['AAPL','SQ'])
"""
params = {
'symbol': ','.join(instruments)
}
content = self.session.make_request(
method='get',
endpoint='marketdata/quotes',
params=params
)
return content
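# Since only 500 symbols are accepted per request, a caller could batch a
# larger list and merge the per-symbol results. A minimal sketch, assuming
# ``td_client`` and ``symbols`` exist and that the response is a dict keyed
# by symbol:
#
#     quote_service = td_client.quotes()
#     merged = {}
#     for i in range(0, len(symbols), 500):
#         merged.update(quote_service.get_quotes(instruments=symbols[i:i + 500]))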
|
py | 1a474684a5d39b553f26ce19f16970ffc822d854 | # -*- coding: utf-8 -*-
"""
py_vollib.black_scholes_merton.implied_volatility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright © 2017 Gammon Capital LLC
A library for option pricing, implied volatility, and
greek calculation. py_vollib is based on lets_be_rational,
a Python wrapper for LetsBeRational by Peter Jaeckel as
described below.
:copyright: © 2017 Gammon Capital LLC
:license: MIT, see LICENSE for more details.
About LetsBeRational:
~~~~~~~~~~~~~~~~~~~~~
The source code of LetsBeRational resides at www.jaeckel.org/LetsBeRational.7z .
::
========================================================================================
Copyright © 2013-2014 Peter Jäckel.
Permission to use, copy, modify, and distribute this software is freely granted,
provided that this notice is preserved.
WARRANTY DISCLAIMER
The Software is provided "as is" without warranty of any kind, either express or implied,
including without limitation any implied warranties of condition, uninterrupted use,
merchantability, fitness for a particular purpose, or non-infringement.
========================================================================================
"""
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
from __future__ import division
# Related third party imports
from py_lets_be_rational import implied_volatility_from_a_transformed_rational_guess as iv
import numpy
# Local application/library specific imports
from py_vollib.black_scholes_merton import black_scholes_merton
from py_vollib.helpers import binary_flag
from py_vollib.helpers.exceptions import PriceIsAboveMaximum, PriceIsBelowIntrinsic
from py_vollib.helpers.constants import MINUS_FLOAT_MAX, FLOAT_MAX
# -----------------------------------------------------------------------------
# FUNCTIONS
def implied_volatility(price, S, K, t, r, q, flag):
"""Calculate the Black-Scholes-Merton implied volatility.
    :param price: the Black-Scholes-Merton option price
    :type price: float
    :param S: underlying asset price
    :type S: float
    :param K: strike price
    :type K: float
:param t: time to expiration in years
:type t: float
:param r: risk-free interest rate
:type r: float
:param q: annualized continuous dividend rate
:type q: float
:param flag: 'c' or 'p' for call or put.
:type flag: str
>>> S = 100
>>> K = 100
>>> sigma = .2
>>> r = .01
>>> flag = 'c'
>>> t = .5
>>> q = 0
>>> price = black_scholes_merton(flag, S, K, t, r, sigma, q)
>>> iv = implied_volatility(price, S, K, t, r, q, flag)
>>> expected_price = 5.87602423383
>>> expected_iv = 0.2
>>> abs(expected_price - price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
deflater = numpy.exp(-r * t)
undiscounted_option_price = price / deflater
F = S * numpy.exp((r-q)*t)
sigma_calc = iv(undiscounted_option_price, F, K, t, binary_flag[flag])
if sigma_calc == FLOAT_MAX:
raise PriceIsAboveMaximum()
elif sigma_calc == MINUS_FLOAT_MAX:
raise PriceIsBelowIntrinsic()
return sigma_calc
if __name__ == "__main__":
from py_vollib.helpers.doctest_helper import run_doctest
run_doctest()
|
py | 1a4747176bd8e12f3f013fa8875d1e85e78e65bd | from django.contrib import admin
from .models import Todo
# Register your models here.
@admin.register(Todo)
class TodoAdmin(admin.ModelAdmin):
list_display = ['title','start_time']
search_fields = ['title']
list_filter = ['title','start_time']
|
py | 1a47474657790c2853ee1b95b7f468f988e33a52 | import copy
from nose.tools import assert_equal, assert_raises
from ckan.lib.create_test_data import CreateTestData
import ckan.lib.search as search
from ckan.lib.search.common import SolrSettings
from ckan.tests.functional.api.base import BaseModelApiTestCase
from ckan.tests.functional.api.base import Api1TestCase as Version1TestCase
from ckan.tests.functional.api.base import Api2TestCase as Version2TestCase
import ckan.tests as tests
# Todo: Remove this ckan.model stuff.
import ckan.model as model
class PackagesTestCase(BaseModelApiTestCase):
@classmethod
def setup_class(cls):
CreateTestData.create()
cls.user_name = u'annafan' # created in CreateTestData
cls.init_extra_environ(cls.user_name)
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def teardown(self):
self.purge_package_by_name(self.package_fixture_data['name'])
def get_groups_identifiers(self, test_groups, users=[]):
groups = []
for grp in test_groups:
group = model.Group.get(grp)
if self.get_expected_api_version() == 1:
groups.append(group.name)
else:
groups.append(group.id)
if users:
model.setup_default_user_roles(group, users)
return groups
def test_register_get_ok(self):
offset = self.package_offset()
res = self.app.get(offset, status=self.STATUS_200_OK)
assert self.ref_package(self.anna) in res, res
assert self.ref_package(self.war) in res, res
def test_register_post_ok(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
postparams = '%s=1' % self.dumps(self.package_fixture_data)
res = self.app.post(offset, params=postparams,
status=self.STATUS_201_CREATED,
extra_environ=self.admin_extra_environ)
# Check the returned package is as expected
pkg = self.loads(res.body)
assert_equal(pkg['name'], self.package_fixture_data['name'])
assert_equal(pkg['title'], self.package_fixture_data['title'])
assert_equal(set(pkg['tags']), set(self.package_fixture_data['tags']))
assert_equal(len(pkg['resources']), len(self.package_fixture_data['resources']))
assert_equal(pkg['extras'], self.package_fixture_data['extras'])
# Check the value of the Location header.
location = res.header('Location')
assert offset in location
res = self.app.get(location, status=self.STATUS_200_OK)
# Check the database record.
model.Session.remove()
package = self.get_package_by_name(self.package_fixture_data['name'])
assert package
self.assert_equal(package.title, self.package_fixture_data['title'])
self.assert_equal(package.url, self.package_fixture_data['url'])
self.assert_equal(package.license_id, self.testpackage_license_id)
self.assert_equal(len(package.get_tags()), 2)
self.assert_equal(len(package.extras), 2)
for key, value in self.package_fixture_data['extras'].items():
self.assert_equal(package.extras[key], value)
self.assert_equal(len(package.resources), len(self.package_fixture_data['resources']))
for (i, expected_resource) in enumerate(self.package_fixture_data['resources']):
package_resource = package.resources[i]
for key in expected_resource.keys():
if key == 'extras':
package_resource_extras = getattr(package_resource, key)
expected_resource_extras = expected_resource[key].items()
for expected_extras_key, expected_extras_value in expected_resource_extras:
                        assert package_resource_extras[expected_extras_key] == expected_extras_value, \
                            'Package:%r Extras:%r Expected_extras:%r' % \
                            (self.package_fixture_data['name'],
                             package_resource_extras, expected_resource)
else:
package_resource_value = getattr(package_resource, key, None)
if not package_resource_value:
package_resource_value = package_resource.extras[key]
expected_resource_value = expected_resource[key]
self.assert_equal(package_resource_value, expected_resource_value)
# Test Package Entity Get 200.
offset = self.package_offset(self.package_fixture_data['name'])
res = self.app.get(offset, status=self.STATUS_200_OK)
# Todo: Instead loads() the data and then check actual values.
assert self.package_fixture_data['name'] in res, res
assert '"license_id": "%s"' % self.package_fixture_data['license_id'] in res, res
assert self.package_fixture_data['tags'][0] in res, res
assert self.package_fixture_data['tags'][1] in res, res
assert '"extras": {' in res, res
for key, value in self.package_fixture_data['extras'].items():
assert '"%s": "%s"' % (key, value) in res, res
model.Session.remove()
# Test Packages Register Post 409 (conflict - create duplicate package).
offset = self.package_offset()
postparams = '%s=1' % self.dumps(self.package_fixture_data)
res = self.app.post(offset, params=postparams, status=self.STATUS_409_CONFLICT,
extra_environ=self.admin_extra_environ)
model.Session.remove()
def test_register_post_with_group(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
test_groups = [u'david']
user = model.User.by_name(u'testsysadmin')
groups = self.get_groups_identifiers(test_groups,[user])
package_fixture_data = self.package_fixture_data
package_fixture_data['groups'] = groups
data = self.dumps(package_fixture_data)
res = self.post_json(offset, data, status=self.STATUS_201_CREATED,
extra_environ={'Authorization':str(user.apikey)})
# Check the database record.
model.Session.remove()
package = self.get_package_by_name(self.package_fixture_data['name'])
assert package
pkg_groups = model.Session.query(model.Group).\
join(model.Member, model.Member.group_id == model.Group.id).\
filter(model.Member.table_id == package.id).all()
if self.get_expected_api_version() == 1:
self.assert_equal([g.name for g in pkg_groups], groups)
else:
self.assert_equal([g.id for g in pkg_groups], groups)
del package_fixture_data['groups']
def test_register_post_with_group_not_authorized(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
test_groups = [u'david']
groups = self.get_groups_identifiers(test_groups)
package_fixture_data = self.package_fixture_data
package_fixture_data['groups'] = groups
data = self.dumps(package_fixture_data)
res = self.post_json(offset, data, status=self.STATUS_403_ACCESS_DENIED,
extra_environ=self.extra_environ)
del package_fixture_data['groups']
def test_register_post_with_group_not_found(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
test_groups = [u'this-group-does-not-exist']
groups = test_groups
package_fixture_data = self.package_fixture_data
package_fixture_data['groups'] = groups
data = self.dumps(package_fixture_data)
res = self.post_json(offset, data, status=self.STATUS_404_NOT_FOUND,
extra_environ=self.extra_environ)
del package_fixture_data['groups']
def test_register_post_with_group_sysadmin(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
user = model.User.by_name(u'testsysadmin')
test_groups = [u'david']
groups = self.get_groups_identifiers(test_groups)
package_fixture_data = self.package_fixture_data
package_fixture_data['groups'] = groups
data = self.dumps(package_fixture_data)
res = self.post_json(offset, data, status=self.STATUS_201_CREATED,
extra_environ={'Authorization':str(user.apikey)})
# Check the database record.
model.Session.remove()
package = self.get_package_by_name(self.package_fixture_data['name'])
assert package
pkg_groups = model.Session.query(model.Group).\
join(model.Member, model.Member.group_id == model.Group.id).\
filter(model.Member.table_id == package.id).all()
if self.get_expected_api_version() == 1:
self.assert_equal([g.name for g in pkg_groups], groups)
else:
self.assert_equal([g.id for g in pkg_groups], groups)
del package_fixture_data['groups']
def test_register_post_json(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
data = self.dumps(self.package_fixture_data)
res = self.post_json(offset, data, status=self.STATUS_201_CREATED,
extra_environ=self.admin_extra_environ)
# Check the database record.
model.Session.remove()
package = self.get_package_by_name(self.package_fixture_data['name'])
assert package
self.assert_equal(package.title, self.package_fixture_data['title'])
def test_register_post_bad_content_type(self):
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
data = self.dumps(self.package_fixture_data)
res = self.http_request(offset, data,
content_type='something/unheard_of',
status=[self.STATUS_400_BAD_REQUEST,
self.STATUS_201_CREATED],
extra_environ=self.admin_extra_environ)
model.Session.remove()
# Some versions of webob work, some don't. No matter, we record this
# behaviour.
package = self.get_package_by_name(self.package_fixture_data['name'])
if res.status == self.STATUS_400_BAD_REQUEST:
# Check there is no database record.
assert not package
else:
assert package
def test_register_post_bad_request(self):
test_params = {
'name':u'testpackage06_400',
'resources':[u'should_be_a_dict'],
}
offset = self.offset('/rest/dataset')
postparams = '%s=1' % self.dumps(test_params)
res = self.app.post(offset, params=postparams, status=self.STATUS_400_BAD_REQUEST,
extra_environ=self.admin_extra_environ)
def test_register_post_denied(self):
offset = self.offset('/rest/dataset')
postparams = '%s=1' % self.dumps(self.package_fixture_data)
res = self.app.post(offset, params=postparams, status=self.STATUS_403_ACCESS_DENIED)
def test_register_post_indexerror(self):
"""
Test that we can't add a package if Solr is down.
"""
bad_solr_url = 'http://127.0.0.1/badsolrurl'
original_settings = SolrSettings.get()[0]
try:
SolrSettings.init(bad_solr_url)
assert not self.get_package_by_name(self.package_fixture_data['name'])
offset = self.package_offset()
data = self.dumps(self.package_fixture_data)
self.post_json(offset, data, status=500, extra_environ=self.admin_extra_environ)
model.Session.remove()
finally:
SolrSettings.init(original_settings)
def test_register_post_tag_too_long(self):
pkg = {'name': 'test_tag_too_long',
'tags': ['tagok', 't'*101]}
assert not self.get_package_by_name(pkg['name'])
offset = self.package_offset()
data = self.dumps(pkg)
res = self.post_json(offset, data, status=self.STATUS_409_CONFLICT,
extra_environ=self.admin_extra_environ)
assert 'length is more than maximum 100' in res.body, res.body
assert 'tagok' not in res.body
def test_entity_get_ok(self):
package_refs = [self.anna.name, self.anna.id]
for ref in package_refs:
offset = self.offset('/rest/dataset/%s' % ref)
res = self.app.get(offset, status=self.STATUS_200_OK)
self.assert_msg_represents_anna(msg=res.body)
def test_entity_get_ok_jsonp(self):
offset = self.anna_offset(postfix='?callback=jsoncallback')
res = self.app.get(offset, status=self.STATUS_200_OK)
import re
assert re.match('jsoncallback\(.*\);', res.body), res
# Unwrap JSONP callback (we want to look at the data).
msg = res.body[len('jsoncallback')+1:-2]
self.assert_msg_represents_anna(msg=msg)
def test_entity_get_not_found(self):
offset = self.offset('/rest/dataset/22222')
res = self.app.get(offset, status=self.STATUS_404_NOT_FOUND)
model.Session.remove()
def test_entity_get_then_post(self):
# (ticket 662) Ensure an entity you 'get' from a register can be
# returned by posting it back
offset = self.package_offset(self.war.name)
res = self.app.get(offset, status=self.STATUS_200_OK)
data = self.loads(res.body)
postparams = '%s=1' % self.dumps(data)
res = self.app.post(offset, params=postparams,
status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
data_returned = self.loads(res.body)
assert_equal(data['name'], data_returned['name'])
assert_equal(data['license_id'], data_returned['license_id'])
def test_entity_get_then_post_new(self):
offset = self.package_offset(self.war.name)
res = self.app.get(offset, status=self.STATUS_200_OK)
data = self.loads(res.body)
# change name and create a new package
data['name'] = u'newpkg'
data['id'] = None # ensure this doesn't clash or you get 409 error
postparams = '%s=1' % self.dumps(data)
# use russianfan now because he has rights to add this package to
# the 'david' group.
extra_environ = {'REMOTE_USER': 'testsysadmin'}
res = self.app.post(self.package_offset(), params=postparams,
status=self.STATUS_201_CREATED,
extra_environ=extra_environ)
try:
data_returned = self.loads(res.body)
assert_equal(data['name'], data_returned['name'])
assert_equal(data['license_id'], data_returned['license_id'])
finally:
self.purge_package_by_name(data['name'])
def test_entity_post_changed_readonly(self):
# (ticket 662) Edit a readonly field gives error
offset = self.package_offset(self.war.name)
res = self.app.get(offset, status=self.STATUS_200_OK)
data = self.loads(res.body)
data['id'] = 'illegally changed value'
postparams = '%s=1' % self.dumps(data)
res = self.app.post(offset, params=postparams,
status=self.STATUS_409_CONFLICT,
extra_environ=self.admin_extra_environ)
assert "Cannot change value of key from" in res.body, res.body
assert "to illegally changed value. This key is read-only" in res.body, res.body
def test_entity_update_denied(self):
offset = self.anna_offset()
postparams = '%s=1' % self.dumps(self.package_fixture_data)
res = self.app.post(offset, params=postparams, status=self.STATUS_403_ACCESS_DENIED)
def test_entity_delete_denied(self):
offset = self.anna_offset()
res = self.app.delete(offset, status=self.STATUS_403_ACCESS_DENIED)
def test_09_update_package_entity_not_found(self):
offset = self.offset('/rest/dataset/22222')
postparams = '%s=1' % self.dumps(self.package_fixture_data)
res = self.app.post(offset, params=postparams,
status=self.STATUS_404_NOT_FOUND,
extra_environ=self.admin_extra_environ)
def create_package_with_admin_user(self, package_data):
'''Creates a package with self.user as admin and provided package_data.
'''
self.create_package(admins=[self.user], data=package_data)
def assert_package_update_ok(self, package_ref_attribute,
method_str):
old_fixture_data = {
'name': self.package_fixture_data['name'],
'url': self.package_fixture_data['url'],
'tags': [u'tag 1.1', u'tag2', u'tag3'],
'extras': {
u'key1': u'val1',
u'key2': u'val2'
},
}
new_fixture_data = {
'name':u'somethingnew',
'title':u'newtesttitle',
'resources': [{
u'url':u'http://blah.com/file2.xml',
u'format':u'XML',
u'description':u'Appendix 1',
u'hash':u'def123',
u'alt_url':u'alt123',
u'size_extra':u'400',
},{
u'url':u'http://blah.com/file3.xml',
u'format':u'XML',
u'description':u'Appenddic 2',
u'hash':u'ghi123',
u'alt_url':u'alt123',
u'size_extra':u'400',
}],
'extras': {
u'key3': u'val3',
u'key4': u'',
u'key2': None,
u'key7': '["a","b"]',
},
'tags': [u'tag 1.1', u'tag2', u'tag 4', u'tag5.'],
}
self.create_package_with_admin_user(old_fixture_data)
pkg = self.get_package_by_name(old_fixture_data['name'])
# This is the one occasion where we reference package explicitly
# by name or ID, rather than use the value from self.ref_package_by
# because you should be able to specify the package both ways round
# for both versions of the API.
package_ref = getattr(pkg, package_ref_attribute)
offset = self.offset('/rest/dataset/%s' % package_ref)
params = '%s=1' % self.dumps(new_fixture_data)
method_func = getattr(self.app, method_str)
res = method_func(offset, params=params, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
try:
# Check the returned package is as expected
pkg = self.loads(res.body)
assert_equal(pkg['name'], new_fixture_data['name'])
assert_equal(pkg['title'], new_fixture_data['title'])
assert_equal(set(pkg['tags']), set(new_fixture_data['tags']))
assert_equal(len(pkg['resources']), len(new_fixture_data['resources']))
expected_extras = copy.deepcopy(new_fixture_data['extras'])
del expected_extras['key2']
expected_extras['key1'] = old_fixture_data['extras']['key1']
assert_equal(pkg['extras'], expected_extras)
# Check submitted field have changed.
model.Session.remove()
package = self.get_package_by_name(new_fixture_data['name'])
# - title
self.assert_equal(package.title, new_fixture_data['title'])
# - tags
package_tagnames = [tag.name for tag in package.get_tags()]
for tagname in new_fixture_data['tags']:
assert tagname in package_tagnames, 'tag %r not in %r' % (tagname, package_tagnames)
# - resources
assert len(package.resources), "Package has no resources: %s" % package
self.assert_equal(len(package.resources), 2)
resource = package.resources[0]
self.assert_equal(resource.url, u'http://blah.com/file2.xml')
self.assert_equal(resource.format, u'XML')
self.assert_equal(resource.description, u'Appendix 1')
self.assert_equal(resource.hash, u'def123')
self.assert_equal(resource.alt_url, u'alt123')
self.assert_equal(resource.extras['size_extra'], u'400')
resource = package.resources[1]
self.assert_equal(resource.url, 'http://blah.com/file3.xml')
self.assert_equal(resource.format, u'XML')
self.assert_equal(resource.description, u'Appenddic 2')
self.assert_equal(resource.hash, u'ghi123')
self.assert_equal(resource.alt_url, u'alt123')
self.assert_equal(resource.extras['size_extra'], u'400')
# Check unsubmitted fields have not changed.
# - url
self.assert_equal(package.url, self.package_fixture_data['url'])
# - extras
self.assert_equal(len(package.extras), 4)
for key, value in {u'key1':u'val1',
u'key3':u'val3',
u'key7':'["a","b"]',
u'key4':u''}.items():
self.assert_equal(package.extras[key], value)
# NB: key4 set to '' creates it
# but: key2 set to None will delete it
assert not package.extras.has_key('key2')
finally:
self.purge_package_by_name(new_fixture_data['name'])
def test_package_update_ok_by_id(self):
self.assert_package_update_ok('id', 'post')
def test_entity_update_ok_by_name(self):
self.assert_package_update_ok('name', 'post')
def test_package_update_ok_by_id_by_put(self):
self.assert_package_update_ok('id', 'put')
def test_entity_update_ok_by_name_by_put(self):
self.assert_package_update_ok('name', 'put')
def test_package_update_invalid(self):
old_fixture_data = {
'name': self.package_fixture_data['name'],
}
new_fixture_data = {
'name':u'somethingnew',
'resources': [{
u'url':u'http://blah.com/file1.xml',
u'size':u'abc', # INVALID
},{
u'url':u'http://blah.com/file2.xml',
u'size':u'400',
u'last_modified':u'123', # INVALID
}],
}
self.create_package_with_admin_user(old_fixture_data)
pkg = self.get_package_by_name(old_fixture_data['name'])
offset = self.offset('/rest/dataset/%s' % pkg.name)
params = '%s=1' % self.dumps(new_fixture_data)
res = self.app.post(offset, params=params,
status=self.STATUS_409_CONFLICT,
extra_environ=self.admin_extra_environ)
res_dict = self.loads(res.body)
assert len(res_dict['resources']) == 2, res_dict['resources']
assert_equal(res_dict['resources'][0], {u'size': [u'Invalid integer']})
assert_equal(res_dict['resources'][1], {u'last_modified': [u'Date format incorrect']})
def test_package_update_delete_last_extra(self):
old_fixture_data = {
'name': self.package_fixture_data['name'],
'extras': {
u'key1': u'val1',
},
}
new_fixture_data = {
'name':u'somethingnew',
'extras': {
u'key1': None,
},
}
self.create_package_with_admin_user(old_fixture_data)
offset = self.package_offset(old_fixture_data['name'])
params = '%s=1' % self.dumps(new_fixture_data)
res = self.app.post(offset, params=params, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
try:
# Check the returned package is as expected
pkg = self.loads(res.body)
assert_equal(pkg['name'], new_fixture_data['name'])
expected_extras = copy.deepcopy(new_fixture_data['extras'])
del expected_extras['key1']
assert_equal(pkg['extras'], expected_extras)
# Check extra was deleted
model.Session.remove()
package = self.get_package_by_name(new_fixture_data['name'])
# - title
self.assert_equal(package.extras, {})
finally:
self.purge_package_by_name(new_fixture_data['name'])
def test_package_update_do_not_delete_last_extra(self):
old_fixture_data = {
'name': self.package_fixture_data['name'],
'extras': {
u'key1': u'val1',
},
}
new_fixture_data = {
'name':u'somethingnew',
'extras': {}, # no extras specified, but existing
# ones should be left alone
}
self.create_package_with_admin_user(old_fixture_data)
offset = self.package_offset(old_fixture_data['name'])
params = '%s=1' % self.dumps(new_fixture_data)
res = self.app.post(offset, params=params, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
try:
# Check the returned package is as expected
pkg = self.loads(res.body)
assert_equal(pkg['name'], new_fixture_data['name'])
expected_extras = {u'key1': u'val1'} # should not be deleted
assert_equal(pkg['extras'], expected_extras)
# Check extra was not deleted
model.Session.remove()
package = self.get_package_by_name(new_fixture_data['name'])
# - title
assert len(package.extras) == 1, package.extras
finally:
self.purge_package_by_name(new_fixture_data['name'])
def test_entity_update_readd_tag(self):
name = self.package_fixture_data['name']
old_fixture_data = {
'name': name,
'tags': ['tag 1.', 'tag2']
}
new_fixture_data = {
'name': name,
'tags': ['tag 1.']
}
self.create_package_with_admin_user(old_fixture_data)
offset = self.package_offset(name)
params = '%s=1' % self.dumps(new_fixture_data)
res = self.app.post(offset, params=params, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
# Check the returned package is as expected
pkg = self.loads(res.body)
assert_equal(pkg['name'], new_fixture_data['name'])
assert_equal(pkg['tags'], ['tag 1.'])
package = self.get_package_by_name(new_fixture_data['name'])
assert len(package.get_tags()) == 1, package.get_tags()
# now reinstate the tag
params = '%s=1' % self.dumps(old_fixture_data)
res = self.app.post(offset, params=params, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
pkg = self.loads(res.body)
assert_equal(pkg['tags'], ['tag 1.', 'tag2'])
def test_entity_update_conflict(self):
package1_name = self.package_fixture_data['name']
package1_data = {'name': package1_name}
package1 = self.create_package_with_admin_user(package1_data)
package2_name = u'somethingnew'
package2_data = {'name': package2_name}
package2 = self.create_package_with_admin_user(package2_data)
try:
package1_offset = self.package_offset(package1_name)
# trying to rename package 1 to package 2's name
print package1_offset, package2_data
self.post(package1_offset, package2_data, self.STATUS_409_CONFLICT, extra_environ=self.admin_extra_environ)
finally:
self.purge_package_by_name(package2_name)
def test_entity_update_empty(self):
package1_name = self.package_fixture_data['name']
package1_data = {'name': package1_name}
package1 = self.create_package_with_admin_user(package1_data)
package2_data = '' # this is the error
package1_offset = self.package_offset(package1_name)
self.app.put(package1_offset, package2_data,
status=self.STATUS_400_BAD_REQUEST)
def test_entity_update_indexerror(self):
"""
Test that we can't update a package if Solr is down.
"""
bad_solr_url = 'http://127.0.0.1/badsolrurl'
original_settings = SolrSettings.get()[0]
try:
SolrSettings.init(bad_solr_url)
assert_raises(
search.SearchIndexError, self.assert_package_update_ok, 'name', 'post'
)
finally:
SolrSettings.init(original_settings)
def test_package_update_delete_resource(self):
old_fixture_data = {
'name': self.package_fixture_data['name'],
'resources': [{
u'url':u'http://blah.com/file2.xml',
u'format':u'XML',
u'description':u'Appendix 1',
u'hash':u'def123',
u'alt_url':u'alt123',
},{
u'url':u'http://blah.com/file3.xml',
u'format':u'XML',
u'description':u'Appenddic 2',
u'hash':u'ghi123',
u'alt_url':u'alt123',
}],
}
new_fixture_data = {
'name':u'somethingnew',
'resources': [],
}
self.create_package_with_admin_user(old_fixture_data)
offset = self.package_offset(old_fixture_data['name'])
params = '%s=1' % self.dumps(new_fixture_data)
res = self.app.post(offset, params=params, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
try:
# Check the returned package is as expected
pkg = self.loads(res.body)
assert_equal(pkg['name'], new_fixture_data['name'])
assert_equal(pkg['resources'], [])
# Check resources were deleted
model.Session.remove()
package = self.get_package_by_name(new_fixture_data['name'])
self.assert_equal(len(package.resources), 0)
finally:
self.purge_package_by_name(new_fixture_data['name'])
def test_entity_delete_ok(self):
# create a package with package_fixture_data
if not self.get_package_by_name(self.package_fixture_data['name']):
self.create_package(admins=[self.user], name=self.package_fixture_data['name'])
assert self.get_package_by_name(self.package_fixture_data['name'])
# delete it
offset = self.package_offset(self.package_fixture_data['name'])
res = self.app.delete(offset, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
package = self.get_package_by_name(self.package_fixture_data['name'])
self.assert_equal(package.state, 'deleted')
model.Session.remove()
def test_entity_delete_ok_without_request_headers(self):
# create a package with package_fixture_data
if not self.get_package_by_name(self.package_fixture_data['name']):
self.create_package(admins=[self.user], name=self.package_fixture_data['name'])
assert self.get_package_by_name(self.package_fixture_data['name'])
# delete it
offset = self.package_offset(self.package_fixture_data['name'])
res = self.delete_request(offset, status=self.STATUS_200_OK,
extra_environ=self.admin_extra_environ)
package = self.get_package_by_name(self.package_fixture_data['name'])
self.assert_equal(package.state, 'deleted')
model.Session.remove()
def test_entity_delete_not_found(self):
package_name = u'random_one'
assert not model.Session.query(model.Package).filter_by(name=package_name).count()
offset = self.offset('/rest/dataset/%s' % package_name)
res = self.app.delete(offset, status=self.STATUS_404_NOT_FOUND,
extra_environ=self.admin_extra_environ)
def test_package_revisions(self):
# check original revision
res = self.app.get(self.offset('/rest/dataset/%s/revisions' % 'annakarenina'))
revisions = res.json
assert len(revisions) == 1, len(revisions)
expected_keys = set(('id', 'message', 'author', 'timestamp', 'approved_timestamp'))
keys = set(revisions[0].keys())
assert_equal(keys, expected_keys)
# edit anna
pkg = model.Package.by_name('annakarenina')
model.repo.new_revision()
pkg.title = 'Tolstoy'
model.repo.commit_and_remove()
# check new revision is there
res = self.app.get(self.offset('/rest/dataset/%s/revisions' % 'annakarenina'))
revisions = res.json
assert len(revisions) == 2, len(revisions)
# check ordering
assert revisions[0]["timestamp"] > revisions[1]["timestamp"]
# edit related extra
pkg = model.Package.by_name('annakarenina')
model.repo.new_revision()
pkg.extras['genre'] = 'literary'
model.repo.commit_and_remove()
# check new revision is there
res = self.app.get(self.offset('/rest/dataset/%s/revisions' % 'annakarenina'))
revisions = res.json
assert len(revisions) == 3, len(revisions)
def test_create_private_package_with_no_organization(self):
'''Test that private packages with no organization cannot be created.
'''
testsysadmin = model.User.by_name('testsysadmin')
result = tests.call_action_api(self.app, 'package_create', name='test',
private=True, apikey=testsysadmin.apikey, status=409)
assert result == {'__type': 'Validation Error',
'private': ["Datasets with no organization can't be private."]}
def test_create_public_package_with_no_organization(self):
'''Test that public packages with no organization can be created.'''
testsysadmin = model.User.by_name('testsysadmin')
tests.call_action_api(self.app, 'package_create', name='test',
private=False, apikey=testsysadmin.apikey)
def test_make_package_with_no_organization_private(self):
'''Test that private packages with no organization cannot be created
by package_update.
'''
testsysadmin = model.User.by_name('testsysadmin')
package = tests.call_action_api(self.app, 'package_create',
name='test_2', private=False, apikey=testsysadmin.apikey)
package['private'] = True
result = tests.call_action_api(self.app, 'package_update',
apikey=testsysadmin.apikey, status=409, **package)
assert result == {'__type': 'Validation Error',
'private': ["Datasets with no organization can't be private."]}
class TestPackagesVersion1(Version1TestCase, PackagesTestCase):
def test_06_create_pkg_using_download_url(self):
test_params = {
'name':u'testpkg06',
'download_url':u'ftp://ftp.monash.edu.au/pub/nihongo/JMdict.gz',
}
offset = self.package_offset()
postparams = '%s=1' % self.dumps(test_params)
res = self.app.post(offset, params=postparams,
extra_environ=self.admin_extra_environ)
model.Session.remove()
pkg = self.get_package_by_name(test_params['name'])
assert pkg
assert pkg.name == test_params['name'], pkg
assert len(pkg.resources) == 1, pkg.resources
assert pkg.resources[0].url == test_params['download_url'], pkg.resources[0]
def test_10_edit_pkg_with_download_url(self):
test_params = {
'name':u'testpkg10',
'download_url':u'testurl',
}
rev = model.repo.new_revision()
pkg = model.Package()
model.Session.add(pkg)
pkg.name = test_params['name']
pkg.download_url = test_params['download_url']
model.Session.commit()
pkg = self.get_package_by_name(test_params['name'])
model.setup_default_user_roles(pkg, [self.user])
rev = model.repo.new_revision()
model.repo.commit_and_remove()
assert self.get_package_by_name(test_params['name'])
# edit it
pkg_vals = {'download_url':u'newurl'}
offset = self.package_offset(test_params['name'])
postparams = '%s=1' % self.dumps(pkg_vals)
res = self.app.post(offset, params=postparams, status=[200],
extra_environ=self.admin_extra_environ)
model.Session.remove()
pkg = model.Session.query(model.Package).filter_by(name=test_params['name']).one()
assert len(pkg.resources) == 1, pkg.resources
assert pkg.resources[0].url == pkg_vals['download_url']
class TestPackagesVersion2(Version2TestCase, PackagesTestCase): pass
|
py | 1a4747df7c2ff140a24d4ad390f7c7aed2a0ed14 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'The StudiMY Project'
copyright = '2019, MARIMORE ENGINEERING SDN. BHD. (925539-H)'
author = 'Chee Yim, Goh and Iqbal Abdullah'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '101Readmedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '101Readme.tex', 'Introduction to the StudiMY Project',
'Chee Yim, Goh \\and Iqbal Abdullah', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, '101readme', 'Introduction to the StudiMY Project',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, '101Readme', 'Introduction to the StudiMY Project',
author, '101Readme', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
py | 1a4747f94488e884079ee35ce7486c2a4f82b9a4 | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from synchrophasor.frame import *
from synchrophasor.pmu import Pmu
from synchrophasor.pmuGen import *
from time import sleep
import threading
SLEEP_TIME = 1.0/100
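# Each test below starts one or more PMU servers via create_pmu() and streams
# the bundled IEEE sample data frame to any connected clients in a loop.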
def test_client_single_pmu():
pmu = create_pmu(9006)
pmu.ieee_data_sample.set_freq(1)
cnt = 0
while True:
sleep(SLEEP_TIME)
if pmu.clients:
pmu.send(pmu.ieee_data_sample)
pmu.join()
def test_client_2_pmus():
pmus = [create_pmu(port) for port in [9007, 9008]]
for i, pmu in enumerate(pmus):
pmu.ieee_data_sample.set_freq(i+1)
cnt = 0
while True:
sleep(SLEEP_TIME)
for pmu in pmus:
pmu.send(pmu.ieee_data_sample)
for pmu in pmus:
pmu.join()
def test_client_10_pmus():
nSources = 4
pmus = [create_pmu(port, log_level='DEBUG') for port in range(9009, 9009+nSources)]
# pmus = [create_pmu(port) for port in range(9009, 9009+nSources)]
for i, pmu in enumerate(pmus):
pmu.ieee_data_sample.set_freq(i+1)
cnt = 0
while True:
# sleep(SLEEP_TIME)
for pmu in pmus:
pmu.send(pmu.ieee_data_sample)
for pmu in pmus:
pmu.join()
if __name__ == "__main__":
test_list = [
# test_client_single_pmu,
# test_client_2_pmus,
test_client_10_pmus
]
threads = list()
for test in test_list:
x = threading.Thread(target=test)
threads.append(x)
x.start()
for index, thread in enumerate(threads):
thread.join()
|
py | 1a4748a6b1c53bddac1207b02925942b8b8bc880 | import argparse
import itertools
from xml.etree import ElementTree
import pandas as pd
class BiocToDataFrame:
def __init__(self):
self.namespaces = {}
def __call__(self, xmlbuffer_or_path):
if isinstance(xmlbuffer_or_path, str):
with open(xmlbuffer_or_path, "r") as xmlhandle:
return self.parse(xmlhandle)
return self.parse(xmlbuffer_or_path)
def parse(self, xmlbuffer_or_path):
result_json = []
for document in self._iter_elements_by_name(xmlbuffer_or_path, "document", self.namespaces):
doc_id = document.find("id").text
passage_ele = document.find("passage")
passage = passage_ele.find("text").text
# Get all proteins
proteins = [p for p in self._find_protein_annotations(passage_ele)]
# make them unique
proteins = set(proteins)
rel_protein_pairs = set()
for p1, p2 in self._find_protein_relations(passage_ele):
rel_protein_pairs.add(frozenset([p1, p2]))
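# Enumerate every unordered protein pair in the passage and label it by
# whether it occurs in an annotated relation, so the resulting DataFrame
# contains both positive (isValid=True) and negative candidate pairs.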
for protein_combination in itertools.combinations(proteins, 2):
# sort names so it is easier to test
protein_combination = sorted(protein_combination)
participant1 = protein_combination[0]
participant2 = protein_combination[1]
protein_combination = frozenset(protein_combination)
is_valid = protein_combination in rel_protein_pairs
result_json.append({"docid": doc_id
, "passage": passage
, "participant1": participant1
, "participant2": participant2
, "isValid": is_valid
})
return pd.DataFrame(result_json)
@staticmethod
def _find_protein_annotations(passage_ele):
for annotation_ele in passage_ele.findall("annotation"):
is_protein = False
for infon_ele in annotation_ele.findall("infon"):
if infon_ele.attrib["key"] == 'type' and infon_ele.text == 'protein':
is_protein = True
break
if is_protein:
yield annotation_ele.find("text").text
@staticmethod
def _find_protein_relations(passage_ele):
for annotation_ele in passage_ele.findall("relation"):
is_relation = False
for infon_ele in annotation_ele.findall("infon"):
if infon_ele.attrib["key"] == 'type' and infon_ele.text == 'Relation':
is_relation = True
break
if is_relation:
participant1_id = annotation_ele.find("node[@role='Arg1']").attrib["refid"]
participant1 = passage_ele.find("annotation[@id='{}']/text".format(participant1_id)).text
participant2_id = annotation_ele.find("node[@role='Arg2']").attrib["refid"]
participant2 = passage_ele.find("annotation[@id='{}']/text".format(participant2_id)).text
yield participant1, participant2
@staticmethod
def _iter_elements_by_name(handle, name, namespace):
events = ElementTree.iterparse(handle, events=("start", "end"))
_, root = next(events) # Grab the root element.
expanded_name = name
# If name has the namespace, expand it
if ":" in name:
local_name = name[name.index(":") + 1:]
namespace_short_name = name[:name.index(":")]
expanded_name = "{{{}}}{}".format(namespace[namespace_short_name], local_name)
for event, elem in events:
if event == "end" and elem.tag == expanded_name:
yield elem
elem.clear()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="The bioc xml formatted json")
parser.add_argument("output",
help="The output_file")
args = parser.parse_args()
# Run
result = BiocToDataFrame().parse(args.input)
result.to_json(args.output)
|
py | 1a4748a8737b8cfbef85c3204cc2eddb96c80f20 | ################################################################################
# Example : perform live fire detection in video using FireNet CNN
# Copyright (c) 2017/18 - Andrew Dunnings / Toby Breckon, Durham University, UK
# License : https://github.com/tobybreckon/fire-detection-cnn/blob/master/LICENSE
################################################################################
import cv2
import os
import sys
import math
import requests
################################################################################
import tflearn
from tflearn.layers.core import *
from tflearn.layers.conv import *
from tflearn.layers.normalization import *
from tflearn.layers.estimator import regression
################################################################################
def construct_firenet (x,y):
# Build network as per architecture in [Dunnings/Breckon, 2018]
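# (three conv + max-pool + local-response-normalisation blocks, followed by
# two fully-connected layers of 4096 units with dropout and a 2-way softmax
# over fire / no-fire, trained with momentum and categorical cross-entropy)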
network = tflearn.input_data(shape=[None, y, x, 3], dtype=tf.float32)
network = conv_2d(network, 64, 5, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 128, 4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=0.001)
model = tflearn.DNN(network, checkpoint_path='firenet',
max_checkpoints=1, tensorboard_verbose=2)
return model
################################################################################
# construct and display model
model = construct_firenet (224, 224)
print("Constructed FireNet ...")
model.load(os.path.join("models/FireNet", "firenet"),weights_only=True)
print("Loaded CNN network weights ...")
################################################################################
# network input sizes
rows = 224
cols = 224
# display and loop settings
windowName = "Live Fire Detection - FireNet CNN";
keepProcessing = True;
################################################################################
if len(sys.argv) == 2:
# load video file from first command line argument
video = cv2.VideoCapture(sys.argv[1])
print("Loaded video ...")
# create window
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL);
# get video properties
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH));
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = video.get(cv2.CAP_PROP_FPS)
frame_time = round(1000/fps);
while (keepProcessing):
# start a timer (to see how long processing and display takes)
start_t = cv2.getTickCount();
# get video frame from file, handle end of file
ret, frame = video.read()
if not ret:
print("... end of video file reached");
break;
# re-size image to network input size and perform prediction
small_frame = cv2.resize(frame, (rows, cols), interpolation=cv2.INTER_AREA)
output = model.predict([small_frame])
# label image based on prediction
myFile = open('append.txt', 'a')
if round(output[0][0]) == 1:
print("FIRE")
myFile.write('fire')
r = requests.post('http://linksmartsensing.us-east-2.elasticbeanstalk.com/data/fire', params = {'id':"1",'fire':"true"})
print(r.text)
cv2.rectangle(frame, (0,0), (width,height), (0,0,255), 50)
cv2.putText(frame,'FIRE',(int(width/16),int(height/4)),
cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA);
else:
print("CLEAR")
myFile.write('clear')
r = requests.post('http://linksmartsensing.us-east-2.elasticbeanstalk.com/data/fire', params = {'id':"1",'fire':"false"})
print(r.text)
cv2.rectangle(frame, (0,0), (width,height), (0,255,0), 50)
cv2.putText(frame,'CLEAR',(int(width/16),int(height/4)),
cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA);
# stop the timer and convert to ms. (to see how long processing and display takes)
stop_t = ((cv2.getTickCount() - start_t)/cv2.getTickFrequency()) * 1000;
# image display and key handling
cv2.imshow(windowName, frame);
# wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)
key = cv2.waitKey(max(2, frame_time - int(math.ceil(stop_t)))) & 0xFF;
if (key == ord('x')):
keepProcessing = False;
elif (key == ord('f')):
cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN);
else:
print("usage: python firenet.py videofile.ext");
################################################################################
|
py | 1a4748b0b76a9426c978a1f4b80e20ac2ed68cf4 | """
Remove the docs in training set that overlap with test sets or are duplicate
As it loads all data into memory, it requires a large memory machine to run
If you are processing MAG, run pykp.data.mag.post_clearn.py to remove noisy items (abstract contains "Full textFull text is available as a scanned copy of the original print version.") (around 132561 out of 3114539) and remove duplicates by title
"""
import argparse
import json
import os
import string
import nltk
import tqdm
from joblib import Parallel, delayed
from multiprocessing import Pool
import time
from pykp.data.remove_duplicates import init_args, example_iterator_from_json, text2tokens, set_similarity_match
stopwords = nltk.corpus.stopwords.words('english')
stopwords.extend(string.punctuation)
stopwords.extend(string.digits)
stopwords.append('')
def detect_duplicate_job(train_example):
global testsets_dict, title_pool
train_id = train_example['id']
title_tokens = text2tokens(train_example['title'])
text_tokens = text2tokens(train_example['abstract'])
# check if title is duplicate in train data (have been processed before)
title_str = ' '.join(title_tokens)
if title_str in title_pool:
return ('train_log', '%s|%s|%s\n' % (train_id, title_pool[title_str], title_str))
else:
title_pool[title_str] = train_id
# check if title/content is duplicate in valid/test data
title_set = set(title_tokens)
content_set = title_set | set(text_tokens)
for test_dataset_subname, testset in testsets_dict.items():
for test_id, test_example in testset.items():
title_flag, title_sim = set_similarity_match(title_set, test_example['title_set'], 0.7)
content_flag, content_sim = set_similarity_match(content_set, test_example['content_set'], 0.7)
if title_flag or content_flag:
return (test_dataset_subname,
'%s|%s|%s|%s|%f|%f\n' % (test_example['id'], train_example['id'], test_example['title'], train_example['title'], title_sim, content_sim))
# write non-duplicates to disk
return ('train_output', json.dumps(train_example) + '\n')
def run_normal_parallel(n_jobs, examples_iter):
start_time = time.time()
pool = Pool(processes=n_jobs)
# results = pool.map(detect_duplicate_job, examples_iter)
results = []
for r in tqdm.tqdm(pool.imap(detect_duplicate_job, examples_iter), total=len(examples_iter)):
results.append(r)
# result = list(itertools.chain(*result))
print("Job finished, taking time %.2f s" % (time.time()-start_time))
return results
def main():
opt = init_args()
# specify for which dataset (for valid/test) we need to remove duplicate data samples from training data
if opt.datatype == 'paper':
total_num = 20000 #530631
train_dataset_name = 'kp20k_training'
test_dataset_names = ['kp20k', 'inspec', 'nus', 'semeval', 'krapivin']
id_field = None
title_field = 'title'
text_field ='abstract'
keyword_field = 'keywords'
trg_delimiter = ';'
elif opt.datatype == 'qa':
total_num = 298965
train_dataset_name = 'stackexchange_training'
test_dataset_names = ['stackexchange']
id_field = None
title_field = 'title'
text_field ='question'
keyword_field = 'tags'
trg_delimiter = ';'
elif opt.datatype == 'mag':
total_num = 5108427
train_dataset_name = 'mag'
test_dataset_names = ['kp20k', 'inspec', 'nus', 'semeval', 'krapivin']
id_field = 'id'
title_field = 'title'
text_field ='abstract'
keyword_field = 'keywords'
trg_delimiter = None
print("Loading training data...")
train_examples_iter = example_iterator_from_json(path=opt.train_file,
dataset_name=train_dataset_name,
id_field=id_field,
title_field=title_field,
text_field=text_field,
keyword_field=keyword_field,
trg_delimiter=trg_delimiter)
train_examples_iter = list(train_examples_iter)
global pbar, output_cache, testsets_dict, title_pool
testsets_dict = {}
output_dir = opt.test_dataset_dir + '/%s_output/' % opt.datatype
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Loading validation/test data...")
for test_dataset_name in test_dataset_names:
for type in ['validation', 'testing']:
test_dataset_subname = '%s_%s' % (test_dataset_name, type)
source_test_file = os.path.join(opt.test_dataset_dir, test_dataset_name, test_dataset_subname+'.json')
test_examples = list(example_iterator_from_json(path=source_test_file,
dataset_name=test_dataset_subname,
id_field=id_field,
title_field=title_field,
text_field=text_field,
keyword_field=keyword_field,
trg_delimiter = ';'))
testset = {}
for test_num, test_example in enumerate(test_examples):
test_id = test_example['id']
title_tokens = text2tokens(test_example['title'])
text_tokens = text2tokens(test_example['abstract'])
# concatenate title and put it into hashtable
title_set = set(title_tokens)
text_set = set(text_tokens)
content_set = title_set | text_set
test_example['title_set'] = title_set
test_example['content_set'] = content_set
test_example['dup_train_ids'] = []
test_example['dup_train_titles'] = []
testset[test_id] = test_example
testsets_dict[test_dataset_subname] = testset
print("\tsize(%s) = %d" % (test_dataset_subname, len(testset)))
"""
1. Clean text, remove stopwords/punctuation
2. Treat as overlapping if title & text match >= 70%
3. Build a title hashset to remove training duplicates
"""
print("Cleaning duplicate data...")
global file_writers
file_writers = {}
for test_dataset_name in test_dataset_names:
for type in ['validation', 'testing']:
test_dataset_subname = '%s_%s' % (test_dataset_name, type)
file_writers[test_dataset_subname] = open('%s/%s__dup__%s.log'
% (output_dir, test_dataset_subname, train_dataset_name), 'w')
print("Initializing file writer for %s: %s" % (test_dataset_subname, os.path.abspath('%s/%s__dup__%s.log' % (output_dir, test_dataset_subname, train_dataset_name))))
output_cache = []
file_writers['train_output'] = open('%s/%s_nodup.json' % (output_dir, train_dataset_name), 'w')
file_writers['train_log'] = open('%s/%s__dup.log' % (output_dir, train_dataset_name), 'w')
title_pool = {}
print("Total number of examples = %d" % len(train_examples_iter))
print("Total number of jobs = %d" % opt.n_jobs)
# dataset_line_tuples = Parallel(n_jobs=opt.n_jobs, verbose=len(train_examples_iter))(delayed(detect_duplicate_job)(ex) for ex in train_examples_iter)
dataset_line_tuples = run_normal_parallel(opt.n_jobs, train_examples_iter)
print("Process ends. Got %d data examples" % len(dataset_line_tuples))
for dataset_subname, line in dataset_line_tuples:
writer = file_writers[dataset_subname]
writer.write(line)
for d_name, d_writer in file_writers.items():
print("Closing %s" % d_name)
d_writer.close()
if __name__ == "__main__":
main()
|
py | 1a474c4dad87cafcd0289a472260ce5526bbbf51 | # -*- coding: utf-8 -*-
import io
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import signals
from fooltrader.api.quote import get_security_list
from fooltrader.contract.files_contract import get_finance_path
from fooltrader.utils.utils import index_df_with_time
class AmericaStockFinanceSpider(scrapy.Spider):
name = "america_stock_finance"
custom_settings = {
# 'DOWNLOAD_DELAY': 2,
# 'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'SPIDER_MIDDLEWARES': {
'fooltrader.middlewares.FoolErrorMiddleware': 1000,
}
}
def start_requests(self):
security_item = self.settings.get("security_item")
if security_item is not None:
item = security_item
data_url = self.get_finance_url(item['code'])
data_path = get_finance_path(item)
yield Request(url=data_url,
meta={'path': data_path,
'item': item},
callback=self.download_finance_csv)
else:
for _, item in get_security_list(exchanges=['nasdaq']).iterrows():
data_url = self.get_finance_url(item['code'])
data_path = get_finance_path(item)
yield Request(url=data_url,
meta={'path': data_path,
'item': item},
callback=self.download_finance_csv)
def download_finance_csv(self, response):
content_type_header = response.headers.get('content-type', None)
if content_type_header.decode("utf-8") == 'text/csv':
path = response.meta['path']
security_item = response.meta['item']
df = pd.read_csv(io.BytesIO(response.body), na_values='None')
df.columns = [
"reportDate",
"shares",
"sharesAdjusted",
"factor",
"totalAssets",
"totalCurrentAssets",
"totalLiabilities",
"totalCurrentLiabilities",
"bookValue",
"minorityBookValue",
"preferredEquity",
"goodwill",
"longTermBorrowing",
"operatingRevenue",
"netProfit",
"netProfitAttributedToParentCompanyOwner",
"EPS",
"dilutedEPS",
"DPS",
"netCashFlowsFromOperatingActivities",
"netCashFlowsFromInvesting",
"netCashFlowsFromFinancingActivities",
"cashChange",
"cashAtTheEndOfPeriod",
"capitalExpenditures",
"price",
"priceHigh",
"priceLow",
"ROE",
"ROA",
"BVPS",
"PB",
"PE",
"cumulativeDividendsPerShare",
"dividendPayoutRatio",
"longTermDebtToEquityRatio",
"equityToAssetsRatio",
"netMargin",
"assetTurnover",
"freeCashFlowPerShare",
"currentRatio"]
df['code'] = security_item['code']
df['securityId'] = security_item['id']
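# Build a per-row id by joining the security id and report date with '_'
# (i.e. "<securityId>_<reportDate>").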
df['id'] = df[['securityId', 'reportDate']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
df = index_df_with_time(df, index='reportDate')
df.fillna(0, inplace=True)
df.to_csv(path, index=False)
else:
self.logger.exception(
"get finance csv error:url={} content type={} body={}".format(response.url, content_type_header,
response.body))
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(AmericaStockFinanceSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
def get_finance_url(self, code):
return 'http://www.stockpup.com/data/{}_quarterly_financial_data.csv'.format(code)
|
py | 1a474ca149c4379814f0e4698f2c91f32248e6d3 | #
# Copyright (C) 2006-2017 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Import all RDKit chemistry modules
"""
import sys
import warnings
from collections import namedtuple
import numpy
from rdkit import DataStructs
from rdkit import ForceField
from rdkit import RDConfig
from rdkit import rdBase
from rdkit.Chem import *
from rdkit.Chem.ChemicalFeatures import *
from rdkit.Chem.rdChemReactions import *
from rdkit.Chem.rdDepictor import *
from rdkit.Chem.rdDistGeom import *
from rdkit.Chem.rdForceFieldHelpers import *
from rdkit.Chem.rdMolAlign import *
from rdkit.Chem.rdMolDescriptors import *
from rdkit.Chem.rdMolTransforms import *
from rdkit.Chem.rdPartialCharges import *
from rdkit.Chem.rdReducedGraphs import *
from rdkit.Chem.rdShapeHelpers import *
from rdkit.Chem.rdqueries import *
from rdkit.Chem.rdMolEnumerator import *
from rdkit.Geometry import rdGeometry
from rdkit.RDLogger import logger
from rdkit.Chem.EnumerateStereoisomers import StereoEnumerationOptions, EnumerateStereoisomers
try:
from rdkit.Chem.rdSLNParse import *
except ImportError:
pass
Mol.Compute2DCoords = Compute2DCoords
Mol.ComputeGasteigerCharges = ComputeGasteigerCharges
logger = logger()
def TransformMol(mol, tform, confId=-1, keepConfs=False):
""" Applies the transformation (usually a 4x4 double matrix) to a molecule
if keepConfs is False then all but that conformer are removed
"""
refConf = mol.GetConformer(confId)
TransformConformer(refConf, tform)
if not keepConfs:
if confId == -1:
confId = 0
allConfIds = [c.GetId() for c in mol.GetConformers()]
for cid in allConfIds:
if not cid == confId:
mol.RemoveConformer(cid)
# reset the conf Id to zero since there is only one conformer left
mol.GetConformer(confId).SetId(0)
def ComputeMolShape(mol, confId=-1, boxDim=(20, 20, 20), spacing=0.5, **kwargs):
""" returns a grid representation of the molecule's shape
"""
res = rdGeometry.UniformGrid3D(boxDim[0], boxDim[1], boxDim[2], spacing=spacing)
EncodeShape(mol, res, confId, **kwargs)
return res
def ComputeMolVolume(mol, confId=-1, gridSpacing=0.2, boxMargin=2.0):
""" Calculates the volume of a particular conformer of a molecule
based on a grid-encoding of the molecular shape.
A bit of demo as well as a test of github #1883:
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> mol = Chem.AddHs(Chem.MolFromSmiles('C'))
>>> AllChem.EmbedMolecule(mol)
0
>>> ComputeMolVolume(mol)
28...
>>> mol = Chem.AddHs(Chem.MolFromSmiles('O'))
>>> AllChem.EmbedMolecule(mol)
0
>>> ComputeMolVolume(mol)
20...
"""
mol = rdchem.Mol(mol)
conf = mol.GetConformer(confId)
CanonicalizeConformer(conf, ignoreHs=False)
box = ComputeConfBox(conf)
sideLen = (box[1].x - box[0].x + 2 * boxMargin, box[1].y - box[0].y + 2 * boxMargin,
box[1].z - box[0].z + 2 * boxMargin)
shape = rdGeometry.UniformGrid3D(sideLen[0], sideLen[1], sideLen[2], spacing=gridSpacing)
EncodeShape(mol, shape, confId, ignoreHs=False, vdwScale=1.0)
voxelVol = gridSpacing**3
occVect = shape.GetOccupancyVect()
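# An occupancy value of 3 is assumed here to mark grid points lying fully
# inside the encoded shape; only those voxels contribute to the volume estimate.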
voxels = [1 for x in occVect if x == 3]
vol = voxelVol * len(voxels)
return vol
def GetConformerRMS(mol, confId1, confId2, atomIds=None, prealigned=False):
""" Returns the RMS between two conformations.
By default, the conformers will be aligned to the first conformer
before the RMS calculation and, as a side-effect, the second will be left
in the aligned state.
Arguments:
- mol: the molecule
- confId1: the id of the first conformer
- confId2: the id of the second conformer
- atomIds: (optional) list of atom ids to use as points for
alignment - defaults to all atoms
- prealigned: (optional) by default the conformers are assumed
to be unaligned and the second conformer will be aligned
to the first
"""
# align the conformers if necessary
# Note: the reference conformer is always the first one
if not prealigned:
if atomIds:
AlignMolConformers(mol, confIds=[confId1, confId2], atomIds=atomIds)
else:
AlignMolConformers(mol, confIds=[confId1, confId2])
# calculate the RMS between the two conformations
conf1 = mol.GetConformer(id=confId1)
conf2 = mol.GetConformer(id=confId2)
ssr = 0
for i in range(mol.GetNumAtoms()):
d = conf1.GetAtomPosition(i).Distance(conf2.GetAtomPosition(i))
ssr += d * d
ssr /= mol.GetNumAtoms()
return numpy.sqrt(ssr)
def GetConformerRMSMatrix(mol, atomIds=None, prealigned=False):
""" Returns the RMS matrix of the conformers of a molecule.
As a side-effect, the conformers will be aligned to the first
conformer (i.e. the reference) and will be left in the aligned state.
Arguments:
- mol: the molecule
- atomIds: (optional) list of atom ids to use as points for
alignment - defaults to all atoms
- prealigned: (optional) by default the conformers are assumed
to be unaligned and will therefore be aligned to the
first conformer
Note that the returned RMS matrix is symmetrical, i.e. it is the
lower half of the matrix, e.g. for 5 conformers::
rmsmatrix = [ a,
b, c,
d, e, f,
g, h, i, j]
where a is the RMS between conformers 0 and 1, b is the RMS between
conformers 0 and 2, etc.
This way it can be directly used as distance matrix in e.g. Butina
clustering.
"""
# if necessary, align the conformers
# Note: the reference conformer is always the first one
rmsvals = []
confIds = [conf.GetId() for conf in mol.GetConformers()]
if not prealigned:
if atomIds:
AlignMolConformers(mol, atomIds=atomIds, RMSlist=rmsvals)
else:
AlignMolConformers(mol, RMSlist=rmsvals)
else: # already prealigned
for i in range(1, len(confIds)):
rmsvals.append(
GetConformerRMS(mol, confIds[0], confIds[i], atomIds=atomIds, prealigned=prealigned))
# loop over the conformations (except the reference one)
cmat = []
for i in range(1, len(confIds)):
cmat.append(rmsvals[i - 1])
for j in range(1, i):
cmat.append(GetConformerRMS(mol, confIds[i], confIds[j], atomIds=atomIds, prealigned=True))
return cmat
def EnumerateLibraryFromReaction(reaction, sidechainSets, returnReactants=False):
""" Returns a generator for the virtual library defined by
a reaction and a sequence of sidechain sets
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> s1=[Chem.MolFromSmiles(x) for x in ('NC','NCC')]
>>> s2=[Chem.MolFromSmiles(x) for x in ('OC=O','OC(=O)C')]
>>> rxn = AllChem.ReactionFromSmarts('[O:2]=[C:1][OH].[N:3]>>[O:2]=[C:1][N:3]')
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[s2,s1])
>>> [Chem.MolToSmiles(x[0]) for x in list(r)]
['CNC=O', 'CCNC=O', 'CNC(C)=O', 'CCNC(C)=O']
Note that this is all done in a lazy manner, so "infinitely" large libraries can
be done without worrying about running out of memory. Your patience will run out first:
Define a set of 10000 amines:
>>> amines = (Chem.MolFromSmiles('N'+'C'*x) for x in range(10000))
... a set of 10000 acids
>>> acids = (Chem.MolFromSmiles('OC(=O)'+'C'*x) for x in range(10000))
... now the virtual library (1e8 compounds in principle):
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[acids,amines])
... look at the first 4 compounds:
>>> [Chem.MolToSmiles(next(r)[0]) for x in range(4)]
['NC=O', 'CNC=O', 'CCNC=O', 'CCCNC=O']
"""
if len(sidechainSets) != reaction.GetNumReactantTemplates():
raise ValueError('%d sidechains provided, %d required' %
(len(sidechainSets), reaction.GetNumReactantTemplates()))
def _combiEnumerator(items, depth=0):
for item in items[depth]:
if depth + 1 < len(items):
v = _combiEnumerator(items, depth + 1)
for entry in v:
l = [item]
l.extend(entry)
yield l
else:
yield [item]
ProductReactants = namedtuple('ProductReactants', 'products,reactants')
for chains in _combiEnumerator(sidechainSets):
prodSets = reaction.RunReactants(chains)
for prods in prodSets:
if returnReactants:
yield ProductReactants(prods, chains)
else:
yield prods
def ConstrainedEmbed(mol, core, useTethers=True, coreConfId=-1, randomseed=2342,
getForceField=UFFGetMoleculeForceField, **kwargs):
""" generates an embedding of a molecule where part of the molecule
is constrained to have particular coordinates
Arguments
- mol: the molecule to embed
- core: the molecule to use as a source of constraints
- useTethers: (optional) if True, the final conformation will be
optimized subject to a series of extra forces that pull the
matching atoms to the positions of the core atoms. Otherwise
simple distance constraints based on the core atoms will be
used in the optimization.
- coreConfId: (optional) id of the core conformation to use
- randomseed: (optional) seed for the random number generator
An example, start by generating a template with a 3D structure:
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1")
>>> AllChem.EmbedMolecule(template)
0
>>> AllChem.UFFOptimizeMolecule(template)
0
Here's a molecule:
>>> mol = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1-c3ccccc3")
Now do the constrained embedding
>>> mol = AllChem.ConstrainedEmbed(mol, template)
Demonstrate that the positions are nearly the same with template:
>>> import math
>>> molp = mol.GetConformer().GetAtomPosition(0)
>>> templatep = template.GetConformer().GetAtomPosition(0)
>>> all(math.isclose(v, 0.0, abs_tol=0.01) for v in molp-templatep)
True
>>> molp = mol.GetConformer().GetAtomPosition(1)
>>> templatep = template.GetConformer().GetAtomPosition(1)
>>> all(math.isclose(v, 0.0, abs_tol=0.01) for v in molp-templatep)
True
"""
match = mol.GetSubstructMatch(core)
if not match:
raise ValueError("molecule doesn't match the core")
coordMap = {}
coreConf = core.GetConformer(coreConfId)
for i, idxI in enumerate(match):
corePtI = coreConf.GetAtomPosition(i)
coordMap[idxI] = corePtI
ci = EmbedMolecule(mol, coordMap=coordMap, randomSeed=randomseed, **kwargs)
if ci < 0:
raise ValueError('Could not embed molecule.')
algMap = [(j, i) for i, j in enumerate(match)]
if not useTethers:
# clean up the conformation
ff = getForceField(mol, confId=0)
for i, idxI in enumerate(match):
for j in range(i + 1, len(match)):
idxJ = match[j]
d = coordMap[idxI].Distance(coordMap[idxJ])
ff.AddDistanceConstraint(idxI, idxJ, d, d, 100.)
ff.Initialize()
n = 4
more = ff.Minimize()
while more and n:
more = ff.Minimize()
n -= 1
# rotate the embedded conformation onto the core:
rms = AlignMol(mol, core, atomMap=algMap)
else:
# rotate the embedded conformation onto the core:
rms = AlignMol(mol, core, atomMap=algMap)
ff = getForceField(mol, confId=0)
conf = core.GetConformer()
for i in range(core.GetNumAtoms()):
p = conf.GetAtomPosition(i)
pIdx = ff.AddExtraPoint(p.x, p.y, p.z, fixed=True) - 1
ff.AddDistanceConstraint(pIdx, match[i], 0, 0, 100.)
ff.Initialize()
n = 4
more = ff.Minimize(energyTol=1e-4, forceTol=1e-3)
while more and n:
more = ff.Minimize(energyTol=1e-4, forceTol=1e-3)
n -= 1
# realign
rms = AlignMol(mol, core, atomMap=algMap)
mol.SetProp('EmbedRMS', str(rms))
return mol
def AssignBondOrdersFromTemplate(refmol, mol):
""" assigns bond orders to a molecule based on the
bond orders in a template molecule
Arguments
- refmol: the template molecule
- mol: the molecule to assign bond orders to
An example, start by generating a template from a SMILES
and read in the PDB structure of the molecule
>>> import os
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("CN1C(=NC(C1=O)(c2ccccc2)c3ccccc3)N")
>>> mol = AllChem.MolFromPDBFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4DJU_lig.pdb'))
>>> len([1 for b in template.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
>>> len([1 for b in mol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
22
Now assign the bond orders based on the template molecule
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> len([1 for b in newMol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
Note that the template molecule should have no explicit hydrogens
else the algorithm will fail.
It also works if there are different formal charges (this was github issue 235):
>>> template=AllChem.MolFromSmiles('CN(C)C(=O)Cc1ccc2c(c1)NC(=O)c3ccc(cc3N2)c4ccc(c(c4)OC)[N+](=O)[O-]')
>>> mol = AllChem.MolFromMolFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4FTR_lig.mol'))
>>> AllChem.MolToSmiles(mol)
'COC1CC(C2CCC3C(O)NC4CC(CC(O)N(C)C)CCC4NC3C2)CCC1N(O)O'
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> AllChem.MolToSmiles(newMol)
'COc1cc(-c2ccc3c(c2)Nc2ccc(CC(=O)N(C)C)cc2NC3=O)ccc1[N+](=O)[O-]'
"""
refmol2 = rdchem.Mol(refmol)
mol2 = rdchem.Mol(mol)
# do the molecules match already?
matching = mol2.GetSubstructMatch(refmol2)
if not matching: # no, they don't match
# check if bonds of mol are SINGLE
for b in mol2.GetBonds():
if b.GetBondType() != BondType.SINGLE:
b.SetBondType(BondType.SINGLE)
b.SetIsAromatic(False)
# set the bonds of mol to SINGLE
for b in refmol2.GetBonds():
b.SetBondType(BondType.SINGLE)
b.SetIsAromatic(False)
# set atom charges to zero;
for a in refmol2.GetAtoms():
a.SetFormalCharge(0)
for a in mol2.GetAtoms():
a.SetFormalCharge(0)
matching = mol2.GetSubstructMatches(refmol2, uniquify=False)
# do the molecules match now?
if matching:
if len(matching) > 1:
logger.warning("More than one matching pattern found - picking one")
matching = matching[0]
# apply matching: set bond properties
for b in refmol.GetBonds():
atom1 = matching[b.GetBeginAtomIdx()]
atom2 = matching[b.GetEndAtomIdx()]
b2 = mol2.GetBondBetweenAtoms(atom1, atom2)
b2.SetBondType(b.GetBondType())
b2.SetIsAromatic(b.GetIsAromatic())
# apply matching: set atom properties
for a in refmol.GetAtoms():
a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])
a2.SetHybridization(a.GetHybridization())
a2.SetIsAromatic(a.GetIsAromatic())
a2.SetNumExplicitHs(a.GetNumExplicitHs())
a2.SetFormalCharge(a.GetFormalCharge())
SanitizeMol(mol2)
if hasattr(mol2, '__sssAtoms'):
mol2.__sssAtoms = None # we don't want all bonds highlighted
else:
raise ValueError("No matching found")
return mol2
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
|
py | 1a474cc70784dde081af63deeeaac07bb3834a10 |
"""
Create plots corresponding to each sample setplot function. Search for all
files of the form setplot_*.py and loop over them.
Also create .rst files for each example. The doc string for each setplot
file should start with at title underlined with ===, followed by a brief
description. These are used in the rst file, which also includes the
setplot function itself and a pointer to the plots directory.
"""
def makeplots(filenames=[]):
import os, glob, re
from pyclaw.plotters.plotclaw import plotclaw
thisdir = os.path.split(os.getcwd())[1]
#os.system('make .plots') # ensure output files and sample plots exist
os.system('make .htmls') # ensure html files exist
if filenames==[]:
filenames = glob.glob('setplot_*.py')
spnames = []
for setplotfile in filenames:
print('=== Making plots using ' + setplotfile)
regexp = re.compile(r'setplot_(?P<spname>.*).py')
result = regexp.search(setplotfile)
spname = result.group('spname')
spnames.append(spname)
plotdir = 'plots_%s' % spname
plotclaw(outdir="_output", plotdir=plotdir, setplot=setplotfile)
for spname in spnames:
setplotfile = 'setplot_%s.py' % spname
rstfile_name = 'plotexample-acou-2d-%s' % spname
print('=== Making rst file %s.rst' % rstfile_name)
rstfile = open('../%s.rst' % rstfile_name, 'w')
setplot_lines = open(setplotfile,'r').read()
regexp = re.compile(r'"""(?P<descr>.*?)""" (?P<rest>.*)', \
re.DOTALL)
result = regexp.search(setplot_lines)
setplot_descr = result.group('descr')
setplot_rest = result.group('rest')
setplot_rest = setplot_rest.replace('\n','\n ',1000)
rstfile.write(""".. _%s: \n%s \n\n""" % (rstfile_name, setplot_descr))
rstfile.write("Example generating data: `<claw/doc/sphinx/%s/README.html>`_\n\n" \
% thisdir)
rstfile.write("Resulting plots: `<claw/doc/sphinx/%s/plots_%s/_PlotIndex.html>`_\n\n::\n" \
% (thisdir, spname))
rstfile.write(setplot_rest)
rstfile.close()
if __name__=='__main__':
import sys
makeplots(sys.argv[1:])
|
py | 1a474ed53b0e2787437b6df24388ed181ffcec95 | """ Classes and functions use across different Semi-Supervised algorithms """
|
py | 1a474f9199f2d6487a0d380474c6c6aebbeb9c35 | from glumpy import app, gloo, gl
from contextlib import contextmanager
import numpy as np
try:
import pycuda.driver
from pycuda.gl import graphics_map_flags, BufferObject
_PYCUDA = True
except ImportError as err:
print('pycuda import error:', err)
_PYCUDA = False
import torch
class OffscreenRender:
def __init__(self, viewport_size, out_buffer_location='opengl', clear_color=None):
self._init_buffers(viewport_size, out_buffer_location)
self.clear_color = clear_color if clear_color is not None else (0., 0., 0., 1.)
def _init_buffers(self, viewport_size, out_buffer_location):
assert out_buffer_location in ['torch', 'opengl', 'numpy']
if out_buffer_location == 'torch':
assert _PYCUDA, 'pycuda is not available'
try:
import pycuda.gl.autoinit # this may fails in headless mode
except:
raise RuntimeError('PyCUDA init failed, cannot use torch buffer')
_ = torch.cuda.FloatTensor(1, 3, 512,512) # needs init here, otherwise does not work
color_np = np.zeros((viewport_size[1], viewport_size[0], 4), np.float32)
self.color_buf, self.color_buf_cuda = create_shared_texture(color_np)
self.out_buf = torch.zeros((viewport_size[1], viewport_size[0], 4), dtype=torch.float32).cuda()
elif out_buffer_location == 'opengl':
self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D)
self.out_buf = self.color_buf
elif out_buffer_location == 'numpy':
self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D)
self.out_buf = np.zeros((viewport_size[1], viewport_size[0], 3), dtype=np.float32)
self.viewport_size = viewport_size
self.out_buffer_location = out_buffer_location
self.depth_buf = gloo.DepthBuffer(viewport_size[0], viewport_size[1], gl.GL_DEPTH_COMPONENT32)
self.fbo = gloo.FrameBuffer(color=self.color_buf, depth=self.depth_buf)
def render(self, scene, cull_face=True):
self.fbo.activate()
gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glShadeModel(gl.GL_FLAT)
if cull_face:
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_BACK)
else:
gl.glDisable(gl.GL_CULL_FACE)
gl.glClearColor(*self.clear_color)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glViewport(0, 0, self.viewport_size[0], self.viewport_size[1])
if scene.draw_points:
scene.program.draw(gl.GL_POINTS)
else:
assert scene.index_buffer is not None
scene.program.draw(gl.GL_TRIANGLES, scene.index_buffer)
if self.out_buffer_location == 'torch':
frame = cpy_texture_to_tensor(self.color_buf_cuda, self.out_buf).clone()
elif self.out_buffer_location == 'opengl':
frame = self.out_buf
else:
gl.glReadPixels(0, 0, self.viewport_size[0], self.viewport_size[1], gl.GL_RGB, gl.GL_FLOAT, self.out_buf)
frame = self.out_buf.copy()
self.fbo.deactivate()
return frame
@contextmanager
def cuda_activate_array(img):
"""Context manager simplifying use of pycuda.gl.RegisteredImage"""
mapping = img.map()
yield mapping.array(0,0)
mapping.unmap()
@contextmanager
def cuda_activate_buffer(buf):
mapping = buf.map()
yield mapping.device_ptr()
mapping.unmap()
def create_shared_texture(arr, map_flags=None):
"""Create and return a Texture2D with gloo and pycuda views."""
if map_flags is None:
map_flags = graphics_map_flags.WRITE_DISCARD
gl_view = arr.view(gloo.TextureFloat2D)
gl_view.activate() # force gloo to create on GPU
gl_view.deactivate()
cuda_view = pycuda.gl.RegisteredImage(
int(gl_view.handle), gl_view.target, map_flags)
return gl_view, cuda_view
def create_shared_buffer(arr):
"""Create and return a BufferObject with gloo and pycuda views."""
gl_view = arr.view(gloo.VertexBuffer)
gl_view.activate() # force gloo to create on GPU
gl_view.deactivate()
cuda_view = BufferObject(np.long(gl_view.handle))
return gl_view, cuda_view
def cpy_texture_to_tensor(texture, tensor):
"""Copy GL texture (cuda view) to pytorch tensor"""
with cuda_activate_array(texture) as src:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_array(src)
cpy.set_dst_device(tensor.data_ptr())
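# pitch = texture width * 4 channels (RGBA) * 4 bytes per float32 element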
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
cpy.height = tensor.shape[0]
cpy(aligned=False)
torch.cuda.synchronize()
return tensor
def cpy_tensor_to_texture(tensor, texture):
"""Copy pytorch tensor to GL texture (cuda view)"""
with cuda_activate_array(texture) as ary:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_device(tensor.data_ptr())
cpy.set_dst_array(ary)
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
cpy.height = tensor.shape[0]
cpy(aligned=False)
torch.cuda.synchronize()
return tensor
def cpy_buffer_to_tensor(buffer, tensor):
"""Copy GL buffer (cuda view) to pytorch tensor"""
n = tensor.numel()*tensor.element_size()
with cuda_activate_buffer(buffer) as buf_ptr:
pycuda.driver.memcpy_dtod(tensor.data_ptr(), buf_ptr, n)
def cpy_tensor_to_buffer(tensor, buffer):
"""Copy pytorch tensor to GL buffer (cuda view)"""
n = tensor.numel()*tensor.element_size()
with cuda_activate_buffer(buffer) as buf_ptr:
pycuda.driver.memcpy_dtod(buf_ptr, tensor.data_ptr(), n)
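# Minimal usage sketch (illustrative; assumes a `scene` object exposing the
# `program`, `index_buffer` and `draw_points` attributes used in render()):
#
#   renderer = OffscreenRender(viewport_size=(1024, 768), out_buffer_location='numpy')
#   frame = renderer.render(scene)   # (H, W, 3) float32 RGB array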
|
py | 1a47513e7adba0a178778681c16ce069052d1dac | """Provides functionality to interact with image processing services."""
import asyncio
from datetime import timedelta
import logging
from typing import final
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "image_processing"
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASSES = [
"alpr", # Automatic license plate recognition
"face", # Face
"ocr", # OCR
]
SERVICE_SCAN = "scan"
SERVICE_ENABLE = "enable_detection"
SERVICE_DISABLE = "disable_detection"
EVENT_DETECT_FACE = "image_processing.detect_face"
ATTR_AGE = "age"
ATTR_CONFIDENCE = "confidence"
ATTR_FACES = "faces"
ATTR_GENDER = "gender"
ATTR_GLASSES = "glasses"
ATTR_MOTION = "motion"
ATTR_TOTAL_FACES = "total_faces"
CONF_CONFIDENCE = "confidence"
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 80
SOURCE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_domain("camera"),
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
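# Illustrative YAML configuration matching the platform schema above (the
# platform name is hypothetical):
#
#   image_processing:
#     - platform: example_face_platform
#       source:
#         - entity_id: camera.front_door
#       confidence: 90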
async def async_setup(hass, config):
"""Set up the image processing."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
async def async_scan_service(service):
"""Service handler for scan."""
image_entities = await component.async_extract_from_service(service)
update_tasks = []
for entity in image_entities:
entity.async_set_context(service.context)
update_tasks.append(asyncio.create_task(entity.async_update_ha_state(True)))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
DOMAIN, SERVICE_SCAN, async_scan_service, schema=make_entity_service_schema({})
)
component.async_register_entity_service(
SERVICE_ENABLE,
schema=make_entity_service_schema({}),
func="async_enable_detection",
)
component.async_register_entity_service(
SERVICE_DISABLE,
schema=make_entity_service_schema({}),
func="async_disable_detection",
)
return True
class ImageProcessingEntity(Entity):
"""Base entity class for image processing."""
timeout = DEFAULT_TIMEOUT
det = "on"
def enable_detection(self):
"""Enable motion detection in the camera."""
self.det = "on"
raise NotImplementedError()
async def async_enable_detection(self):
"""Call the job and enable motion detection."""
await self.hass.async_add_executor_job(self.enable_detection)
def disable_detection(self):
"""Disable motion detection in camera."""
self.det = "off"
raise NotImplementedError()
async def async_disable_detection(self):
"""Call the job and disable motion detection."""
await self.hass.async_add_executor_job(self.disable_detection)
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return None
@property
def confidence(self):
"""Return minimum confidence for do some things."""
return None
@property
def state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_MOTION: self.det}
def process_image(self, image):
"""Process image."""
raise NotImplementedError()
async def async_process_image(self, image):
"""Process image."""
return await self.hass.async_add_executor_job(self.process_image, image)
async def async_update(self):
"""Update image and process it.
This method is a coroutine.
"""
camera = self.hass.components.camera
image = None
try:
image = await camera.async_get_raw_image(
self.camera_entity, timeout=self.timeout
)
except AttributeError:
try:
image = await camera.async_get_image(
self.camera_entity, timeout=self.timeout
)
except HomeAssistantError as err:
_LOGGER.error("Error on receive image from entity: %s", err)
return
# process image data
await self.async_process_image(image.content)
class ImageProcessingFaceEntity(ImageProcessingEntity):
"""Base entity class for face image processing."""
def __init__(self):
"""Initialize base face identify/verify entity."""
self.faces = []
self.total_faces = 0
self.det = "off"
def enable_detection(self):
"""Enable motion detection in the camera."""
self.det = "on"
raise NotImplementedError()
async def async_enable_detection(self):
"""Call the job and enable motion detection."""
await self.hass.async_add_executor_job(self.enable_detection)
def disable_detection(self):
"""Disable motion detection in camera."""
self.det = "off"
raise NotImplementedError()
async def async_disable_detection(self):
"""Call the job and disable motion detection."""
await self.hass.async_add_executor_job(self.disable_detection)
@property
def state(self):
"""Return the state of the entity."""
confidence = 0
state = None
# No confidence support
if not self.confidence:
return self.total_faces
# Search high confidence
for face in self.faces:
if ATTR_CONFIDENCE not in face:
continue
f_co = face[ATTR_CONFIDENCE]
if f_co > confidence:
confidence = f_co
for attr in [ATTR_NAME, ATTR_MOTION]:
if attr in face:
state = face[attr]
break
return state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "face"
@final
@property
def state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_FACES: self.faces,
ATTR_TOTAL_FACES: self.total_faces,
ATTR_MOTION: self.det,
}
def process_faces(self, faces, total):
"""Send event with detected faces and store data."""
run_callback_threadsafe(
self.hass.loop, self.async_process_faces, faces, total
).result()
@callback
def async_process_faces(self, faces, total):
"""Send event with detected faces and store data.
Known faces are passed as a list of dicts in the following format:
[
{
ATTR_CONFIDENCE: 80,
ATTR_NAME: 'Name',
ATTR_AGE: 12.0,
ATTR_GENDER: 'man',
ATTR_MOTION: 'smile',
ATTR_GLASSES: 'sunglasses'
},
]
This method must be run in the event loop.
"""
# Send events
for face in faces:
if (
ATTR_CONFIDENCE in face
and self.confidence
and face[ATTR_CONFIDENCE] < self.confidence
):
continue
face.update({ATTR_ENTITY_ID: self.entity_id})
self.hass.async_add_job(self.hass.bus.async_fire, EVENT_DETECT_FACE, face)
# Update entity store
self.faces = faces
self.total_faces = total
|
py | 1a47513f5fa9923540f824ac6a9b8bf03450d193 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.base_model_validators."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from core import jobs_registry
from core.domain import base_model_validators
from core.domain import prod_validation_jobs_one_off
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(base_models, user_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.user])
class MockModel(base_models.BaseModel):
pass
class MockSnapshotModel(base_models.BaseModel):
commit_type = 'edit'
commit_cmds = []
class MockBaseModelValidator(base_model_validators.BaseModelValidator):
pass
class MockModelValidatorWithInvalidValidationType(
base_model_validators.BaseModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
@classmethod
def _get_model_domain_object_instance(cls, unused_item):
return MockModel()
@classmethod
def _get_domain_object_validation_type(cls, unused_item):
return 'Invalid'
class MockSummaryModelValidator(
base_model_validators.BaseSummaryModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
class MockSnapshotContentModelValidator(
base_model_validators.BaseSnapshotContentModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
class MockSnapshotMetadataModelValidator(
base_model_validators.BaseSnapshotMetadataModelValidator):
EXTERNAL_MODEL_NAME = 'external model'
@classmethod
def _get_external_id_relationships(cls, item):
return [
base_model_validators.ExternalModelFetcherDetails(
'external_model_ids', MockModel, [])]
class MockBaseUserModelValidator(
base_model_validators.BaseUserModelValidator):
@classmethod
def _get_external_id_relationships(cls, item):
return []
@classmethod
def _get_custom_validation_functions(cls):
return [cls._validate_common_properties_do_not_match]
@classmethod
def _get_external_instance_custom_validation_functions(cls):
return [
cls._validate_explorations_are_public,
cls._validate_collections_are_public
]
class MockCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
pass
class MockCommitLogEntryModelValidator(
base_model_validators.BaseCommitLogEntryModelValidator):
EXTERNAL_MODEL_NAME = 'mockmodel'
@classmethod
def _get_change_domain_class(cls, item):
if item.id.startswith('mock'):
return MockCommitLogEntryModel
else:
cls._add_error(
'model %s' % base_model_validators.ERROR_CATEGORY_ID_CHECK,
'Entity id %s: Entity id does not match regex pattern' % (
item.id))
return None
@classmethod
def _get_external_id_relationships(cls, item):
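# With may_contain_system_ids=False and may_contain_pseudonymous_ids=False,
# validation reports an error whenever user_id refers to a system user
# (e.g. the migration bot) or a pseudonymous user, as exercised by the tests below.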
return [
base_model_validators.UserSettingsModelFetcherDetails(
'user_id', [item.user_id],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False
)]
class BaseValidatorTests(test_utils.AuditJobsTestBase):
def setUp(self):
super(BaseValidatorTests, self).setUp()
self.invalid_model = MockModel(id='mockmodel')
self.invalid_model.update_timestamps()
self.invalid_model.put()
def test_error_is_raised_if_fetch_external_properties_is_undefined(self):
with self.assertRaisesRegexp(
NotImplementedError,
r'The _get_external_id_relationships\(\) method is missing from the'
' derived class. It should be implemented in the derived class.'):
MockBaseModelValidator().validate(self.invalid_model)
def test_error_is_get_external_model_properties_is_undefined(self):
with self.assertRaisesRegexp(
NotImplementedError,
r'The _get_external_model_properties\(\) method is missing from the'
' derived class. It should be implemented in the derived class.'):
MockSummaryModelValidator().validate(self.invalid_model)
def test_error_is_raised_if_external_model_name_is_undefined(self):
with self.assertRaisesRegexp(
Exception, 'External model name should be specified'):
MockSnapshotContentModelValidator().validate(self.invalid_model)
def test_error_is_raised_if_get_change_domain_class_is_undefined(self):
with self.assertRaisesRegexp(
NotImplementedError,
r'The _get_change_domain_class\(\) method is missing from the '
'derived class. It should be implemented in the derived class.'):
snapshot_model = MockSnapshotModel(id='mockmodel')
snapshot_model.update_timestamps()
snapshot_model.put()
MockSnapshotMetadataModelValidator().validate(snapshot_model)
def test_error_is_raised_if_entity_classes_to_map_over_is_undefined(self):
job_class = (
prod_validation_jobs_one_off.ProdValidationAuditOneOffJob)
with self.assertRaisesRegexp(
NotImplementedError,
r'The entity_classes_to_map_over\(\) method is missing from the '
'derived class. It should be implemented in the derived class.'):
with self.swap(jobs_registry, 'ONE_OFF_JOB_MANAGERS', [job_class]):
job_id = job_class.create_new()
job_class.enqueue(job_id)
def test_error_is_raised_with_invalid_validation_type_for_domain_objects(
self):
MockModelValidatorWithInvalidValidationType.validate(self.invalid_model)
expected_errors = {
'domain object check': [
'Entity id mockmodel: Entity fails domain validation with '
'the error Invalid validation type for domain object: Invalid']}
self.assertEqual(
MockModelValidatorWithInvalidValidationType.errors, expected_errors)
def test_no_error_is_raised_for_base_user_model(self):
user = MockModel(id='12345')
user.update_timestamps()
user.put()
MockBaseUserModelValidator().validate(user)
def test_validate_deleted_reports_error_for_old_deleted_model(self):
year_ago = datetime.datetime.utcnow() - datetime.timedelta(weeks=52)
model = MockModel(
id='123',
deleted=True,
last_updated=year_ago
)
model.update_timestamps(update_last_updated_time=False)
model.put()
validator = MockBaseUserModelValidator()
validator.validate_deleted(model)
self.assertEqual(
validator.errors,
{
'entity stale check': [
'Entity id 123: model marked as '
'deleted is older than 8 weeks'
]
}
)
def test_external_model_fetcher_with_user_settings_raise_error(self):
with self.assertRaisesRegexp(
Exception,
'When fetching instances of UserSettingsModel, please use ' +
'UserSettingsModelFetcherDetails instead of ' +
'ExternalModelFetcherDetails'):
base_model_validators.ExternalModelFetcherDetails(
'committer_ids', user_models.UserSettingsModel,
[
feconf.MIGRATION_BOT_USER_ID, 'User-1',
self.PSEUDONYMOUS_ID
]
)
def test_may_contain_system_users_filters_system_ids(self):
user_settings_model = (
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids',
[feconf.MIGRATION_BOT_USER_ID, 'User-1'],
may_contain_system_ids=True,
may_contain_pseudonymous_ids=False
))
self.assertItemsEqual(
user_settings_model.model_ids, ['User-1'])
def test_error_raised_if_model_ids_contain_system_ids(self):
with self.assertRaisesRegexp(
utils.ValidationError,
'The field \'committer_ids\' should not contain system IDs'):
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', [feconf.MIGRATION_BOT_USER_ID, 'User-1'],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False
)
def test_may_contain_pseudonymous_users_filters_pseudonymous_users(self):
user_settings_model = (
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', ['User-1', self.PSEUDONYMOUS_ID],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=True
))
self.assertItemsEqual(
user_settings_model.model_ids, ['User-1'])
def test_error_raised_if_model_ids_contain_pseudonymous_ids(self):
with self.assertRaisesRegexp(
utils.ValidationError,
'The field \'committer_ids\' should not contain pseudonymous IDs'):
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', [self.PSEUDONYMOUS_ID, 'User-1'],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False
)
def test_error_raised_when_fetching_external_model_with_system_ids(self):
model = MockCommitLogEntryModel(
id='mock-12345',
user_id=feconf.MIGRATION_BOT_USER_ID,
commit_cmds=[])
model.update_timestamps()
mock_validator = MockCommitLogEntryModelValidator()
mock_validator.errors.clear()
mock_validator.validate(model)
self.assertDictContainsSubset(
{
'invalid user setting ids': [
'Entity id mock-12345: '
'The field \'user_id\' should not contain system IDs'
]
},
mock_validator.errors
)
def test_error_raised_when_fetching_external_model_with_pseudo_ids(self):
model = MockCommitLogEntryModel(
id='mock-12345',
user_id=self.PSEUDONYMOUS_ID,
commit_cmds=[])
model.update_timestamps()
mock_validator = MockCommitLogEntryModelValidator()
mock_validator.errors.clear()
mock_validator.validate(model)
self.assertDictContainsSubset(
{
'invalid user setting ids': [
'Entity id mock-12345: '
'The field \'user_id\' should not contain pseudonymous IDs'
]
},
mock_validator.errors
)
|
py | 1a47532aa504b78b2b9aae380ba517bf3c922991 | ##
##
#
# Adapter for com.vividsolutions.jts.geom.Coordinate
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/20/11 dgilling Initial Creation.
#
#
#
from dynamicserialize.dstypes.com.vividsolutions.jts.geom import Coordinate
ClassAdapter = 'com.vividsolutions.jts.geom.Coordinate'
def serialize(context, coordinate):
context.writeDouble(coordinate.getX())
context.writeDouble(coordinate.getY())
def deserialize(context):
x = context.readDouble()
y = context.readDouble()
coord = Coordinate()
coord.setX(x)
coord.setY(y)
return coord
|
py | 1a47535916e39a889d945959036dd968d1a82bb0 | # urls.py
from __future__ import absolute_import
from django.conf.urls import patterns, include, url
import twitterapp.views
# required to set an app name to resolve 'url' in templates with namespacing
app_name = "twitter"
urlpatterns = [
url(r'^site-setup/$', twitterapp.views.site_setup),
]
|
py | 1a47555a1d0fad5c442f2aa381a171cb57adc96c | # Generated by Django 3.0.3 on 2020-03-23 02:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrape', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ScrapyModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_id', models.CharField(max_length=100, null=True)),
('comments', models.TextField()),
('hotel_name', models.CharField(max_length=100)),
],
),
migrations.DeleteModel(
name='ScrapeResult',
),
]
|
bzl | 1a47557b162963cbb03329d36426f1d6badc102c | load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "get_cpu_value")
def execute_or_fail_loudly(
repository_ctx,
arguments,
environment = {},
working_directory = ""):
"""Execute the given command
Fails if the command does not exit with exit-code 0.
    Args:
      repository_ctx: The repository rule context used to run the command.
      arguments: List, the command line to execute.
      environment: Optional dict of environment variables for the command.
      working_directory: Optional working directory in which to run the command.
Returns:
exec_result: The output of the command.
"""
exec_result = repository_ctx.execute(
arguments,
environment = environment,
quiet = True,
working_directory = working_directory,
)
if exec_result.return_code != 0:
arguments = [_as_string(x) for x in arguments]
fail("\n".join(["Command failed: " + " ".join(arguments), exec_result.stderr]))
return exec_result
def _as_string(v):
if type(v) == "string":
return v
else:
return repr(v)
def find_python(repository_ctx):
python = repository_ctx.which("python3")
if not python:
python = repository_ctx.which("python")
if not python:
fail("There is no Python in PATH. Please install Python >= 3.3.")
result = repository_ctx.execute([python, "--version"])
if not result.stdout.startswith("Python 3"):
fail("rules_haskell requires Python >= 3.3.")
return python
def resolve_labels(repository_ctx, labels):
"""
Avoid rule restart by resolving these labels early. See
https://github.com/bazelbuild/bazel/blob/master/tools/cpp/lib_cc_configure.bzl#L17.
Args:
repository_ctx: The context with which to resolve the labels.
labels: Labels to be resolved expressed as a list of strings.
Returns:
A dictionary with the labels as keys and their paths as values.
"""
return dict([(label, repository_ctx.path(Label(label))) for label in labels])
def define_rule(rule_type, name, **kwargs):
"""Generate a string representing a rule definition.
Take care to escape string values using repr().
### Examples
```bzl
define_rule("myrule",
name = "foo",
myattr1 = repr("bar"),
myattr2 = ["baz"],
)
```
"""
attrs = ["{} = {},".format(k, v) for k, v in kwargs.items() if v != None]
skeleton = """\
{rule_type}(
name = {name},
{attrs}
)
"""
return skeleton.format(
rule_type = rule_type,
name = repr(name),
attrs = "\n ".join(attrs),
)
|
py | 1a4755c9da354effd782c6a87f1c8f660b2f357e | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetAvailabilitySetResult:
"""
A collection of values returned by getAvailabilitySet.
"""
def __init__(__self__, id=None, location=None, managed=None, name=None, platform_fault_domain_count=None, platform_update_domain_count=None, resource_group_name=None, tags=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
The provider-assigned unique ID for this managed resource.
"""
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
"""
The supported Azure location where the Availability Set exists.
"""
if managed and not isinstance(managed, bool):
raise TypeError("Expected argument 'managed' to be a bool")
__self__.managed = managed
"""
Whether the availability set is managed or not.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if platform_fault_domain_count and not isinstance(platform_fault_domain_count, str):
raise TypeError("Expected argument 'platform_fault_domain_count' to be a str")
__self__.platform_fault_domain_count = platform_fault_domain_count
"""
The number of fault domains that are used.
"""
if platform_update_domain_count and not isinstance(platform_update_domain_count, str):
raise TypeError("Expected argument 'platform_update_domain_count' to be a str")
__self__.platform_update_domain_count = platform_update_domain_count
"""
The number of update domains that are used.
"""
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
A mapping of tags assigned to the resource.
"""
class AwaitableGetAvailabilitySetResult(GetAvailabilitySetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAvailabilitySetResult(
id=self.id,
location=self.location,
managed=self.managed,
name=self.name,
platform_fault_domain_count=self.platform_fault_domain_count,
platform_update_domain_count=self.platform_update_domain_count,
resource_group_name=self.resource_group_name,
tags=self.tags)
def get_availability_set(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Availability Set.
:param str name: The name of the Availability Set.
:param str resource_group_name: The name of the resource group in which the Availability Set exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:compute/getAvailabilitySet:getAvailabilitySet', __args__, opts=opts).value
return AwaitableGetAvailabilitySetResult(
id=__ret__.get('id'),
location=__ret__.get('location'),
managed=__ret__.get('managed'),
name=__ret__.get('name'),
platform_fault_domain_count=__ret__.get('platformFaultDomainCount'),
platform_update_domain_count=__ret__.get('platformUpdateDomainCount'),
resource_group_name=__ret__.get('resourceGroupName'),
tags=__ret__.get('tags'))
|
py | 1a47563ba71372302acf5feb804706c4570881fe | from . import bp as order
from flask import render_template, url_for, render_template, flash, redirect
from app.forms import OrderForm
@order.route('/order_form')
def order_form():
form = OrderForm()
context = {
'form':form
}
    return render_template('order_form.html', **context) |
py | 1a4757a3854c455c9eb038a02f548b9f246ea3e0 | import torch.nn as nn
class SoftmaxAdapter(nn.Module):
def __init__(self, dim_in, n_class):
super(SoftmaxAdapter, self).__init__()
self.fc = nn.Linear(dim_in, n_class)
def forward(self, input):
h = self.fc(input)
        return nn.functional.softmax(h, dim=1)
class SigmoidAdapter(nn.Module):
def __init__(self, dim_in, n_class):
super(SigmoidAdapter, self).__init__()
self.fc = nn.Linear(dim_in, n_class)
def forward(self, input):
h = self.fc(input)
return nn.functional.sigmoid(h)
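# Hedged usage sketch (not part of the original module): checks the expected
# input/output shapes of the two adapters above. The feature size (8), class
# count (3) and batch size (4) are arbitrary illustration values.
if __name__ == '__main__':
    import torch
    features = torch.randn(4, 8)            # batch of 4 feature vectors
    soft = SoftmaxAdapter(dim_in=8, n_class=3)
    sig = SigmoidAdapter(dim_in=8, n_class=3)
    print(soft(features).shape)             # torch.Size([4, 3]), rows sum to 1
    print(sig(features).shape)              # torch.Size([4, 3]), entries in (0, 1)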
|
py | 1a4758143fe5ddb714eaefad5b857cf455012818 | import jpype
jpype.addClassPath('./lib/*')
if not(jpype.isJVMStarted()):
    jpype.startJVM(convertStrings=True)
neqsim = jpype.JPackage('neqsim') |
py | 1a475887add0f7610649cb0301f950f4fba515a4 | # Specter BlockChain Implementation
# Nick Frichette 12/9/2017
import json
import hashlib
import requests
import base64
from threading import Thread
from database_orm import *
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import InvalidSignature
# ANSI escape sequences
FAIL = '\033[91m'
END = '\033[0m'
OK = '\033[92m'
class Blockchain:
NODE_ADDRESS_LIST = ['http://localhost:5000']
blocks = []
index = 0
db = None
transaction_pool = []
def __init__(self, is_node=False):
# Instantiate DB
self.db = Database()
if is_node:
print OK + 'Thank you for standing up a node!' + END
# If the DB is empty, generate the Genesis
if self.db.is_empty():
print FAIL + 'No blocks in chain' + END
print OK + 'Creating Genesis Block' + END
genesis = self.make_genesis_block()
self.add_block(genesis)
else:
# For each row in the DB, create a block
blocks = self.db.get_all_blocks()
for item in blocks:
block = Block(
item.coin_index,
json.loads(item.transaction_info),
item.previous_hash,
item.current_hash,
item.timestamp,
item.nonce
)
self.add_block(block)
# Unverified transactions are added to the transaction pool
            # A separate thread will put them onto the blockchain.
            # This should be more performant at scale.
#trans_thread = Thread(target=self.transaction_thread)
            #trans_thread.daemon = True
#trans_thread.start()
else:
# This is an implementation meant for normal users
try:
blockchain_json = self.download_blockchain()
self.unjsonify(blockchain_json)
except requests.exceptions.ConnectionError:
print FAIL + "Failed to connect to nodes. Terminating" + END
exit()
def download_blockchain(self):
# Query the nodes for the blockchain
# In the future validation will need to occur
blockchain_json = []
for address in self.NODE_ADDRESS_LIST:
request = requests.get(address + '/getblockchain')
blockchain_json = request.json()
return blockchain_json
def update_blockchain(self):
try:
blockchain_json = self.download_blockchain()
self.blocks = []
self.unjsonify(blockchain_json)
except requests.exceptions.ConnectionError:
print "Failed to update blockchain"
def jsonify(self):
data_json = {}
i = 0
for block in self.blocks:
data = {
"index": block.coin_index,
"transaction": block.transaction_info,
"previous_hash": block.previous_hash,
"current_hash": block.current_hash,
"timestamp": block.timestamp,
"nonce": block.nonce
}
data_json[i] = data
i += 1
return data_json
def unjsonify(self, json_data):
for block in json_data:
js = json_data[block]
block = Block(
js['index'],
js['transaction'],
js['previous_hash'],
js['current_hash'],
js['timestamp'],
js['nonce']
)
self.blocks.append(block)
# If not in the DB, insert it
#if not self.db.in_db(block):
# self.db.insert_block(block)
return None
def print_chain(self):
print self.blocks
return self.blocks
def add_block(self, block):
if not self.db.in_db(block):
self.db.insert_block(block)
self.blocks.append(block)
def make_block(self, transaction):
self.index += 1
# Error handling to fix serialization issues
transaction['amount'] = int(transaction['amount'])
block_hash = self.calc_block_hash(self.index, transaction['hash'], transaction['timestamp'], transaction, 0)
block = Block(self.index, transaction, transaction['hash'], block_hash, transaction['timestamp'], 0)
self.add_block(block)
def make_genesis_block(self):
print OK + 'Genesis Block Created' + END
transaction = {
"from": "-1",
"to": "MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAupSwIG17vricebp6EN88"+
"7wzHj0OsaxYl24z2VT6U9+ByEoGGWOPC/Nv9jwebzCLT49Bv5nZL0c7WCQMvvb5o"+
"3BNk2wPZR6XEQBZxgwXJdt5h2Ye+Nyc8wYvZodp1ouUv2jCNvcnH4VCz6y56yPzc"+
"861ZeYGGO9xbTu7RLkBqGODIqNqLzRhIdpYDukz2TVgHrEXalu+SFkrHo+oc5OBg"+
"YYLQeOSlzRKxgfvFG9ViNlqHP0tQDQsGnakBFuBWW5DuwrEKjqkmM+dxo9ALNaag"+
"ELpB60zXK2ZxwdvOmko8KZNsHVQMzZql2hcJiyfc99OvOBgp/xTscK94NNqQ6m2M"+
"pFr8V07XFnRB8r1EQhY9oFuraUi9xSZbKc3DVG3FEfSyo2Q/+jT+9dkSt7GegIya"+
"wM3saOY2VeN1f8XsfQ+a96SL+ltas99NlDJGMuOJOjrKherpfEBcuEK5EvljceGy"+
"b7O4NyUcQ/k0q/ngQM+Lz5/3RUShqCbtkmjH5FAxiNHzluy83hJyxGxrYHTEMF88"+
"Z6YHyaOBUpMp3mvPMVqM/jeI2aslJDTEDmeaRhs6yI90RDyohzb1FUctUKVPiL8a"+
"FI9/gKmSCpgB8BEpI23K0az4kbItnWLe3xzABSFL0nSQWkXQqWmepKcDwp6TcJtG"+
"/U5BSE284qlQFOd50rW0xRUCAwEAAQ==",
"amount": 100,
"signature": "-1",
"timestamp": -1,
"hash": -1
}
current_hash = self.calc_block_hash(0, -1, -1, transaction, 0)
genesis_block = Block(0, transaction, -1, current_hash, 0, 0)
self.index += 1
return genesis_block
def calc_block_hash(self, index, prev_hash, timestamp, transaction, nonce=0):
data = {
"index": index,
"previous_hash": prev_hash,
"timestamp": timestamp,
"transaction": transaction,
"nonce": nonce
}
data_json = json.dumps(data, sort_keys=True)
hashed = hashlib.sha256(data_json)
return hashed.hexdigest()
def lookup_address(self, address):
# Begin searching for transactions from that address
balance = 0
for block in self.blocks:
if block.transaction_info['from'] == address:
balance -= block.transaction_info['amount']
if block.transaction_info['to'] == address:
balance += block.transaction_info['amount']
return balance
def validate_transaction(self, transaction):
# We need to ensure that a transaction is valid on the blockchain
# First lets get the amount the user wants to move
amount = int(transaction['amount'])
# Now let's check their balance with their public key
balance = self.lookup_address(transaction['from'])
# Now compare the two
if amount < balance:
return True
else:
return False
@staticmethod
def create_signable_transaction(from_address, to_address, amount, timestamp):
return ':'.join((from_address, to_address, amount, str(timestamp)))
def authenticate_transaction(self, transaction):
is_verified = self.verify_remote_transaction(transaction['from'], transaction['signature'], transaction)
return is_verified
def verify_remote_transaction(self, public_key, signature, transaction):
# transaction.pop('hash')
transaction = self.create_signable_transaction(
transaction['from'],
transaction['to'],
transaction['amount'],
transaction['timestamp']
)
key = "-----BEGIN PUBLIC KEY-----\n"
i = 0
while i < len(public_key):
key += public_key[i:i+64]+'\n'
i += 64
key += "-----END PUBLIC KEY-----\n"
public_key = serialization.load_pem_public_key(
str(key),
backend=default_backend()
)
try:
public_key.verify(
bytes(base64.decodestring(signature)),
bytes(transaction),
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return True
except InvalidSignature:
return False
#def transaction_thread(self):
    #    while True:
# while len(self.transaction_pool) > 0:
# transaction = self.transaction_pool[-1]
# if self.authenticate_transaction(transaction):
# if self.validate_transaction(transaction):
# print str(len(self.blocks))
# print OK + "Confirmed Transaction" + END
# self.make_block(self.transaction_pool.pop())
# print str(len(self.blocks))
def add_transaction_to_pool(self, transaction):
#self.transaction_pool.append(transaction)
if self.authenticate_transaction(transaction):
if self.validate_transaction(transaction):
self.make_block(transaction)
if __name__ == '__main__':
blockchain = Blockchain()
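    # Hedged illustration (not from the original author): the canonical
    # colon-separated payload produced by create_signable_transaction(), which
    # is what gets signed and later checked in verify_remote_transaction().
    # The keys and timestamp below are placeholders.
    signable = Blockchain.create_signable_transaction(
        'sender-public-key', 'receiver-public-key', '10', 1512777600)
    print OK + 'Signable transaction payload: ' + signable + END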
|
py | 1a47590ded909e1599935677d8570acd9c1bf552 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests fuzzers.mutator_plugin."""
from pyfakefs import fake_filesystem_unittest
import os
import shutil
import unittest
from bot.fuzzers import mutator_plugin
from tests.test_libs import helpers
from tests.test_libs import test_utils
class FindMutatorPluginTest(fake_filesystem_unittest.TestCase):
"""Tests find_mutator_plugin."""
def setUp(self):
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
def test_find_mutator_plugin_with_usable(self):
"""Tests that the right path is returned by find_mutator_plugin when there
is a usable mutator plugin available."""
usable_plugin_path = os.path.join(
self.plugins_root_dir, 'plugins',
mutator_plugin.MUTATOR_SHARED_OBJECT_FILENAME)
self.fs.create_file(usable_plugin_path)
self.assertEqual(usable_plugin_path, mutator_plugin.find_mutator_plugin())
def test_set_mutator_plugin_without_usable(self):
"""Tests that None is returned by find_mutator_plugin when there isn't a
usable mutator plugin available."""
self.assertIsNone(mutator_plugin.find_mutator_plugin())
# pylint: disable=protected-access
class GetDirectoryFunctionsTest(unittest.TestCase):
"""Tests functions for get plugin directories."""
def setUp(self):
helpers.patch_environ(self)
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
def test_get_mutator_plugins_subdir(self):
"""Tests that _get_mutator_plugins_subdir returns the path to the correct
subdirectory."""
subdir = 'x'
self.assertEqual(
os.path.join(self.plugins_root_dir, subdir),
mutator_plugin._get_mutator_plugins_subdir(subdir))
def test_get_mutator_plugins_archives_dir(self):
"""Tests that _get_mutator_plugins_archives_dir returns the path to the
mutator plugin archives directory."""
self.assertEqual(
os.path.join(self.plugins_root_dir,
mutator_plugin.ARCHIVES_SUBDIR_NAME),
mutator_plugin._get_mutator_plugins_archives_dir())
def test_get_mutator_plugins_unpacked_dir(self):
"""Tests that _get_mutator_plugins_unpacked_dir returns the path to the
unpacked mutator plugin directory."""
self.assertEqual(
os.path.join(self.plugins_root_dir, mutator_plugin.PLUGINS_SUBDIR_NAME),
mutator_plugin._get_mutator_plugins_unpacked_dir())
# pylint: disable=protected-access
class PluginGetterTest(fake_filesystem_unittest.TestCase):
"""Tests PluginGetter."""
def setUp(self):
"""Setup for plugin getter test."""
helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
os.environ['JOB_NAME'] = 'libfuzzer_asan_test'
self.fuzzer_binary_name = 'test_fuzzer'
self.name = 'myplugin'
self.plugins_root_dir = '/plugins'
os.environ['MUTATOR_PLUGINS_DIR'] = self.plugins_root_dir
self.fs.create_dir(self.plugins_root_dir)
self.plugin_getter = mutator_plugin.PluginGetter(self.fuzzer_binary_name)
self.plugins_archives_dir = os.path.join(self.plugins_root_dir, 'archives')
self.plugin_archive_filename = '%s-%s-%s.zip' % (
self.name, os.environ['JOB_NAME'], self.fuzzer_binary_name)
self.plugin_archive_path = os.path.join(self.plugins_archives_dir,
self.plugin_archive_filename)
self.plugins_dir = os.path.join(self.plugins_root_dir, 'plugins')
helpers.patch(self, [
'google_cloud_utils.storage.copy_file_from',
'bot.fuzzers.mutator_plugin._get_mutator_plugins_from_bucket',
])
def mocked_copy_file_from(gcs_url, file_path):
expected_url = '%s/%s' % (mutator_plugin._get_mutator_plugins_bucket_url(
), self.plugin_archive_filename)
self.assertEqual(expected_url, gcs_url)
self.assertEqual(file_path, self.plugin_archive_path)
return file_path
self.mock.copy_file_from.side_effect = mocked_copy_file_from
def test_create_directories(self):
"""Tests that create_directories creates the right directories."""
shutil.rmtree(self.plugins_root_dir)
self.fs.create_dir(self.plugins_root_dir)
self.plugin_getter.create_directories()
directories = [
os.path.join(self.plugins_root_dir, 'plugins'),
os.path.join(self.plugins_root_dir, 'archives')
]
self.assertTrue(all(os.path.isdir(directory) for directory in directories))
def test_recognizes_usable(self):
"""Tests that _is_plugin_usable recognizes a usable plugin archive."""
self.assertTrue(
self.plugin_getter._is_plugin_usable(self.plugin_archive_filename))
def test_recognizes_unusable(self):
"""Tests that _is_plugin_usable recognizes an unusable plugin archive."""
unusable_plugin_archive_filename = self.plugin_archive_filename.replace(
self.fuzzer_binary_name, 'other_binary')
self.assertFalse(
self.plugin_getter._is_plugin_usable(unusable_plugin_archive_filename))
def test_download_mutator_plugin_archive(self):
"""Tests that _download_mutator_plugin_archive downloads an archive to the
correct location."""
self.assertEqual(
self.plugin_archive_path,
mutator_plugin._download_mutator_plugin_archive(
self.plugin_archive_filename))
class ExtractNameFromArchiveTest(unittest.TestCase):
"""Tests for _extract_name_from_archive."""
def test_extract_name_from_archive(self):
"""Tests that _extract_name_from_archive extracts the name from the
archive."""
name = 'myplugin'
fuzzer_binary_name = 'test_fuzzer'
job_name = 'libfuzzer_asan_test'
plugin_archive_filename = '%s-%s-%s.zip' % (name, job_name,
fuzzer_binary_name)
extracted_name, job_and_fuzz_target = (
mutator_plugin._extract_name_from_archive(plugin_archive_filename))
self.assertEqual(name, extracted_name)
expected_job_and_fuzz_target = '%s-%s' % (job_name, fuzzer_binary_name)
self.assertEqual(expected_job_and_fuzz_target, job_and_fuzz_target)
|
py | 1a475958bd94be1c0b90fa413546c6de9970608e | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
from six import iteritems
import logging
from werkzeug.wrappers import Request
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.middleware.profiler import ProfilerMiddleware
from werkzeug.middleware.shared_data import SharedDataMiddleware
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name, sanitize_html
from frappe.middlewares import StaticDataMiddleware
from frappe.utils.error import make_error_snapshot
from frappe.core.doctype.comment.comment import update_comments_in_parent_after_request
from frappe import _
import frappe.recorder
import frappe.monitor
import frappe.rate_limiter
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
def __init__(self, environ):
self.request = Request(environ)
def __enter__(self):
init_request(self.request)
def __exit__(self, type, value, traceback):
frappe.destroy()
@Request.application
def application(request):
response = None
try:
rollback = True
init_request(request)
frappe.recorder.record()
frappe.monitor.start()
frappe.rate_limiter.apply()
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
response = frappe.api.handle()
elif frappe.request.path.startswith('/backups'):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith('/private/files/'):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ('GET', 'HEAD', 'POST'):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, 'cookie_manager'):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.rate_limiter.update()
frappe.monitor.stop(response)
frappe.recorder.dump()
if response and hasattr(frappe.local, 'rate_limiter'):
response.headers.extend(frappe.local.rate_limiter.headers())
frappe.destroy()
return response
def init_request(request):
frappe.local.request = request
frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
frappe.init(site=site, sites_path=_sites_path)
if not (frappe.local.conf and frappe.local.conf.db_name):
# site does not exist
raise NotFound
if frappe.local.conf.get('maintenance_mode'):
frappe.connect()
raise frappe.SessionStopped('Session Stopped')
make_form_dict(request)
frappe.local.http_request = frappe.auth.HTTPRequest()
def make_form_dict(request):
import json
request_data = request.get_data(as_text=True)
if 'application/json' in (request.content_type or '') and request_data:
args = json.loads(request_data)
else:
args = request.form or request.args
try:
frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in iteritems(args) })
except IndexError:
frappe.local.form_dict = frappe._dict(args)
if "_" in frappe.local.form_dict:
# _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
frappe.local.form_dict.pop("_")
def handle_exception(e):
response = None
http_status_code = getattr(e, "http_status_code", 500)
return_as_message = False
if frappe.get_request_header('Accept') and (frappe.local.is_ajax or 'application/json' in frappe.get_request_header('Accept')):
# handle ajax responses first
# if the request is ajax, send back the trace or error message
response = frappe.utils.response.report_error(http_status_code)
elif (http_status_code==500
and (frappe.db and isinstance(e, frappe.db.InternalError))
and (frappe.db and (frappe.db.is_deadlocked(e) or frappe.db.is_timedout(e)))):
http_status_code = 508
elif http_status_code==401:
frappe.respond_as_web_page(_("Session Expired"),
_("Your session has expired, please login again to continue."),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==403:
frappe.respond_as_web_page(_("Not Permitted"),
_("You do not have enough permissions to complete the action"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==404:
frappe.respond_as_web_page(_("Not Found"),
_("The resource you are looking for is not available"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code == 429:
response = frappe.rate_limiter.respond()
else:
traceback = "<pre>" + sanitize_html(frappe.get_traceback()) + "</pre>"
if frappe.local.flags.disable_traceback:
traceback = ""
frappe.respond_as_web_page("Server Error",
traceback, http_status_code=http_status_code,
indicator_color='red', width=640)
return_as_message = True
if e.__class__ == frappe.AuthenticationError:
if hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.clear_cookies()
if http_status_code >= 500:
frappe.logger().error('Request Error', exc_info=True)
make_error_snapshot(e)
if return_as_message:
response = frappe.website.render.render("message",
http_status_code=http_status_code)
return response
def after_request(rollback):
if (frappe.local.request.method in ("POST", "PUT") or frappe.local.flags.commit) and frappe.db:
if frappe.db.transaction_writes:
frappe.db.commit()
rollback = False
# update session
if getattr(frappe.local, "session_obj", None):
updated_in_db = frappe.local.session_obj.update()
if updated_in_db:
frappe.db.commit()
rollback = False
update_comments_in_parent_after_request()
return rollback
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, no_reload=False, no_threading=False, site=None, sites_path='.'):
global application, _site, _sites_path
_site = site
_sites_path = sites_path
from werkzeug.serving import run_simple
patch_werkzeug_reloader()
if profile:
application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
if not os.environ.get('NO_STATICS'):
application = SharedDataMiddleware(application, {
str('/assets'): str(os.path.join(sites_path, 'assets'))
})
application = StaticDataMiddleware(application, {
str('/files'): str(os.path.abspath(sites_path))
})
application.debug = True
application.config = {
'SERVER_NAME': 'localhost:8000'
}
in_test_env = os.environ.get('CI')
if in_test_env:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
run_simple('0.0.0.0', int(port), application,
use_reloader=False if in_test_env else not no_reload,
use_debugger=not in_test_env,
use_evalex=not in_test_env,
threaded=not no_threading)
def patch_werkzeug_reloader():
"""
This function monkey patches Werkzeug reloader to ignore reloading files in
the __pycache__ directory.
To be deprecated when upgrading to Werkzeug 2.
"""
from werkzeug._reloader import WatchdogReloaderLoop
trigger_reload = WatchdogReloaderLoop.trigger_reload
def custom_trigger_reload(self, filename):
if os.path.basename(os.path.dirname(filename)) == "__pycache__":
return
return trigger_reload(self, filename)
WatchdogReloaderLoop.trigger_reload = custom_trigger_reload |
py | 1a47596fea28d6530f0bca714aefe511c8fb33ee | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.integration_alert import IntegrationAlert # noqa: E501
from wavefront_api_client.rest import ApiException
class TestIntegrationAlert(unittest.TestCase):
"""IntegrationAlert unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIntegrationAlert(self):
"""Test IntegrationAlert"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.integration_alert.IntegrationAlert() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a475a1e052fcecc0dfda39228f202f9d3916fdc | import argparse
import json
import os
import numpy as np
import tensorflow.compat.v1 as tf
import time
class AccumulatingOptimizer(object):
def __init__(self, opt, var_list):
self.opt = opt
self.var_list = var_list
self.accum_vars = {tv : tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
for tv in var_list}
self.total_loss = tf.Variable(tf.zeros(shape=[], dtype=tf.float32))
self.count_loss = tf.Variable(tf.zeros(shape=[], dtype=tf.float32))
def reset(self):
updates = [tv.assign(tf.zeros_like(tv)) for tv in self.accum_vars.values()]
updates.append(self.total_loss.assign(tf.zeros(shape=[], dtype=tf.float32)))
updates.append(self.count_loss.assign(tf.zeros(shape=[], dtype=tf.float32)))
with tf.control_dependencies(updates):
return tf.no_op()
def compute_gradients(self, loss):
grads = self.opt.compute_gradients(loss, self.var_list)
updates = [self.accum_vars[v].assign_add(g) for (g,v) in grads]
updates.append(self.total_loss.assign_add(loss))
updates.append(self.count_loss.assign_add(1.0))
with tf.control_dependencies(updates):
return tf.no_op()
def apply_gradients(self):
grads = [(g,v) for (v,g) in self.accum_vars.items()]
with tf.control_dependencies([self.opt.apply_gradients(grads)]):
return self.total_loss / self.count_loss
|
py | 1a475a6a9b421f3c53f286103f23ef97d923f83a | import os
import time
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.simulation.dao.simulation_config import SimulationConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.runnner import Runner
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.experiments.util import plotting_util, util
import argparse
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = os.path.dirname(__file__)
return script_dir
def define_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--attacker_path', type=str, default='')
    parser.add_argument('--defender_path', type=str, default='')
    parser.add_argument('--num_episodes', type=int, default=100)
parser.add_argument('--attacker_bot', action='store_true')
parser.add_argument('--defender_bot', action='store_true')
args = parser.parse_args()
return args
def default_config(args) -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
simulation_config = SimulationConfig(render=True, sleep=0.8, video=False, log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/videos", num_episodes=args.num_episodes,
gifs=False, gif_dir=default_output_dir() + "/gifs", video_frequency = 1)
    q_agent_config = QAgentConfig(attacker_load_path=args.attacker_path, defender_load_path=args.defender_path)
env_name = "idsgame-cyber-v0"
attacker_type = AgentType.TABULAR_Q_AGENT.value
defender_type = AgentType.TABULAR_Q_AGENT.value
if(args.attacker_bot):
attacker_type = AgentType.ATTACK_MAXIMAL_VALUE.value
if(args.defender_bot):
defender_type = AgentType.DEFEND_MINIMAL_VALUE.value
client_config = ClientConfig(env_name=env_name, attacker_type=attacker_type,
defender_type=defender_type, mode=RunnerMode.SIMULATE.value,
simulation_config=simulation_config, output_dir=default_output_dir(),
title="Simulation",
q_agent_config=q_agent_config)
return client_config
# Program entrypoint
if __name__ == '__main__':
args = define_args()
config = default_config(args)
result = Runner.run(config)
    print(f'Number of attacker victories in {args.num_episodes} episodes: {result.attacker_wins[-1]}')
|
py | 1a475c7d58dbb5097ea623af8f64348988b46d83 |
def read_verilog(args):
assert(len(args) == 1)
filename = args[0]
with open(filename, 'r') as verilogfile:
content = [line for line in verilogfile if 'assign' in line][:-1]
boolDict = dict()
for line in content:
left, right = line.split('=')
name = left.split()[1]
for k, v in boolDict.items():
right = right.replace(k, '({})'.format(v))
boolDict[name] = right.replace(';', '').replace('\n', '')
#print(boolDict['valid'])
return boolDict['valid']
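# Hedged example (not in the original script): builds a tiny netlist in the
# shape read_verilog() expects -- one 'assign' per signal, with later
# expressions referring to earlier signal names -- and prints the substituted
# boolean expression for 'valid'. Signal names here are made up for illustration.
if __name__ == '__main__':
    import tempfile
    sample = (
        "module m(input a, input b, output valid);\n"
        "assign n1 = a & b;\n"
        "assign valid = n1 | a;\n"
        "assign unused = 1'b0;\n"  # the last 'assign' line is dropped by [:-1]
        "endmodule\n"
    )
    with tempfile.NamedTemporaryFile('w', suffix='.v', delete=False) as f:
        f.write(sample)
        path = f.name
    print(read_verilog([path]))    # '(a & b)' substituted into the 'valid' expression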
|
py | 1a475db64427a91a747965472836187d34481151 | from quanser_robots import GentlyTerminating
import threading
import gym
import torch
import numpy as np
from abstract_rl.src.data_structures.temporal_difference_data.trajectory_builder import TrajectoryBuilder
from abstract_rl.src.data_structures.temporal_difference_data.trajectory_collection import TrajectoryCollection
from abstract_rl.src.misc.cli_printer import CliPrinter
from abstract_rl.src.misc.running_centered_filter import RunningCenteredFilter
class MCMCEnvWrapper:
"""
Environment wrapper for gym environments. Adds support for executing a whole trajectory based on a policy,
instead of only giving a step based interface to the outside.
"""
def namespace(self):
return self._name_sp
def __init__(self, mc):
"""
Initialize a new environment.
:param mc: The model configuration with everything important.
"""
conf = mc['conf']
self.mc = mc
self.num_threads = conf['num_threads']
self.render = conf['render']
self._name_sp = conf.get_namespace()
# save config
self.conf = conf
self.num_epochs = conf.get_root('num_epochs')
self.env_name = conf['name']
self.env = GentlyTerminating(gym.make(self.env_name))
self._normalize = conf['normalize']
self._discount = conf['discount']
self.epoch = 0
# set best measured reward to lowest possible reward
self.best_reward = np.finfo(np.float64).min
self.last_reward = None
self.max_traj_len = 0
self.min_reward = None
self.cli = CliPrinter().instance
self.created_trajectories = 0
self.obs_sp = self.env.observation_space
self.act_sp = self.env.action_space
self.thread_lock = threading.Lock()
self.state_filter = RunningCenteredFilter('states', self.observation_dim)
self.reward_filter = RunningCenteredFilter('rewards', 1)
def last_ucb_reward(self):
assert self.last_reward is not None
return self.last_reward
def discount(self):
"""
Discount factor of the environment.
:return: The discount factor used.
"""
return self._discount
def reset(self):
"""
Resets the environment.
:return: The state after the reset.
"""
cs = self.env.reset()
return cs
def execute_policy(self, policy, max_steps, batch_size, exploration=True, render=False, rew_field_name=None):
"""
Executes a policy for a maximum number of steps multiple times. This work can be split onto multiple threads as
well.
        :param policy: The policy to evaluate.
        :param max_steps: The maximum number of steps per trajectory.
        :param batch_size: The minimum total number of steps to collect across trajectories.
        :param exploration: If True, sample actions from the policy; otherwise act greedily.
        :param render: Whether to render the first trajectory of the batch.
        :param rew_field_name: Optional log field name for the discounted-reward statistics.
        :return: A TrajectoryCollection holding the executed trajectories.
"""
with self.conf.ns('policy'):
t = 0
k = 0
trajectories = []
while t < batch_size:
tr = self.execute_policy_once(np.minimum(batch_size - t, max_steps), policy, render and k == 0, opt=not exploration)
trajectories.append(tr)
t += len(tr)
k += 1
if rew_field_name is not None:
disc_rewards = [traj.discounted_reward(self.discount()) for traj in trajectories]
self.mc['log'].log({rew_field_name: [np.mean(disc_rewards), np.std(disc_rewards)]})
self.epoch += 1
tj = TrajectoryCollection(self, sum([len(tra) for tra in trajectories]))
tj.extend(trajectories)
tj.print()
return tj
def execute_policy_once(self, max_steps, policy, render=False, opt=False):
"""
        Execute a policy once, for at most the given number of steps or until the environment signals done.
        :param max_steps: The maximum number of steps, if done is not reached earlier.
        :param policy: The policy to use.
        :param render: Render the environment.
        :param opt: If True, act greedily using the policy mode instead of sampling actions.
:return: The finalized and built trajectory.
"""
# reset environment and create empty trajectory
env = GentlyTerminating(gym.make(self.env_name)) if not self.render else self.env
cs = env.reset()
if self._normalize: cs /= env.observation_space.high
self.state_filter.register(cs)
# create new trajectory builder
with self.thread_lock:
new_id = self.created_trajectories
self.created_trajectories += 1
traj_builder = TrajectoryBuilder(new_id, self, cs)
t = 0
# repeat for the number of steps
while max_steps is None or t < max_steps:
# sample distribution based on state
tcs = torch.Tensor(cs)
tcs = tcs.view([1, -1])
# sample action and calc log likelihood
suff_stats = policy.forward(tcs)
a = policy.mode(suff_stats=suff_stats) \
if opt else policy.sample_actions(suff_stats=suff_stats)
ll = policy.log_prob(a, suff_stats=suff_stats)
# prepare for usage
ll = ll.detach().numpy()[0]
a = a.detach().numpy()[0]
cs, r, done, info = env.step(a)
self.state_filter.register(cs)
self.reward_filter.register(cs)
# bug fix for quanser
cs /= env.observation_space.high
t += 1
# only punish if episode aborted
traj_builder.observe(a, r, cs, ll, int(done))
# render if needed
if render: env.render()
# break if necessary
if done: break
# compile using the discount factor
traj = traj_builder.finalize()
self.max_traj_len = max(self.max_traj_len, t)
env.close()
return traj
@property
def observation_space(self):
"""
Bounds for the observation space.
:return: Bound of the observation space.
"""
return self.obs_sp
@property
def action_space(self):
"""
Bounds for the action space.
:return: Bound of the action space.
"""
return self.act_sp
@property
def observation_dim(self):
"""
Dimension of observation space.
:return: Dimension of the observation space.
"""
return int(np.prod(self.observation_space.high.shape))
@property
def action_dim(self):
"""
Dimension of action space.
:return: Dimension of the action space.
"""
return int(np.prod(self.action_space.high.shape))
|
py | 1a475dddaa2a330d43e105b0e9660d31e95666e3 | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
workflow_sid = "WWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = TwilioTaskRouterClient(account_sid, auth_token)
workflow = client.workflows(workspace_sid).update(
workflow_sid, task_reservation_timout='20'
)
print(workflow.task_reservation_timeout)
# alternatively
workflow = client.workflows(workspace_sid).get(workflow_sid)
workflow = workflow.update(task_reservation_timeout='20')
print(workflow.task_reservation_timeout)
|
py | 1a475f193edcf5120871b4c120fb7d2c5eff0f47 | import torch
from torch import nn
import numpy as np
import os
from .utils.detect_face import detect_face, extract_face
class PNet(nn.Module):
"""MTCNN PNet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/pnet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
"""MTCNN RNet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/rnet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
"""MTCNN ONet.
Keyword Arguments:
pretrained {bool} -- Whether or not to load saved pretrained weights (default: {True})
"""
def __init__(self, pretrained=True):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
self.training = False
if pretrained:
state_dict_path = os.path.join(os.path.dirname(__file__), '../data/onet.pt')
state_dict = torch.load(state_dict_path)
self.load_state_dict(state_dict)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
class MTCNN(nn.Module):
"""MTCNN face detection module.
This class loads pretrained P-, R-, and O-nets and returns images cropped to include the face
only, given raw input images of one of the following types:
- PIL image or list of PIL images
- numpy.ndarray (uint8) representing either a single image (3D) or a batch of images (4D).
Cropped faces can optionally be saved to file
also.
Keyword Arguments:
image_size {int} -- Output image size in pixels. The image will be square. (default: {160})
margin {int} -- Margin to add to bounding box, in terms of pixels in the final image.
Note that the application of the margin differs slightly from the davidsandberg/facenet
repo, which applies the margin to the original image before resizing, making the margin
dependent on the original image size (this is a bug in davidsandberg/facenet).
(default: {0})
min_face_size {int} -- Minimum face size to search for. (default: {20})
thresholds {list} -- MTCNN face detection thresholds (default: {[0.6, 0.7, 0.7]})
factor {float} -- Factor used to create a scaling pyramid of face sizes. (default: {0.709})
post_process {bool} -- Whether or not to post process images tensors before returning.
(default: {True})
select_largest {bool} -- If True, if multiple faces are detected, the largest is returned.
If False, the face with the highest detection probability is returned.
(default: {True})
selection_method {string} -- Which heuristic to use for selection. Default None. If
specified, will override select_largest:
"probability": highest probability selected
"largest": largest box selected
"largest_over_threshold": largest box over a certain probability selected
"center_weighted_size": box size minus weighted squared offset from image center
(default: {None})
keep_all {bool} -- If True, all detected faces are returned, in the order dictated by the
select_largest parameter. If a save_path is specified, the first face is saved to that
path and the remaining faces are saved to <save_path>1, <save_path>2 etc.
(default: {False})
device {torch.device} -- The device on which to run neural net passes. Image tensors and
models are copied to this device before running forward passes. (default: {None})
"""
def __init__(
self, image_size=160, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
select_largest=True, selection_method=None, keep_all=False, device=None
):
super().__init__()
self.image_size = image_size
self.margin = margin
self.min_face_size = min_face_size
self.thresholds = thresholds
self.factor = factor
self.post_process = post_process
self.select_largest = select_largest
self.keep_all = keep_all
self.selection_method = selection_method
self.pnet = PNet()
self.rnet = RNet()
self.onet = ONet()
self.device = torch.device('cpu')
if device is not None:
self.device = device
self.to(device)
if not self.selection_method:
self.selection_method = 'largest' if self.select_largest else 'probability'
def forward(self, img, save_path=None, return_prob=False):
"""Run MTCNN face detection on a PIL image or numpy array. This method performs both
detection and extraction of faces, returning tensors representing detected faces rather
than the bounding boxes. To access bounding boxes, see the MTCNN.detect() method below.
Arguments:
img {PIL.Image, np.ndarray, or list} -- A PIL image, np.ndarray, torch.Tensor, or list.
Keyword Arguments:
save_path {str} -- An optional save path for the cropped image. Note that when
self.post_process=True, although the returned tensor is post processed, the saved
face image is not, so it is a true representation of the face in the input image.
If `img` is a list of images, `save_path` should be a list of equal length.
(default: {None})
return_prob {bool} -- Whether or not to return the detection probability.
(default: {False})
Returns:
Union[torch.Tensor, tuple(torch.tensor, float)] -- If detected, cropped image of a face
with dimensions 3 x image_size x image_size. Optionally, the probability that a
face was detected. If self.keep_all is True, n detected faces are returned in an
n x 3 x image_size x image_size tensor with an optional list of detection
probabilities. If `img` is a list of images, the item(s) returned have an extra
dimension (batch) as the first dimension.
Example:
>>> from facenet_pytorch import MTCNN
>>> mtcnn = MTCNN()
>>> face_tensor, prob = mtcnn(img, save_path='face.png', return_prob=True)
"""
# Detect faces
batch_boxes, batch_probs, batch_points = self.detect(img, landmarks=True)
# Select faces
if not self.keep_all:
batch_boxes, batch_probs, batch_points = self.select_boxes(
batch_boxes, batch_probs, batch_points, img, method=self.selection_method
)
# Extract faces
faces = self.extract(img, batch_boxes, save_path)
if return_prob:
return faces, batch_boxes, batch_probs
else:
return faces, batch_boxes
def detect(self, img, landmarks=False):
"""Detect all faces in PIL image and return bounding boxes and optional facial landmarks.
This method is used by the forward method and is also useful for face detection tasks
that require lower-level handling of bounding boxes and facial landmarks (e.g., face
tracking). The functionality of the forward function can be emulated by using this method
followed by the extract_face() function.
Arguments:
img {PIL.Image, np.ndarray, or list} -- A PIL image, np.ndarray, torch.Tensor, or list.
Keyword Arguments:
landmarks {bool} -- Whether to return facial landmarks in addition to bounding boxes.
(default: {False})
Returns:
tuple(numpy.ndarray, list) -- For N detected faces, a tuple containing an
Nx4 array of bounding boxes and a length N list of detection probabilities.
Returned boxes will be sorted in descending order by detection probability if
self.select_largest=False, otherwise the largest face will be returned first.
If `img` is a list of images, the items returned have an extra dimension
(batch) as the first dimension. Optionally, a third item, the facial landmarks,
are returned if `landmarks=True`.
Example:
>>> from PIL import Image, ImageDraw
>>> from facenet_pytorch import MTCNN, extract_face
>>> mtcnn = MTCNN(keep_all=True)
>>> boxes, probs, points = mtcnn.detect(img, landmarks=True)
>>> # Draw boxes and save faces
>>> img_draw = img.copy()
>>> draw = ImageDraw.Draw(img_draw)
>>> for i, (box, point) in enumerate(zip(boxes, points)):
... draw.rectangle(box.tolist(), width=5)
... for p in point:
... draw.rectangle((p - 10).tolist() + (p + 10).tolist(), width=10)
... extract_face(img, box, save_path='detected_face_{}.png'.format(i))
>>> img_draw.save('annotated_faces.png')
"""
with torch.no_grad():
batch_boxes, batch_points = detect_face(
img, self.min_face_size,
self.pnet, self.rnet, self.onet,
self.thresholds, self.factor,
self.device
)
boxes, probs, points = [], [], []
for box, point in zip(batch_boxes, batch_points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
boxes.append(None)
probs.append([None])
points.append(None)
elif self.select_largest:
box_order = np.argsort((box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]))[::-1]
box = box[box_order]
point = point[box_order]
boxes.append(box[:, :4])
probs.append(box[:, 4])
points.append(point)
else:
boxes.append(box[:, :4])
probs.append(box[:, 4])
points.append(point)
boxes = np.array(boxes)
probs = np.array(probs)
points = np.array(points)
if (
not isinstance(img, (list, tuple)) and
not (isinstance(img, np.ndarray) and len(img.shape) == 4) and
not (isinstance(img, torch.Tensor) and len(img.shape) == 4)
):
boxes = boxes[0]
probs = probs[0]
points = points[0]
if landmarks:
return boxes, probs, points
return boxes, probs
def select_boxes(
self, all_boxes, all_probs, all_points, imgs, method='probability', threshold=0.9,
center_weight=2.0
):
"""Selects a single box from multiple for a given image using one of multiple heuristics.
Arguments:
all_boxes {np.ndarray} -- Ix0 ndarray where each element is a Nx4 ndarry of
bounding boxes for N detected faces in I images (output from self.detect).
all_probs {np.ndarray} -- Ix0 ndarray where each element is a Nx0 ndarry of
probabilities for N detected faces in I images (output from self.detect).
all_points {np.ndarray} -- Ix0 ndarray where each element is a Nx5x2 array of
points for N detected faces. (output from self.detect).
imgs {PIL.Image, np.ndarray, or list} -- A PIL image, np.ndarray, torch.Tensor, or list.
Keyword Arguments:
method {str} -- Which heuristic to use for selection:
"probability": highest probability selected
"largest": largest box selected
"largest_over_theshold": largest box over a certain probability selected
"center_weighted_size": box size minus weighted squared offset from image center
(default: {'probability'})
            threshold {float} -- threshold for "largest_over_threshold" method. (default: {0.9})
center_weight {float} -- weight for squared offset in center weighted size method.
(default: {2.0})
Returns:
tuple(numpy.ndarray, numpy.ndarray, numpy.ndarray) -- nx4 ndarray of bounding boxes
for n images. Ix0 array of probabilities for each box, array of landmark points.
"""
        # Copying batch detection handling from extract(), but it would be easier to ensure detect() creates consistent output.
batch_mode = True
if (
not isinstance(imgs, (list, tuple)) and
not (isinstance(imgs, np.ndarray) and len(imgs.shape) == 4) and
not (isinstance(imgs, torch.Tensor) and len(imgs.shape) == 4)
):
imgs = [imgs]
all_boxes = [all_boxes]
all_probs = [all_probs]
all_points = [all_points]
batch_mode = False
selected_boxes, selected_probs, selected_points = [], [], []
for boxes, points, probs, img in zip(all_boxes, all_points, all_probs, imgs):
if boxes is None:
selected_boxes.append(None)
selected_probs.append([None])
selected_points.append(None)
continue
# If at least 1 box found
boxes = np.array(boxes)
probs = np.array(probs)
points = np.array(points)
if method == 'largest':
box_order = np.argsort((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]))[::-1]
elif method == 'probability':
box_order = np.argsort(probs)[::-1]
elif method == 'center_weighted_size':
box_sizes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
img_center = (img.width / 2, img.height/2)
box_centers = np.array(list(zip((boxes[:, 0] + boxes[:, 2]) / 2, (boxes[:, 1] + boxes[:, 3]) / 2)))
offsets = box_centers - img_center
offset_dist_squared = np.sum(np.power(offsets, 2.0), 1)
box_order = np.argsort(box_sizes - offset_dist_squared * center_weight)[::-1]
elif method == 'largest_over_threshold':
box_mask = probs > threshold
boxes = boxes[box_mask]
box_order = np.argsort((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]))[::-1]
if sum(box_mask) == 0:
selected_boxes.append(None)
selected_probs.append([None])
selected_points.append(None)
continue
box = boxes[box_order][[0]]
prob = probs[box_order][[0]]
point = points[box_order][[0]]
selected_boxes.append(box)
selected_probs.append(prob)
selected_points.append(point)
if batch_mode:
selected_boxes = np.array(selected_boxes)
selected_probs = np.array(selected_probs)
selected_points = np.array(selected_points)
else:
selected_boxes = selected_boxes[0]
selected_probs = selected_probs[0][0]
selected_points = selected_points[0]
return selected_boxes, selected_probs, selected_points
def extract(self, img, batch_boxes, save_path):
# Determine if a batch or single image was passed
batch_mode = True
if (
not isinstance(img, (list, tuple)) and
not (isinstance(img, np.ndarray) and len(img.shape) == 4) and
not (isinstance(img, torch.Tensor) and len(img.shape) == 4)
):
img = [img]
batch_boxes = [batch_boxes]
batch_mode = False
# Parse save path(s)
if save_path is not None:
if isinstance(save_path, str):
save_path = [save_path]
else:
save_path = [None for _ in range(len(img))]
# Process all bounding boxes
faces = []
for im, box_im, path_im in zip(img, batch_boxes, save_path):
if box_im is None:
faces.append(None)
continue
if not self.keep_all:
box_im = box_im[[0]]
faces_im = []
for i, box in enumerate(box_im):
face_path = path_im
if path_im is not None and i > 0:
save_name, ext = os.path.splitext(path_im)
face_path = save_name + '_' + str(i + 1) + ext
face = extract_face(im, box, self.image_size, self.margin, face_path)
if self.post_process:
face = fixed_image_standardization(face)
faces_im.append(face)
if self.keep_all:
faces_im = torch.stack(faces_im)
else:
faces_im = faces_im[0]
faces.append(faces_im)
if not batch_mode:
faces = faces[0]
return faces
def fixed_image_standardization(image_tensor):
processed_tensor = (image_tensor - 127.5) / 128.0
return processed_tensor
def prewhiten(x):
mean = x.mean()
std = x.std()
std_adj = std.clamp(min=1.0/(float(x.numel())**0.5))
y = (x - mean) / std_adj
return y
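# Hedged comparison sketch (added, not part of the original module; assumes the
# module-level torch import above): both transforms map raw [0, 255] pixel values to
# roughly zero-mean inputs; the first uses fixed constants, the second per-tensor stats.
if __name__ == "__main__":
    _t = torch.randint(0, 256, (3, 160, 160)).float()
    print(fixed_image_standardization(_t).mean())     # close to 0 for uniform pixels
    print(prewhiten(_t).mean(), prewhiten(_t).std())  # ~0 and ~1 by construction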
|
py | 1a475f67f584bba7f7ff061f79c543d5b5fe1b3e | # coding=utf-8
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pytest
import requests
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper import \
ResourceType
from ..testlib.resource_validator import ResourceValidator
from ..testlib.service_reader import LoadbalancerReader
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def services():
neutron_services_filename = (
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../../testdata/service_requests/pool_multiple_members.json')
)
return (json.load(open(neutron_services_filename)))
def test_pool_lb_change_ratio(track_bigip_cfg, bigip, services, icd_config,
icontrol_driver):
env_prefix = icd_config['environment_prefix']
service_iter = iter(services)
validator = ResourceValidator(bigip, env_prefix)
# create lb
service = service_iter.next()
lb_reader = LoadbalancerReader(service)
folder = '{0}_{1}'.format(env_prefix, lb_reader.tenant_id())
icontrol_driver._common_service_handler(service)
assert bigip.folder_exists(folder)
# create listener
service = service_iter.next()
icontrol_driver._common_service_handler(service)
# create pool with round-robin, no members
service = service_iter.next()
icontrol_driver._common_service_handler(service)
pool_srvc = service['pools'][0]
pool_name = '{0}_{1}'.format(env_prefix, pool_srvc['id'])
validator.assert_pool_valid(pool_srvc, folder)
pool = bigip.get_resource(ResourceType.pool, pool_name, partition=folder)
assert pool.loadBalancingMode == 'round-robin'
# create member with weight = 1
service = service_iter.next()
member = service['members'][0]
icontrol_driver._common_service_handler(service)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'round-robin'
# create member with weight > 1
service = service_iter.next()
member = service['members'][1]
icontrol_driver._common_service_handler(service)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'ratio-member'
# create member with weight = 1
service = service_iter.next()
member = service['members'][2]
icontrol_driver._common_service_handler(service)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'ratio-member'
# delete pool member with weight > 1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'round-robin'
# update pool to have lb method least connections
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-member'
# create member with weight > 1
service = service_iter.next()
member = service['members'][2]
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'ratio-least-connections-member'
# delete member with weight > 1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-member'
# delete second member
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-member'
# set lb method to SOURCE_IP for pool
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-node'
# update member to have weight > 1
service = service_iter.next()
member = service['members'][0]
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
validator.assert_member_valid(pool_srvc, member, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-node'
# delete remaining member
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool_srvc, folder)
pool.refresh()
assert pool.loadBalancingMode == 'least-connections-node'
# delete pool
service = service_iter.next()
icontrol_driver._common_service_handler(service)
assert not bigip.resource_exists(
ResourceType.pool, pool_name, partition=folder)
# delete listener
service = service_iter.next()
icontrol_driver._common_service_handler(service)
# delete lb
service = service_iter.next()
icontrol_driver._common_service_handler(service)
|
py | 1a47612fff963c47003528f78b019a54b61518e9 | import os
SCRAPYU_PATH = os.path.realpath(os.path.dirname(__file__))
TEMPLATES_DIR = os.path.join(SCRAPYU_PATH, 'templates') |
py | 1a476242e018619e6a776335f307097ecd250f87 | import gym
from gym import error, spaces, utils
from gym.utils import seeding
from math import gcd
import pygame
import numpy as np
class MARLEnv(gym.Env):
WINDOW_HEIGHT = 360
WINDOW_WIDTH = 640
CELL_LENGTH = gcd(WINDOW_HEIGHT, WINDOW_WIDTH)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
YELLOW = (247, 240, 48)
RED = (201, 16, 41)
BLUE = (0, 0, 255)
PADDING = 5
MAX_NUMBER_OF_AGENTS = 5
MIN_BALLS_COUNT = 10
MAX_BALLS_COUNT = 20
MIN_PITS_COUNT = 3
MAX_PITS_COUNT = 10
ROBOT_PLAYER = "../assets/robot-pack/PNG/Top view/robot_yellow.png"
ROBOT_LOADED = "../assets/robot-pack/PNG/Top view/robot_green.png"
ROBOT_UN_LOADED = "../assets/robot-pack/PNG/Top view/robot_red.png"
TARGET_FLAG = "../assets/kenney_sportspack/PNG/Equipment/flag_checkered.png"
BALL = "../assets/kenney_sportspack/PNG/Equipment/ball_soccer1.png"
def __init__(self):
pygame.init()
self.game_window = pygame.display.set_mode((MARLEnv.WINDOW_WIDTH, MARLEnv.WINDOW_HEIGHT), 0, 32)
self.grid = None
self.agents = None
self.source_balls = None
self.target_balls = None
self.pits_pos = None
# Initialize the agents number.
self.reset()
def render(self, mode='human', close=False):
# Fill window.
self.game_window.fill(MARLEnv.WHITE)
############################
# Draw the grid.
############################
h, w = self.grid.shape
for i in range(0, w, MARLEnv.CELL_LENGTH):
pygame.draw.line(self.game_window, MARLEnv.BLACK, (i, 0),
(i, MARLEnv.WINDOW_HEIGHT - 1))
for j in range(0, h, MARLEnv.CELL_LENGTH):
pygame.draw.line(self.game_window, MARLEnv.BLACK, (0, j), (MARLEnv.WINDOW_WIDTH - 1, j))
############################
# Draw the pits.
############################
for pit_pos in self.pits_pos:
pygame.draw.rect(self.game_window, MARLEnv.RED,
(pit_pos[0] * MARLEnv.CELL_LENGTH, pit_pos[1] * MARLEnv.CELL_LENGTH, MARLEnv.CELL_LENGTH,
MARLEnv.CELL_LENGTH))
############################
# Draw the source and the dest boxes.
############################
pygame.draw.rect(self.game_window, MARLEnv.BLUE,
(0, 0, MARLEnv.CELL_LENGTH, MARLEnv.CELL_LENGTH))
i, j = (
MARLEnv.WINDOW_HEIGHT - MARLEnv.CELL_LENGTH + 1,
MARLEnv.WINDOW_WIDTH - MARLEnv.CELL_LENGTH + 1
)
pygame.draw.rect(self.game_window, MARLEnv.YELLOW,
(j, i, i + MARLEnv.CELL_LENGTH, j + MARLEnv.CELL_LENGTH))
############################
# Draw the agents.
############################
i = 0
for agent in self.agents:
if i == 0:
robot_img = pygame.image.load(MARLEnv.ROBOT_PLAYER).convert_alpha()
elif agent['loaded']:
robot_img = pygame.image.load(MARLEnv.ROBOT_LOADED).convert_alpha()
else:
robot_img = pygame.image.load(MARLEnv.ROBOT_UN_LOADED).convert_alpha()
robot_img = pygame.transform.scale(robot_img,
(MARLEnv.CELL_LENGTH - 2 * MARLEnv.PADDING,
MARLEnv.CELL_LENGTH - 2 * MARLEnv.PADDING))
robot_img_rect = (
agent['pos'][0] * MARLEnv.CELL_LENGTH + MARLEnv.PADDING,
agent['pos'][1] * MARLEnv.CELL_LENGTH + MARLEnv.PADDING)
self.game_window.blit(robot_img, robot_img_rect)
i += 1
############################
# Draw the target flag.
############################
flag = pygame.image.load(MARLEnv.TARGET_FLAG).convert_alpha()
flag = pygame.transform.scale(flag, (30, 30))
flag_rect = (
MARLEnv.WINDOW_WIDTH - MARLEnv.CELL_LENGTH,
MARLEnv.WINDOW_HEIGHT - MARLEnv.CELL_LENGTH - MARLEnv.PADDING
)
self.game_window.blit(flag, flag_rect)
############################
# Draw the items (balls).
############################
for ball in self.source_balls:
ball_img = pygame.image.load(MARLEnv.BALL).convert_alpha()
ball_rect = (ball['pos'][0] - MARLEnv.PADDING, ball['pos'][1] - MARLEnv.PADDING)
self.game_window.blit(ball_img, ball_rect)
for ball in self.target_balls:
ball_img = pygame.image.load(MARLEnv.BALL).convert_alpha()
ball_rect = (ball['pos'][0] + MARLEnv.PADDING, ball['pos'][1] + MARLEnv.PADDING)
self.game_window.blit(ball_img, ball_rect)
############################
# Update pygame display(required).
############################
pygame.display.update()
return
def step(self, action):
"""
Parameters
----------
action :
Returns
-------
ob, reward, episode_over, info : tuple
ob (object) :
an environment-specific object representing your observation of
the environment.
reward (float) :
amount of reward achieved by the previous action. The scale
varies between environments, but the goal is always to increase
your total reward.
episode_over (bool) :
whether it's time to reset the environment again. Most (but not
all) tasks are divided up into well-defined episodes, and done
being True indicates the episode has terminated. (For example,
perhaps the pole tipped too far, or you lost your last life.)
info (dict) :
diagnostic information useful for debugging. It can sometimes
be useful for learning (for example, it might contain the raw
probabilities behind the environment's last state change).
However, official evaluations of your agent are not allowed to
use this for learning.
"""
x, y = self.agents[0]['pos']
pickup = False
drop = False
false_pickup = False
false_drop = False
collision = False
reward = 0
episode_over = False
print(action)
if action == 0: # 'LEFT':
x -= 1
elif action == 1: # 'RIGHT':
x += 1
elif action == 2: # 'UP':
y -= 1
elif action == 3: # 'DOWN':
y += 1
elif action == 4: # 'PICK_UP':
# check if he picked up correctly in the right place and there exists at least one ball in the source.
if not ((y, x) in [(0, 1), (1, 0), (1, 1)] and len(self.source_balls) > 0 and (
not self.agents[0]['loaded'])):
false_pickup = True
else:
pickup = True
self.agents[0]['loaded'] = True
ball = self.source_balls.pop(len(self.source_balls) - 1)
self.agents[0]['balls'].append(ball)
self.agents[0]['steps'] = -1
elif action == 5:  # 'DROP':
drop = True
last_rack_idx_x = MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH - 1
last_rack_idx_y = MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH - 1
if (self.agents[0]['loaded'] and
(y, x) in [(last_rack_idx_y, last_rack_idx_x - 1), (last_rack_idx_y - 1, last_rack_idx_x),
(last_rack_idx_y - 1, last_rack_idx_x - 1)] and
len(self.source_balls) > 0):
ball = self.agents[0]['balls'].pop()
ball['pos'] = (
np.random.randint(MARLEnv.WINDOW_WIDTH - MARLEnv.CELL_LENGTH,
MARLEnv.WINDOW_WIDTH - MARLEnv.PADDING),
np.random.randint(MARLEnv.WINDOW_HEIGHT - MARLEnv.CELL_LENGTH,
MARLEnv.WINDOW_HEIGHT - MARLEnv.PADDING)
)
self.target_balls.append(ball)
self.agents[0]['loaded'] = len(self.agents[0]['balls']) > 0
self.agents[0]['steps'] = -1
elif (self.agents[0]['loaded'] and
(y, x) in [(last_rack_idx_y, last_rack_idx_x - 1), (last_rack_idx_y - 1, last_rack_idx_x),
(last_rack_idx_y - 1, last_rack_idx_x - 1)]):
false_drop = True
episode_over = True
else:
false_drop = True
if (x, y) in self.pits_pos or (x, y) in [self.agents[i]['pos'] for i in range(1, len(self.agents))]:
collision = True
episode_over = True
self.agents[0]['steps'] += 1
self.agents[0]['pos'] = (x, y)
# TODO add missed pickups
reward = -collision * 100 - \
false_drop * 80 - \
false_pickup * 70 - \
self.agents[0]['steps'] + \
90 * drop * (not false_drop) + \
90 * pickup * (not false_pickup)
observation = self.get_observation()
print(reward, x, y)
return reward, episode_over, observation
def reset(self):
# Add pits.
self.pits_pos = []
for i in range(np.random.randint(MARLEnv.MIN_PITS_COUNT, MARLEnv.MAX_PITS_COUNT)):
self.pits_pos.append(
(
np.random.randint(3, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH - 2),
np.random.randint(3, MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH - 2)
)
)
# Initialize the agents number.
self.agents = []
for i in range(np.random.randint(2, MARLEnv.MAX_NUMBER_OF_AGENTS)):
x, y = (np.random.randint(0, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH),
np.random.randint(0, MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH))
while (x, y) in self.pits_pos:
x, y = (np.random.randint(0, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH),
np.random.randint(0, MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH))
self.agents.append(
{
'pos': (x, y),
'steps': 0,
'loaded': False,
'balls': []
}
)
# Initialize the grid.
self.grid = np.zeros((MARLEnv.WINDOW_HEIGHT, MARLEnv.WINDOW_WIDTH))
# Initialize the items(balls) parameters.
self.source_balls = []
for i in range(np.random.randint(MARLEnv.MIN_BALLS_COUNT, MARLEnv.MAX_BALLS_COUNT)):
self.source_balls.append(
{
'pos': (np.random.randint(0, MARLEnv.CELL_LENGTH // 1.5),
np.random.randint(0, MARLEnv.CELL_LENGTH // 1.5))
}
)
self.target_balls = []
def get_observation(self):
ob = np.chararray(
(MARLEnv.WINDOW_HEIGHT // MARLEnv.CELL_LENGTH, MARLEnv.WINDOW_WIDTH // MARLEnv.CELL_LENGTH))
ob[:] = '.'
# set the source balls.
if len(self.source_balls) > 0:
ob[0][0] = 'X'
else:
ob[0][0] = 'E'
# set the player.
x, y = self.agents[0]['pos']
ob[y][x] = 'P'
# set other agents
for i in range(1, len(self.agents)):
agent = self.agents[i]
x, y = agent['pos']
ob[y][x] = '*' # TODO @Samir, try to make it different.
# set pits
for pit_pos in self.pits_pos:
x, y = pit_pos
ob[y][x] = '*'
# set target balls/
if len(self.target_balls) > 0:
ob[-1][-1] = 'X'
else:
ob[-1][-1] = 'E'
return ob
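# Hedged usage sketch (added, not part of the original environment): a random-action
# rollout. Note that this step() returns (reward, episode_over, observation) rather
# than the standard gym (obs, reward, done, info) 4-tuple.
if __name__ == "__main__":
    env = MARLEnv()
    obs = env.get_observation()
    for _ in range(20):
        env.render()
        reward, episode_over, obs = env.step(np.random.randint(0, 6))
        if episode_over:
            env.reset()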
|
py | 1a4763865eebba11df4bc055fabe83e83b11719d | from typing import List, NamedTuple
import libkol
from ..Error import (
InvalidLocationError,
NotEnoughMeatError,
UnknownError,
WrongKindOfItemError,
)
from ..util import parsing
from .request import Request
from ..Store import Store
class Response(NamedTuple):
items: List["libkol.types.ItemQuantity"]
meat_gained: int
class npc_buy(Request):
"""
Purchases items from an NPC store.
:param session: Active session
:param store: NPC store to buy from
:param item: Item to buy
:param quantity: Quantity of said item to buy
"""
def __init__(
self,
session: "libkol.Session",
store: Store,
item: "libkol.Item",
quantity: int = 1,
) -> None:
if item.store_id != store.id:
raise WrongKindOfItemError("This item cannot be purchased in that store")
# Gift shop is handled differently
if store.slug == "town_giftshop.php":
params = {"action": "buy", "howmany": quantity, "whichitem": item.id}
self.request = session.request("town_giftshop.php", pwd=True, params=params)
return
params = {"whichshop": store.slug, "action": "buyitem", "quantity": quantity}
if item.store_row:
params["whichrow"] = item.store_row
else:
params["whichitem"] = item.id
self.request = session.request("shop.php", pwd=True, params=params)
@staticmethod
async def parser(content: str, **kwargs) -> Response:
if len(content) == 0:
raise InvalidLocationError("You cannot visit that store yet.")
if "You've been sent back here by some kind of bug" in content:
raise InvalidLocationError("The store you tried to visit doesn't exist.")
if (
"This store doesn't sell that item" in content
or "Invalid item selected" in content
or "<td>That isn't a thing that's sold here.</td>" in content
):
raise WrongKindOfItemError("This store doesn't carry that item.")
if "You can't afford " in content:
raise NotEnoughMeatError(
"You do not have enough meat to purchase the item(s)."
)
items = await parsing.item(content)
if len(items) == 0:
raise UnknownError("Unknown error. No items received.")
meat = parsing.meat(content)
return Response(items, meat)
|
py | 1a4763c6bd5409d1bbcd05a89feb8f3280fbbeda | #!/usr/bin/python
import sys, os
import json
import requests
import re
import collections
import operator
import time
from xml.dom import minidom
from datetime import datetime, timedelta
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
PUTIO_FOLDER_RSS = '' #example https://put.io/rss/video/12345678
PUTIO_USERNAME = '' #example username1
PUTIO_PASSWORD = '' #example password1
FROM_DATE_DAY_OFFSET = 3
SHOULD_VALIDATE_DATE = True
TIME_FORMAT = '%b %d'
ITEM_LIMIT = 10
data = {};
has_more_items = False
item_count = 0
def fetch_xml(url, username, password):
r = requests.get(url, auth=(username, password))
r.raise_for_status()
return minidom.parseString(r.text)
def get_el_val(el, name):
return el.getElementsByTagName(name)[0].childNodes[0].data
def get_show_name(orig_title):
title = orig_title
digit_search = re.search(r"\d", title)
if digit_search:
digit_index = digit_search.start()
title = title[:digit_index]
if (title[digit_index - 1] == 'S'):
title = title[:digit_index - 1]
return title.replace('.', ' ').title()
def get_season_episode(orig_title):
title = orig_title
digit_search = re.search("\d", title)
if not digit_search:
return title
digit_index = digit_search.start()
if title[digit_index - 1] == 'S':
digit_index -= 1
title = title[digit_index:]
dot_search = re.search("[.]", title)
if dot_search:
dot_index = dot_search.start()
title = title[:dot_index]
return title
def get_episode(season_episode_string):
# SxxEyy format
if len(season_episode_string) == 6:
return season_episode_string[4:6]
# xxyy format
if len(season_episode_string) == 4:
return season_episode_string[2:4]
# xyy format
if len(season_episode_string) == 3:
return season_episode_string[1:3]
return '-'
def get_season(season_episode_string):
# SxxEyy format
if len(season_episode_string) == 6:
return season_episode_string[1:3]
# xxyy format
if len(season_episode_string) == 4:
return season_episode_string[:2]
# xyy format
if len(season_episode_string) == 3:
return season_episode_string[0]
return '-'
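# Worked example (added for clarity; the release name is illustrative):
#   get_show_name('Some.Show.S02E05.720p')      -> 'Some Show ' (text before the SxxEyy tag)
#   get_season_episode('Some.Show.S02E05.720p') -> 'S02E05'
#   get_season('S02E05') -> '02' and get_episode('S02E05') -> '05'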
def parse_date(date_str):
# stripping away the ' -0000' timezone info
date_str = date_str[:(len(date_str) - 6)]
return datetime.strptime(date_str, "%a, %d %b %Y %H:%M:%S")
def validate_date(date_str):
if not SHOULD_VALIDATE_DATE:
return True
parsed_date = parse_date(date_str)
return get_from_date() <= parsed_date <= get_to_date()
def get_from_date():
return datetime.now() - timedelta(days=FROM_DATE_DAY_OFFSET)
def get_to_date():
return datetime.now()
def get_display_date(date):
return date.strftime(TIME_FORMAT)
try:
xml = fetch_xml(PUTIO_FOLDER_RSS, username=PUTIO_USERNAME, password=PUTIO_PASSWORD)
items = xml.getElementsByTagName("item")
for item in items:
date = get_el_val(item, "pubDate")
if not validate_date(date):
continue
item_count += 1
orig_title = get_el_val(item, "title")
title = get_show_name(orig_title)
season_episode = get_season_episode(orig_title)
season = get_season(season_episode)
episode = get_episode(season_episode)
key = title + " " + season_episode
data[key] = {
'name': title,
'season': season,
'episode': episode,
'link': get_el_val(item, "guid")
}
if item_count == ITEM_LIMIT:
has_more_items = True
break
if item_count == 0:
data['message'] = 'No new shows :-('
else:
data = collections.OrderedDict(sorted(data.items()))
if has_more_items:
data['message'] = '... and more!'
data['to_date'] = get_display_date(get_to_date())
data['from_date'] = get_display_date(get_from_date())
except requests.ConnectionError as ce:
data['error'] = 'Error'
print json.dumps(data) if isinstance(data, (dict, list, tuple, set)) else data.encode('utf-8')
sys.exit()
|
py | 1a4763e4662536f0d94af421c255b69d433b5eac | # Adapted from: https://github.com/hardmaru/estool/blob/master/es.py
import numpy as np
# A Vanilla Gradient Ascent
class GradientAscentOpt:
def __init__(self, theta0, stepsize, epsilon=1e-8):
self.epsilon = epsilon
self.dim = len(theta0)
self.stepsize = stepsize
self.reset(np.copy(theta0))
def reset(self, theta0):
self.theta = theta0
def _compute_step(self, grad):
return self.stepsize * grad
def step(self, grad):
step = self._compute_step(grad)
self.theta += step
# Adam Gradient Ascent
class AdamOpt:
def __init__(self, theta0, stepsize, betas=(0.9, 0.999), epsilon=1e-8):
self.epsilon = epsilon
self.t = 0
self.dim = len(theta0)
self.stepsize = stepsize
self.beta1 = betas[0]
self.beta2 = betas[1]
self.reset(np.copy(theta0))
def reset(self, theta0):
self.theta = theta0
self.m = np.zeros(self.dim, dtype=np.float32)
self.v = np.zeros(self.dim, dtype=np.float32)
def _compute_step(self, grad):
a = self.stepsize * np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * grad
self.v = self.beta2 * self.v + (1 - self.beta2) * (grad * grad)
step = a * self.m / (np.sqrt(self.v) + self.epsilon)
return step
def step(self, grad):
self.t += 1
step = self._compute_step(grad)
self.theta += step
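# Hedged sanity check (added, not in the original module): maximize the concave
# objective f(theta) = -||theta - target||^2 with AdamOpt; its gradient is
# -2 * (theta - target), so theta should converge to target.
if __name__ == "__main__":
    target = np.array([1.0, -2.0, 0.5])
    opt = AdamOpt(np.zeros(3), stepsize=0.05)
    for _ in range(1000):
        opt.step(-2.0 * (opt.theta - target))
    print(opt.theta)  # expected: approximately [1.0, -2.0, 0.5]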
|
py | 1a4764254ff1e32d09657f7b2a26f7091a911aba | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for the anime faces dataset where each image has a list labels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
import util_misc
import tensorflow.contrib.slim as slim
_FILE_PATTERN = '%s-*'
_ITEMS_TO_DESCRIPTIONS = {
'source': 'A color image of varying height and width.',
'label_text': 'The text of the label.',
'conditional_labels': 'one hot encoded labels extracted from `label_text`',
'filename': 'Name of the image file.',
}
FLAGS = tf.flags.FLAGS
DEFAULT_NUM_CLASSES = 51
TAG_TEXT_DELIMITER = ', '
_DEFAULT_TRAIN_SIZE = 27247
_DEFAULT_VALIDATION_SIZE = 641
_PRELOGITS_SIZE = 2048
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
assert FLAGS.num_classes == 0 or FLAGS.num_classes == DEFAULT_NUM_CLASSES
num_classes = FLAGS.num_classes or DEFAULT_NUM_CLASSES
_SPLITS_TO_SIZES = {
'train': FLAGS.train_size or _DEFAULT_TRAIN_SIZE,
'validation': FLAGS.validation_size or _DEFAULT_VALIDATION_SIZE,
}
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/class/label': tf.VarLenFeature(
dtype=tf.int64),
'image/class/text': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
'image/filename': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
}
output_name = 'target' if FLAGS.dataset_use_target else 'source'
items_to_handlers = {
output_name: slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'conditional_labels': dataset_utils.OneHotLabelTensor('image/class/text',
tags_id_lookup_file=FLAGS.tags_id_lookup_file,
num_classes=num_classes,
tags_key_column_index=FLAGS.tags_key_column_index,
tags_value_column_index=FLAGS.tags_value_column_index),
'label_text': slim.tfexample_decoder.Tensor('image/class/text'),
'filename': slim.tfexample_decoder.Tensor('image/filename'),
}
items_used = [output_name, 'conditional_labels', 'filename', 'label_text']
items_need_preprocessing = [output_name, 'conditional_labels',]
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
items_used=items_used,
items_need_preprocessing=items_need_preprocessing,
num_classes=num_classes,
has_source=True)
|
py | 1a4765e6d0217e8671ea29a962fe5c8051845adb | """Simple script to generate audio files for testing.
This script can generate example audio files used for testing. Some metadata fields,
like artist or title, can be inserted into the file. Various parameters can also be
changed, like number of channels or sample rate. It only outputs wav file, which is
good enough since we rely on a 3rd party library for loading files and metadata.
Number of frames to be written is specified with --frame-count (-n) and the same
pattern is always used. Assuming two channels and two bytes sample width, the
first frame will contain 00000000, second frame 01010101, third 02020202 and so
on. When reaching FFFFFFFF, the value will wrap back to 00000000 again.
"""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from os import path
from typing import cast
import wave
from mutagen import File
from mutagen.id3 import TALB, TIT2, TPE1
METADATA_FIELDS = {
"title": TIT2,
"artist": TPE1,
"album": TALB,
}
FRAMES_PER_PACKET = 352
def write_new_wave_file(filename: str, args) -> None:
"""Generate and write a new sample WAV file."""
if path.exists(filename) and not args.overwrite:
raise Exception("file already exists")
with wave.open(filename, "wb") as handle:
wfile: wave.Wave_write = cast(wave.Wave_write, handle)
# See: https://github.com/PyCQA/pylint/issues/4534
# pylint: disable=no-member
wfile.setnchannels(args.channels)
wfile.setsampwidth(args.sample_width)
wfile.setframerate(args.sample_rate)
for frame_number in range(args.frame_count):
if args.static:
frame = args.channels * args.sample_width * b"\x00"
else:
frame = args.channels * args.sample_width * bytes([frame_number & 0xFF])
wfile.writeframes(frame)
# pylint: enable=no-member
def add_metadata(filename: str, args):
"""Add metadata to an existing file."""
f = File(filename)
f.add_tags()
for title, tag in METADATA_FIELDS.items():
f.tags.add(tag(encoding=3, text=[getattr(args, title)]))
f.save()
def main():
"""Script starts here."""
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("filename", help="output filename")
parser.add_argument(
"-c", "--channels", type=int, default=2, help="number of channels"
)
parser.add_argument(
"-w", "--sample-width", type=int, default=2, help="sample width in bytes"
)
parser.add_argument(
"-r", "--sample-rate", type=int, default=44100, help="sample rate"
)
parser.add_argument(
"-s",
"--static",
default=False,
action="store_true",
help="use just zeroes as content",
)
parser.add_argument(
"-o",
"--overwrite",
default=False,
action="store_true",
help="overwrite audio file if it exists",
)
parser.add_argument(
"-n",
"--frame-count",
type=int,
default=FRAMES_PER_PACKET * 2,
help="frames to generate",
)
metadata = parser.add_argument_group("metadata")
for item in METADATA_FIELDS:
metadata.add_argument(f"--{item}", default=None, help=item)
args = parser.parse_args()
write_new_wave_file(args.filename, args)
add_metadata(args.filename, args)
if __name__ == "__main__":
main()
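# Hedged usage examples (added; the script filename is an assumption):
#   python generate_audio.py plain.wav
#   python generate_audio.py tagged.wav -n 704 -r 48000 --title "Test" --artist "Tester"
# With the default 2 channels and 2-byte samples, frame 0 is 00000000, frame 1 is
# 01010101, and so on, wrapping after 0xFF as described in the module docstring.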
|
py | 1a4767e67ce56bf33ce9ec4c248f6b8e3091837c | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "List IPS rules"
class Input:
ID = "id"
SCOPE = "scope"
class Output:
COVERED_CVES = "covered_cves"
RESPONSE_JSON = "response_json"
RULES_ASSIGNED = "rules_assigned"
class ListRulesInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "integer",
"title": "ID",
"description": "ID of the computer or policy",
"order": 2
},
"scope": {
"type": "string",
"title": "Scope",
"description": "Set the scope",
"enum": [
"computer",
"policy"
],
"order": 1
}
},
"required": [
"id",
"scope"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ListRulesOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"covered_cves": {
"type": "array",
"title": "CVEs",
"description": "CVEs covered by the assigned rules",
"items": {
"type": "string"
},
"order": 2
},
"response_json": {
"type": "object",
"title": "Response JSON",
"description": "Full response in JSON format",
"order": 3
},
"rules_assigned": {
"type": "array",
"title": "Rules Assigned",
"description": "All IPS rules currently assigned",
"items": {
"type": "integer"
},
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | 1a4767f47d796becb0ed8ed66309b24423040bfe | test = 2
print(test,type(test))
test = 2.5
print(test,type(test))
test = 'Winner'
print(test,type(test))
test = [2,4,'Winner']
print(test,type(test)) |
py | 1a476843bac1101ed430ff8669a123df26ead3c8 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import seaborn as sns
from ipywidgets import *
from IPython.display import display, HTML
def prodmix_graph(zoom):
# create the plot object
fig, ax = plt.subplots(figsize=(8, 8))
s = np.linspace(0, 3000)
plt.plot(s, 10000/6 - 5*s/6, lw=3, label='$5x_1 + 6x_2 \leq 10000$')
plt.fill_between(s, 0, 10000/6 - 5*s/6, alpha=0.1)
plt.plot(s, 1500 - s/2, lw=3, label='$x_1 + 2x_2 \leq 3000$')
plt.fill_between(s, 0, 1500 - s/2, alpha=0.1)
plt.plot(600 * np.ones_like(s), s, lw=3, label='$x_1 \leq 600$')
plt.fill_betweenx(s, 0, 600, alpha=0.1)
plt.plot(s, 1200 * np.ones_like(s), lw=3, label='$x_2 \leq 1200$')
plt.fill_between(s, 0, 1200, alpha=0.1)
# add non-negativity constraints
plt.plot(s, np.zeros_like(s), lw=3, label='$x_1$ non-negative')
plt.plot(np.zeros_like(s), s, lw=3, label='$x_2$ non-negative')
# highlight the feasible region
path = Path([
(0., 0.),
(0., 1200.),
(560, 1200.),
(600., 7000/6),
(600., 0.),
(0., 0.),
])
patch = PathPatch(path, label='feasible region', alpha=0.5)
ax.add_patch(patch)
# labels and stuff
plt.xlabel('$x_1$ (Basic)', fontsize=16)
plt.ylabel('$x_2$ (XP)', fontsize=16)
if zoom:
plt.xlim(400, 800)
plt.ylim(1000, 1400)
else:
plt.xlim(-0.5, 1500)
plt.ylim(-0.5, 1500)
plt.legend(fontsize=11)
ax.legend(loc='upper right', bbox_to_anchor=(1.4, 1))
plt.show()
def prodmix_obj(zoom, margin1, margin2):
fig, ax = plt.subplots(figsize=(9, 8))
s = np.linspace(0, 1500)
plt.plot(s, 10000/6 - 5*s/6, lw=3, label='$5x_1 + 6x_2 \leq 10000$')
plt.plot(s, 1500 - s/2, lw=3, label='$x_1 + 2x_2 \leq 3000$')
plt.plot(600 * np.ones_like(s), s, lw=3, label='$x_1 \leq 600$')
plt.plot(s, 1200 * np.ones_like(s), lw=3, label='$x_2 \leq 1200$')
plt.plot(s, np.zeros_like(s), lw=3, label='$x_1$ non-negative')
plt.plot(np.zeros_like(s), s, lw=3, label='$x_2$ non-negative')
# plot the possible (x1, x2) pairs
pairs = [(x1, x2) for x1 in np.arange(start=0, stop=600, step=25)
for x2 in np.arange(start=0, stop=1200, step=30)
if (5*x1 + 6*x2) <= 10000
and (x1 + 2*x2) <= 3000
and x1<=600 and x2<=1200]
# split these into our variables
x1, x2 = np.hsplit(np.array(pairs), 2)
# calculate the objective function at each pair
z = margin1*x1 + margin2*x2 # the objective function
# plot the results
plt.scatter(x1, x2, c=z, cmap='jet',
label='Profit={} $x_1$ + {} $x_2$'.format(margin1, margin2), zorder=3)
# labels and stuff
cb = plt.colorbar()
cb.set_label('profit', fontsize=14)
plt.xlabel('$x_1$ (Basic)', fontsize=16)
plt.ylabel('$x_2$ (XP)', fontsize=16)
if zoom:
plt.xlim(400, 800)
plt.ylim(1000, 1400)
else:
plt.xlim(-0.5, 1500)
plt.ylim(-0.5, 1500)
plt.legend(fontsize=18)
ax.legend(loc='upper right', bbox_to_anchor=(1.8, 1))
plt.show()
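def prodmix_solve(margin1, margin2):
    """Hedged addition (not part of the original helpers): cross-checks the graphical
    product-mix exploration above by solving the same LP numerically with SciPy
    (assumes scipy is installed). Maximizes margin1*x1 + margin2*x2 subject to
    5x1 + 6x2 <= 10000, x1 + 2x2 <= 3000, x1 <= 600, x2 <= 1200, x1, x2 >= 0."""
    from scipy.optimize import linprog
    res = linprog(c=[-margin1, -margin2],                    # negate to maximize
                  A_ub=[[5, 6], [1, 2], [1, 0], [0, 1]],
                  b_ub=[10000, 3000, 600, 1200],
                  bounds=[(0, None), (0, None)])
    return res.x, -res.fun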
def show_integer_feasregion():
fig, ax = plt.subplots(figsize=(9, 8))
s = np.linspace(0, 50)
plt.plot(s, 45 - 5*s/7, lw=3, label='$7x_1 + 5x_2 \leq 45$')
plt.plot(s, -25 + 1.9*s, lw=3, label='$1.9x_1 - x_2 \geq 25$')
plt.plot(s, 15.5 + 5*s/9, lw=3, label='$-5x_1 + 9x_2 \leq 15.5$')
plt.plot(16 * np.ones_like(s), s, lw=3, label='$x_1 \geq 16$')
plt.plot(s, 18 * np.ones_like(s), lw=3, label='$x_2 \geq 18$')
# plot the possible (x1, x2) pairs
pairs = [(x1, x2) for x1 in np.arange(start=15, stop=31, step=1)
for x2 in np.arange(start=15, stop=31, step=1)
if (5*x1 + 6*x2) <= 10000
and (x1 + 2*x2) <= 3000
and x1<=600 and x2<=1200]
# split these into our variables
x1, x2 = np.hsplit(np.array(pairs), 2)
# plot the results
plt.scatter(x1, x2, c=0*x1 + 0*x2, cmap='jet', zorder=3)
plt.xlim(15-0.5, 30)
plt.ylim(15-0.5, 30)
plt.xlabel('$x_1$', fontsize=16)
plt.ylabel('$x_2$', fontsize=16)
lppath = Path([
(16., 18.),
(16., 24.4),
(23.3, 28.4),
(26.8, 25.8),
(22.6, 18.),
(16., 18.),
])
lppatch = PathPatch(lppath, label='LP feasible region', alpha=0.3)
ax.add_patch(lppatch)
mippath = Path([
(16., 18.),
(16., 24),
(19, 26),
(23, 28),
(25, 27),
(26, 26),
(26, 25),
(23, 19),
(22, 18.),
(16., 18.),
])
mippatch = PathPatch(mippath, label='Integer feasible region', alpha=0.5)
ax.add_patch(mippatch)
plt.legend(fontsize=18)
ax.legend(loc='upper right', bbox_to_anchor=(1.4, 1))
plt.show()
def draw_local_global_opt():
function = lambda x: (x-1)*(x-2)*(x-3)*(x-4)*(x-5)*(x-6)*(x-7)
x = np.linspace(1,7,500)
plt.figure(figsize=(12,7))
plt.plot(x, function(x), label='$f(x)$')
globalx = 1.32
localx = 3.45
plt.scatter(globalx, function(globalx), s=30, c='r', label='global opt')
plt.scatter(localx, function(localx), s=30, c='orange', label='local opt')
plt.axhline(linewidth=2, color='black')
plt.legend()
plt.show()
def showconvex(values):
plt.subplots(2, 2, figsize=(17,10))
function = lambda x: (x-3)**2
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,1)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
plt.title('Convex: Line joining any two points is above the curve')
function = lambda x: np.log(x) - (x-2)**2
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,2)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
plt.title('Concave: Line joining any two points is below the curve')
function = lambda x: np.log(x) - 2*x*(x-4)**2
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,3)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
plt.title('Neither convex nor concave')
function = lambda x: np.cos(x*2)*x
x = np.linspace(0.8,4.2,500)
plt.subplot(2,2,4)
plt.plot(x, function(x), label='$f(x)$')
line = np.array(values)
plt.plot(line, function(line), 'o-')
plt.title('Neither convex nor concave')
plt.legend()
plt.show()
def deriv(x):
x_deriv = (x-2)*(x-3)*(x-4)*(x-5)+(x-1)*(x-3)*(x-4)*(x-5)+(x-1)*(x-2)*(x-4)*(x-5)+(x-1)*(x-2)*(x-3)*(x-5)\
+(x-1)*(x-2)*(x-3)*(x-4)
return x_deriv
def step(x_new, x_prev, precision, l_r):
function = lambda x: (x-1)*(x-2)*(x-3)*(x-4)*(x-5)
x = np.linspace(1,5,500)
x_list, y_list = [x_new], [function(x_new)]
while abs(x_new - x_prev) > precision:
x_prev = x_new
d_x = - deriv(x_prev)
x_new = x_prev + (l_r * d_x)
x_list.append(x_new)
y_list.append(function(x_new))
print("Local minimum occurs at: "+ str(x_new))
print("Number of steps: " + str(len(x_list)))
plt.subplots(1, 2, figsize=(17,7))
plt.subplot(1,2,1)
plt.scatter(x_list,y_list,c="g")
plt.plot(x_list,y_list,c="g")
plt.plot(x,function(x), c="r")
plt.title("Gradient descent")
plt.subplot(1,2,2)
plt.scatter(x_list,y_list,c="g")
plt.plot(x_list,y_list,c="g")
plt.plot(x,function(x), c="r")
plt.xlim([x_list[0]-.2,x_list[-1]+.2])
plt.title("Zoomed in Gradient descent to Key Area")
plt.show()
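# Example call (added; arguments are illustrative): step(x_new=2.0, x_prev=0.0,
# precision=1e-4, l_r=0.005) runs the loop above on f(x) = (x-1)(x-2)(x-3)(x-4)(x-5),
# descending from x = 2 to the nearby local minimum and plotting the visited points.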
def montyhall(sample_size):
np.random.seed(1234)
prizes = [np.append(np.random.permutation(prizes),[1,1])\
for prizes in np.tile(['goat', 'goat', 'car'], (sample_size,1))]
prizes = [np.append(r,np.where(r=='car')[0]+1) for r in prizes]
prizes = [np.append(r,np.random.choice(list(set(np.where(r=='goat')[0]+1)-{1}))) for r in prizes]
prizes = [np.append(r,list({'2','3'}-{r[-1]})[0]) for r in prizes]
df = pd.DataFrame(prizes, columns=['door1','door2','door3','select', 'keep', 'prize', 'open','switch'])
df['win'] = 'NA'
df.loc[df.prize == df.keep, 'win'] = 'keep'
df.loc[df.prize == df.switch, 'win'] = 'switch'
fig, axes = plt.subplots(1, 1, figsize = (12,6))
ax = sns.countplot(x='win', data=df, order=df['win'].value_counts().sort_values().index, ax=axes)
total = len(df.win)
nbars = len(ax.patches)
for p in ax.patches:
percentage = '{:.1f}%'.format(100 * p.get_height()/total)
x = p.get_x() + p.get_width()/2 -.05
y = p.get_y() + p.get_height() + total/100
ax.annotate(percentage, (x, y))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.show()
display(df.head(10))
def successful_rate(num_candidates,
num_reject,
num_sim = 5000,
printtable = False):
np.random.seed(1234)
candidates = [np.random.choice(range(100), num_candidates, replace=False) for i in range(num_sim)]
df = pd.DataFrame(candidates, columns=['person'+str(i+1) for i in range(num_candidates)])
df['best_score'] = df[df.columns[:num_candidates]].max(axis=1)
df['best'] = df[df.columns[:num_candidates]].idxmax(axis=1)
rate = dict.fromkeys(num_reject)
for r in num_reject:
df['best_at_stop'] = df[df.columns[:r]].max(axis=1)
df_rest = df[df.columns[r:num_candidates]]
df['hired_score'] = np.where(df_rest.gt(df['best_at_stop'], axis=0).any(axis=1),
df_rest[df_rest.gt(df['best_at_stop'],
axis=0)].stack(dropna=False).groupby(level=0).first(),
df[df.columns[num_candidates-1]]).astype('int64')
df['hired'] = np.where(df_rest.gt(df['best_at_stop'], axis=0).any(axis=1),
df_rest.gt(df['best_at_stop'], axis=0).idxmax(axis=1),
'person'+str(num_candidates))
rate[r] = np.sum(df.best==df.hired)/num_sim*100
if printtable == True:
print('The best candidate is hired {} times in {} trials with {} rejection'.format(np.sum(df.best==df.hired),
num_sim,
r))
display(df.head(10))
return rate
def secretary(n):
rate = successful_rate(n, range(1,n))
lists = sorted(rate.items())
x, y = zip(*lists)
plt.plot(x, y)
plt.show()
print('optimal rejection is {} with {}% chance to hire the best candidate'.\
format(max(rate, key=rate.get), round(max(rate.values())),2)) |
py | 1a476851f95836b8a66e60e1b5eaf28fdec3465a | input = """
rule(r1). head(b,r1). pbl(a,r1). nbl(neg_b,r1).
rule(r2). head(neg_a,r2). nbl(a,r2).
rule(r3). head(a,r3). nbl(neg_a,r3).
opp(a,neg_a).
opp(b,neg_b).
pr(r1,r2). pr(r2,r3).
"""
output = """
rule(r1). head(b,r1). pbl(a,r1). nbl(neg_b,r1).
rule(r2). head(neg_a,r2). nbl(a,r2).
rule(r3). head(a,r3). nbl(neg_a,r3).
opp(a,neg_a).
opp(b,neg_b).
pr(r1,r2). pr(r2,r3).
"""
|
py | 1a47685b05e5820c61f86a7b7edf722ebdb83fbc | import gym
import numpy as np
import tensorflow as tf
from gym.wrappers import TimeLimit
def ortho_init(scale=1.0):
"""
Orthogonal initialization for the policy weights
:param scale: (float) Scaling factor for the weights.
:return: (function) an initialization function for the weights
"""
# _ortho_init(shape, dtype, partition_info=None)
def _ortho_init(shape, *_, **_kwargs):
"""Intialize weights as Orthogonal matrix.
Orthogonal matrix initialization [1]_. For n-dimensional shapes where
n > 2, the n-1 trailing axes are flattened. For convolutional layers, this
corresponds to the fan-in, so this makes the initialization usable for
both dense and convolutional layers.
References
----------
.. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
"Exact solutions to the nonlinear dynamics of learning in deep
linear
"""
# lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
gaussian_noise = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(gaussian_noise, full_matrices=False)
weights = u if u.shape == flat_shape else v # pick the one with the correct shape
weights = weights.reshape(shape)
return (scale * weights[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
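# Hedged sanity check (added, not part of the original module): for a square shape and
# scale 1.0 the initializer should return an (approximately) orthogonal matrix.
if __name__ == "__main__":
    _w = ortho_init(scale=1.0)((64, 64))
    print(np.allclose(_w.T @ _w, np.eye(64), atol=1e-4))  # expected: True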
def conv(input_tensor, scope, *, n_filters, filter_size, stride,
pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
"""
Creates a 2d convolutional layer for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the convolution
:param scope: (str) The TensorFlow variable scope
:param n_filters: (int) The number of filters
:param filter_size: (Union[int, [int], tuple<int, int>]) The filter size for the squared kernel matrix,
or the height and width of kernel filter if the input is a list or tuple
:param stride: (int) The stride of the convolution
:param pad: (str) The padding type ('VALID' or 'SAME')
:param init_scale: (int) The initialization scale
:param data_format: (str) The data format for the convolution weights
:param one_dim_bias: (bool) If the bias should be one dimentional or not
:return: (TensorFlow Tensor) 2d convolutional layer
"""
if isinstance(filter_size, list) or isinstance(filter_size, tuple):
assert len(filter_size) == 2, \
"Filter size must have 2 elements (height, width), {} were given".format(len(filter_size))
filter_height = filter_size[0]
filter_width = filter_size[1]
else:
filter_height = filter_size
filter_width = filter_size
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, n_filters]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, n_filters, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [n_filters] if one_dim_bias else [1, n_filters, 1, 1]
n_input = input_tensor.get_shape()[channel_ax].value
wshape = [filter_height, filter_width, n_input, n_filters]
with tf.compat.v1.variable_scope(scope):
weight = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
bias = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
bias = tf.reshape(bias, bshape)
return bias + tf.nn.conv2d(input_tensor, weight, strides=strides, padding=pad, data_format=data_format)
def linear(input_tensor, scope, n_hidden, *, init_scale=1.0, init_bias=0.0):
"""
Creates a fully connected layer for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the fully connected layer
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:param init_bias: (int) The initialization offset bias
:return: (TensorFlow Tensor) fully connected layer
"""
with tf.compat.v1.variable_scope(scope):
n_input = input_tensor.get_shape()[1]
weight = tf.compat.v1.get_variable("w", [n_input, n_hidden], initializer=ortho_init(init_scale))
bias = tf.compat.v1.get_variable("b", [n_hidden], initializer=tf.constant_initializer(init_bias))
return tf.matmul(input_tensor, weight) + bias
def batch_to_seq(tensor_batch, n_batch, n_steps, flat=False):
"""
Transform a batch of Tensors, into a sequence of Tensors for recurrent policies
:param tensor_batch: (TensorFlow Tensor) The input tensor to unroll
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_steps: (int) The number of steps to run for each environment
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) sequence of Tensors for recurrent policies
"""
if flat:
tensor_batch = tf.reshape(tensor_batch, [n_batch, n_steps])
else:
tensor_batch = tf.reshape(tensor_batch, [n_batch, n_steps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=n_steps, value=tensor_batch)]
def seq_to_batch(tensor_sequence, flat=False):
"""
Transform a sequence of Tensors, into a batch of Tensors for recurrent policies
:param tensor_sequence: (TensorFlow Tensor) The input tensor to batch
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) batch of Tensors for recurrent policies
"""
shape = tensor_sequence[0].get_shape().as_list()
if not flat:
assert len(shape) > 1
n_hidden = tensor_sequence[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=tensor_sequence), [-1, n_hidden])
else:
return tf.reshape(tf.stack(values=tensor_sequence, axis=1), [-1])
def lstm(input_tensor, mask_tensor, cell_state_hidden, scope, n_hidden, init_scale=1.0, layer_norm=False):
"""
Creates an Long Short Term Memory (LSTM) cell for TensorFlow
:param input_tensor: (TensorFlow Tensor) The input tensor for the LSTM cell
:param mask_tensor: (TensorFlow Tensor) The mask tensor for the LSTM cell
:param cell_state_hidden: (TensorFlow Tensor) The state tensor for the LSTM cell
:param scope: (str) The TensorFlow variable scope
:param n_hidden: (int) The number of hidden neurons
:param init_scale: (int) The initialization scale
:param layer_norm: (bool) Whether to apply Layer Normalization or not
:return: (TensorFlow Tensor) LSTM cell
"""
_, n_input = [v.value for v in input_tensor[0].get_shape()]
with tf.variable_scope(scope):
weight_x = tf.get_variable("wx", [n_input, n_hidden * 4], initializer=ortho_init(init_scale))
weight_h = tf.get_variable("wh", [n_hidden, n_hidden * 4], initializer=ortho_init(init_scale))
bias = tf.get_variable("b", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
if layer_norm:
# Gain and bias of layer norm
gain_x = tf.get_variable("gx", [n_hidden * 4], initializer=tf.constant_initializer(1.0))
bias_x = tf.get_variable("bx", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
gain_h = tf.get_variable("gh", [n_hidden * 4], initializer=tf.constant_initializer(1.0))
bias_h = tf.get_variable("bh", [n_hidden * 4], initializer=tf.constant_initializer(0.0))
gain_c = tf.get_variable("gc", [n_hidden], initializer=tf.constant_initializer(1.0))
bias_c = tf.get_variable("bc", [n_hidden], initializer=tf.constant_initializer(0.0))
cell_state, hidden = tf.split(axis=1, num_or_size_splits=2, value=cell_state_hidden)
for idx, (_input, mask) in enumerate(zip(input_tensor, mask_tensor)):
cell_state = cell_state * (1 - mask)
hidden = hidden * (1 - mask)
if layer_norm:
gates = _ln(tf.matmul(_input, weight_x), gain_x, bias_x) \
+ _ln(tf.matmul(hidden, weight_h), gain_h, bias_h) + bias
else:
gates = tf.matmul(_input, weight_x) + tf.matmul(hidden, weight_h) + bias
in_gate, forget_gate, out_gate, cell_candidate = tf.split(axis=1, num_or_size_splits=4, value=gates)
in_gate = tf.nn.sigmoid(in_gate)
forget_gate = tf.nn.sigmoid(forget_gate)
out_gate = tf.nn.sigmoid(out_gate)
cell_candidate = tf.tanh(cell_candidate)
cell_state = forget_gate * cell_state + in_gate * cell_candidate
if layer_norm:
hidden = out_gate * tf.tanh(_ln(cell_state, gain_c, bias_c))
else:
hidden = out_gate * tf.tanh(cell_state)
input_tensor[idx] = hidden
cell_state_hidden = tf.concat(axis=1, values=[cell_state, hidden])
return input_tensor, cell_state_hidden
def _ln(input_tensor, gain, bias, epsilon=1e-5, axes=None):
"""
Apply layer normalisation.
:param input_tensor: (TensorFlow Tensor) The input tensor for the Layer normalization
:param gain: (TensorFlow Tensor) The scale tensor for the Layer normalization
:param bias: (TensorFlow Tensor) The bias tensor for the Layer normalization
:param epsilon: (float) The epsilon value for floating point calculations
:param axes: (tuple, list or int) The axes to apply the mean and variance calculation
:return: (TensorFlow Tensor) a normalizing layer
"""
if axes is None:
axes = [1]
mean, variance = tf.nn.moments(input_tensor, axes=axes, keep_dims=True)
input_tensor = (input_tensor - mean) / tf.sqrt(variance + epsilon)
input_tensor = input_tensor * gain + bias
return input_tensor
def conv_to_fc(input_tensor):
"""
Reshapes a Tensor from a convolutional network to a Tensor for a fully connected network
:param input_tensor: (TensorFlow Tensor) The convolutional input tensor
:return: (TensorFlow Tensor) The fully connected output tensor
"""
n_hidden = np.prod([v.value for v in input_tensor.get_shape()[1:]])
input_tensor = tf.reshape(input_tensor, [-1, n_hidden])
return input_tensor
class DoneOnSuccessWrapper(gym.Wrapper):
"""
Reset on success and offsets the reward.
Useful for GoalEnv.
"""
def __init__(self, env, reward_offset=1.0):
super(DoneOnSuccessWrapper, self).__init__(env)
self.reward_offset = reward_offset
def step(self, action):
obs, reward, done, info = self.env.step(action)
done = done or info.get('is_success', False)
reward += self.reward_offset
return obs, reward, done, info
def compute_reward(self, achieved_goal, desired_goal, info):
reward = self.env.compute_reward(achieved_goal, desired_goal, info)
return reward + self.reward_offset
class TimeFeatureWrapper(gym.Wrapper):
"""
Add remaining time to observation space for fixed length episodes.
See https://arxiv.org/abs/1712.00378 and https://github.com/aravindr93/mjrl/issues/13.
:param env: (gym.Env)
:param max_steps: (int) Max number of steps of an episode
if it is not wrapped in a TimeLimit object.
:param test_mode: (bool) In test mode, the time feature is constant,
equal to zero. This allow to check that the agent did not overfit this feature,
learning a deterministic pre-defined sequence of actions.
"""
def __init__(self, env, max_steps=1000, test_mode=False):
assert isinstance(env.observation_space, gym.spaces.Box)
# Add a time feature to the observation
low, high = env.observation_space.low, env.observation_space.high
low, high = np.concatenate((low, [0])), np.concatenate((high, [1.]))
env.observation_space = gym.spaces.Box(low=low, high=high, dtype=np.float32)
super(TimeFeatureWrapper, self).__init__(env)
if isinstance(env, TimeLimit):
self._max_steps = env._max_episode_steps
else:
self._max_steps = max_steps
self._current_step = 0
self._test_mode = test_mode
def reset(self):
self._current_step = 0
return self._get_obs(self.env.reset())
def step(self, action):
self._current_step += 1
obs, reward, done, info = self.env.step(action)
return self._get_obs(obs), reward, done, info
def _get_obs(self, obs):
"""
Concatenate the time feature to the current observation.
:param obs: (np.ndarray)
:return: (np.ndarray)
"""
# Remaining time is more general
time_feature = 1 - (self._current_step / self._max_steps)
if self._test_mode:
time_feature = 1.0
# Optionnaly: concatenate [time_feature, time_feature ** 2]
return np.concatenate((obs, [time_feature]))
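# Hedged usage sketch (added; the env id is an assumption and may differ by gym version):
#   env = TimeFeatureWrapper(gym.make("Pendulum-v0"), max_steps=200)
#   obs = env.reset()                                  # one extra trailing value in [0, 1]
#   obs, reward, done, info = env.step(env.action_space.sample())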
def total_episode_reward_logger(rew_acc, rewards, masks, writer, steps):
"""
calculates the cumulated episode reward, and prints to tensorflow log the output
:param rew_acc: (np.array float) the total running reward
:param rewards: (np.array float) the rewards
:param masks: (np.array bool) the end of episodes
:param writer: (TensorFlow Session.writer) the writer to log to
:param steps: (int) the current timestep
:return: (np.array float) the updated total running reward
:return: (np.array float) the updated total running reward
"""
with tf.compat.v1.variable_scope("environment_info", reuse=True):
for env_idx in range(rewards.shape[0]):
dones_idx = np.sort(np.argwhere(masks[env_idx]))
if len(dones_idx) == 0:
rew_acc[env_idx] += sum(rewards[env_idx])
else:
rew_acc[env_idx] += sum(rewards[env_idx, :dones_idx[0, 0]])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[0, 0])
for k in range(1, len(dones_idx[:, 0])):
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[k-1, 0]:dones_idx[k, 0]])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag="episode_reward", simple_value=rew_acc[env_idx])])
writer.add_summary(summary, steps + dones_idx[k, 0])
rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[-1, 0]:])
return rew_acc |
py | 1a47694f3b30902d724091297842e225e2e44f8e | # Loads a target data then defines tables for it
spark.read \
.option("header", True) \
.csv("./testdata/adult.csv") \
.write \
.saveAsTable("adult")
delphi.misc \
.options({"db_name": "default", "table_name": "adult", "row_id": "tid"}) \
.flatten() \
.write \
.saveAsTable("adult_flatten")
spark.table("adult").show(1)
spark.table("adult_flatten").show(1)
# Loads a ground truth data then defines tables for it
spark.read \
.option("header", True) \
.csv("./testdata/adult_clean.csv") \
.write \
.saveAsTable("adult_clean")
spark.table("adult_flatten") \
.join(spark.table("adult_clean"), ["tid", "attribute"], "inner") \
.where("not(value <=> correct_val)") \
.write \
.saveAsTable("error_cells_ground_truth")
spark.table("adult_clean").show(1)
spark.table("error_cells_ground_truth").show(1)
# Detects error cells then repairs them
from repair.errors import NullErrorDetector, ConstraintErrorDetector
error_detectors = [
ConstraintErrorDetector(constraint_path="./testdata/adult_constraints.txt"),
NullErrorDetector()
]
repaired_df = delphi.repair \
.setDbName("default") \
.setTableName("adult") \
.setRowId("tid") \
.setErrorDetectors(error_detectors) \
.run()
# Computes performance numbers (precision & recall)
# - Precision: the fraction of correct repairs, i.e., repairs that match
# the ground truth, over the total number of repairs performed
# - Recall: correct repairs over the total number of errors
pdf = repaired_df.join(spark.table("adult_clean"), ["tid", "attribute"], "inner")
rdf = repaired_df.join(spark.table("error_cells_ground_truth"), ["tid", "attribute"], "right_outer")
# Compares predicted values with the correct ones
pdf.orderBy("attribute").show()
precision = pdf.where("repaired <=> correct_val").count() / pdf.count()
recall = rdf.where("repaired <=> correct_val").count() / rdf.count()
f1 = (2.0 * precision * recall) / (precision + recall)
print(f"Precision={precision} Recall={recall} F1={f1}")
|
py | 1a4769ce22ddf16648b5c7027a4c3910ac79a234 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for PSD kernel linop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfpk = tfp.math.psd_kernels
def skip_if_no_xla(skip_test_fn):
try:
tf.function(lambda: tf.constant(0), experimental_compile=True)()
except tf.errors.UnimplementedError as e:
if 'Could not find compiler' in str(e):
skip_test_fn('XLA not available')
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorPSDKernelTest(tfp_test_util.TestCase):
"""Tests for tfp.experimental.linalg.LinearOperatorPSDKernel."""
def test_shape(self):
kernel = tfpk.ExponentiatedQuadratic(
amplitude=tf.random.uniform([17, 1, 1]),
feature_ndims=2)
x1 = tf.random.normal([1, 11, 5, 2, 13])
x2 = tf.random.normal([7, 1, 3, 2, 13])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
self.assertAllEqual((17, 7, 11, 5, 3), linop.shape)
self.assertAllEqual((17, 7, 11), linop.batch_shape)
def test_diag_part(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([7, 3, 5, 2]) # square matrix 5x5
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x1)),
linop.diag_part()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([3, 11, 2]) # wide matrix 5x11
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
linop.diag_part()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([2, 2]) # tall matrix 5x2
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
linop.diag_part()
])
self.assertAllClose(expected, actual)
def test_diag_part_xla(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([7, 3, 5, 2]) # square matrix 5x5
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x1)),
tf.function(linop.diag_part, experimental_compile=True)()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([3, 11, 2]) # wide matrix 5x11
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
tf.function(linop.diag_part, experimental_compile=True)()
])
self.assertAllClose(expected, actual)
x2 = tf.random.normal([2, 2]) # tall matrix 5x2
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
expected, actual = self.evaluate([
tf.linalg.diag_part(kernel.matrix(x1, x2)),
tf.function(linop.diag_part, experimental_compile=True)()
])
self.assertAllClose(expected, actual)
def test_row_scalar(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 2])
x2 = tf.random.normal([7, 3, 5, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
i = np.random.randint(0, 5)
expected, actual = self.evaluate(
[kernel.matrix(x1, x2)[..., i, :], linop.row(i)])
self.assertAllClose(expected, actual)
def test_row_batch(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([7, 1, 5, 2])
x2 = tf.random.normal([1, 3, 4, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
i = np.random.randint(0, 5, size=(7, 3))
cov = kernel.matrix(x1, x2)
expected, actual = self.evaluate([
tf.gather(cov, i[..., tf.newaxis], batch_dims=2)[..., 0, :],
linop.row(i)
])
self.assertAllClose(expected, actual)
def test_col_scalar(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 2])
x2 = tf.random.normal([7, 3, 5, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
j = np.random.randint(0, 5)
expected, actual = self.evaluate(
[kernel.matrix(x1, x2)[..., j], linop.col(j)])
self.assertAllClose(expected, actual)
def test_col_batch(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 5, 2])
x2 = tf.random.normal([7, 1, 4, 2])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
j = np.random.randint(0, 4, size=(7, 3))
cov = kernel.matrix(x1, x2)
transpose = tf.linalg.matrix_transpose
# Gather with batch_dims wants all the batch dims adjacent and leading, so
# transpose-gather-transpose is easier to write than injecting a
# range(nrows) column into the gather indices.
expected, actual = self.evaluate([
transpose(tf.gather(transpose(cov), j[..., tf.newaxis], batch_dims=2)
)[..., 0],
linop.col(j)
])
self.assertAllClose(expected, actual)
def test_matmul(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 2, 11])
x2 = tf.random.normal([5, 1, 4, 11])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2)
cov = kernel.matrix(x1, x2)
x = tf.random.normal([4, 3])
expected, actual = self.evaluate([tf.matmul(cov, x), linop.matmul(x)])
self.assertAllClose(expected, actual)
def test_matmul_chunked(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 2, 11])
x2 = tf.random.normal([5, 1, 14, 11])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2,
num_matmul_parts=7)
cov = kernel.matrix(x1, x2)
x = tf.random.normal([14, 3])
expected, actual = self.evaluate([tf.matmul(cov, x), linop.matmul(x)])
self.assertAllClose(expected, actual)
@parameterized.named_parameters(
(dict(testcase_name='_{}chunk'.format(n), nchunks=n) for n in (2, 5)))
def test_matmul_chunked_with_remainder(self, nchunks):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([3, 2, 11])
x2 = tf.random.normal([5, 1, 17, 11])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel, x1, x2, num_matmul_parts=nchunks)
cov = kernel.matrix(x1, x2)
x = tf.random.normal([17, 3])
expected, actual = self.evaluate([tf.matmul(cov, x), linop.matmul(x)])
self.assertAllClose(expected, actual)
def test_matmul_chunked_grad(self):
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 3])
x2 = tf.random.normal([7, 3])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(kernel, x1, x2,
num_matmul_parts=3)
x = tf.random.normal([7, 2])
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = linop.matmul(x)
out_grad = tf.random.normal(tf.shape(y))
actuals = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = tf.matmul(kernel.matrix(x1, x2), x)
expecteds = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
expecteds, actuals = self.evaluate([expecteds, actuals])
self.assertEqual(len(expecteds), len(actuals))
for expected, actual in zip(expecteds, actuals):
self.assertAllClose(expected, actual)
def test_matmul_xla(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 3])
x2 = tf.random.normal([7, 3])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel, x1, x2, num_matmul_parts=3)
x = tf.random.normal([7, 2])
@tf.function(experimental_compile=True)
def f():
return linop.matmul(x)
actual = f()
expected = tf.matmul(kernel.matrix(x1, x2), x)
expected, actual = self.evaluate([expected, actual])
self.assertAllClose(expected, actual)
def test_matmul_grad_xla(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
kernel = tfpk.ExponentiatedQuadratic()
x1 = tf.random.normal([5, 3])
x2 = tf.random.normal([7, 3])
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel, x1, x2, num_matmul_parts=3)
x = tf.random.normal([7, 2])
@tf.function(experimental_compile=True)
def f():
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = linop.matmul(x)
out_grad = tf.random.normal(tf.shape(y))
actuals = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
return y, actuals, out_grad
y, actuals, out_grad = f()
with tf.GradientTape() as tape:
tape.watch((x1, x2, x))
y = tf.matmul(kernel.matrix(x1, x2), x)
expecteds = tape.gradient(y, (x1, x2, x), output_gradients=out_grad)
expecteds, actuals = self.evaluate([expecteds, actuals])
self.assertEqual(len(expecteds), len(actuals))
for expected, actual in zip(expecteds, actuals):
self.assertAllClose(expected, actual)
def test_matmul_grad_xla_kernelparams(self):
skip_if_no_xla(self.skipTest)
if not tf.executing_eagerly(): return # experimental_compile is eager-only.
feature_dim = 3
def kernel_fn(eq_params, poly_params):
return (tfpk.ExponentiatedQuadratic(**eq_params) *
tfpk.Polynomial(**poly_params))
kernel_args = (
dict(length_scale=tf.random.uniform([], .5, 1.5, dtype=tf.float64),
amplitude=tf.random.uniform([], 1.5, 2.5, dtype=tf.float64)),
dict(bias_variance=tf.random.uniform([feature_dim], .5, 1.5,
dtype=tf.float64),
shift=tf.random.normal([feature_dim], dtype=tf.float64)))
x1 = tf.random.normal([5, feature_dim], dtype=tf.float64)
x2 = tf.random.normal([7, feature_dim], dtype=tf.float64)
linop = tfp.experimental.linalg.LinearOperatorPSDKernel(
kernel_fn, x1, x2, kernel_args=kernel_args, num_matmul_parts=3)
x = tf.random.normal([7, 2], dtype=tf.float64)
@tf.function(experimental_compile=True)
def f():
with tf.GradientTape() as tape:
tape.watch((x1, x2, x, kernel_args))
y = linop.matmul(x)
out_grad = tf.random.normal(tf.shape(y), dtype=tf.float64)
actuals = tape.gradient(y, (x1, x2, x, kernel_args),
output_gradients=out_grad)
return y, actuals, out_grad
y, actuals, out_grad = f()
with tf.GradientTape() as tape:
tape.watch((x1, x2, x, kernel_args))
y = tf.matmul(kernel_fn(*kernel_args).matrix(x1, x2), x)
expecteds = tape.gradient(y, (x1, x2, x, kernel_args),
output_gradients=out_grad)
expecteds, actuals = self.evaluate([expecteds, actuals])
tf.nest.assert_same_structure(expecteds, actuals)
for expected, actual in zip(tf.nest.flatten(expecteds),
tf.nest.flatten(actuals)):
self.assertAllClose(expected, actual)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
|
py | 1a476a0c6a6b55972bc23cb608fcb01f9191c85f | import os
from .Backend import Backend
class TextFile(Backend):
def __init__(self, filename):
self.filename = filename
i = 1
while os.path.exists(self.filename):
i += 1
self.filename = "%s_%d" % (filename, i)
        self.f = open(self.filename, 'w')
self.last_route = ""
def write(self, route, attribute, value):
if route != self.last_route:
self.f.write(str(route) + "\n")
self.last_route = route
try:
self.f.write("\t%s : %s\n" % (str(attribute), str(value.__dict__)))
except Exception:
self.f.write("\t%s : %s\n" % (str(attribute), str(value)))
def __del__(self):
self.f.close()
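# Minimal usage sketch (illustrative only; the route/attribute values below are
# made up and not part of the original module):
#   backend = TextFile('capture.txt')
#   backend.write('route-1', 'status', 'up')
#   backend.write('route-1', 'latency_ms', 12.5)
#   del backend  # __del__ closes the underlying file handle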
|
py | 1a476a44af7a9ae6178e5773e0909268c56c7085 | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import copy
import io
import multiprocessing
import os
import re
import stat
import subprocess
import sys
try:
from catkin_pkg.cmake import configure_file, get_metapackage_cmake_template_path
from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages
except ImportError as e:
sys.exit(
'ImportError: "from catkin_pkg.topological_order import '
'topological_order" failed: %s\nMake sure that you have installed '
'"catkin_pkg", it is up to date and on the PYTHONPATH.' % e
)
from catkin.cmake import get_cmake_path
from catkin.terminal_color import ansi, disable_ANSI_colors, fmt, sanitize
def split_arguments(args, splitter_name, default=None):
if splitter_name not in args:
return args, default
index = args.index(splitter_name)
return args[0:index], args[index + 1:]
def extract_cmake_and_make_arguments(args):
args, cmake_args, make_args, _ = _extract_cmake_and_make_arguments(args, extract_catkin_make=False)
return args, cmake_args, make_args
def extract_cmake_and_make_and_catkin_make_arguments(args):
return _extract_cmake_and_make_arguments(args, extract_catkin_make=True)
def _extract_cmake_and_make_arguments(args, extract_catkin_make):
cmake_args = []
make_args = []
catkin_make_args = []
arg_types = {
'--cmake-args': cmake_args,
'--make-args': make_args
}
if extract_catkin_make:
arg_types['--catkin-make-args'] = catkin_make_args
arg_indexes = {}
for k in arg_types.keys():
if k in args:
arg_indexes[args.index(k)] = k
for index in reversed(sorted(arg_indexes.keys())):
arg_type = arg_indexes[index]
args, specific_args = split_arguments(args, arg_type)
arg_types[arg_type].extend(specific_args)
# classify -D* and -G* arguments as cmake specific arguments
implicit_cmake_args = [a for a in args if a.startswith('-D') or a.startswith('-G')]
args = [a for a in args if a not in implicit_cmake_args]
return args, implicit_cmake_args + cmake_args, make_args, catkin_make_args
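# Hypothetical illustration of the argument splitting above (not part of the
# original module): for
#   args = ['build', '-DCMAKE_BUILD_TYPE=Release', '--make-args', '-j4']
# extract_cmake_and_make_arguments(args) would return
#   (['build'], ['-DCMAKE_BUILD_TYPE=Release'], ['-j4'])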
def cprint(msg, end=None):
print(fmt(msg), end=end)
def colorize_line(line):
cline = sanitize(line)
cline = cline.replace(
'-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~',
'-- @{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~@|'
)
if line.startswith('-- ~~'):
# -- ~~ -
cline = cline.replace('~~ ', '@{pf}~~ @|')
cline = cline.replace(' - ', ' - @!@{bf}')
cline = cline.replace('(', '@|(')
cline = cline.replace('(plain cmake)', '@|(@{rf}plain cmake@|)')
cline = cline.replace('(unknown)', '@|(@{yf}unknown@|)')
if line.startswith('-- +++'):
# -- +++ add_subdirectory(package)
cline = cline.replace('+++', '@!@{gf}+++@|')
cline = cline.replace('kin package: \'', 'kin package: \'@!@{bf}')
cline = cline.replace(')', '@|)')
cline = cline.replace('\'\n', '@|\'\n')
cline = cline.replace('cmake package: \'', 'cmake package: \'@!@{bf}')
cline = cline.replace('\'\n', '@|\'\n')
if line.startswith('-- ==>'):
cline = cline.replace('-- ==>', '-- @!@{bf}==>@|')
if line.lower().startswith('warning'):
# WARNING
cline = ansi('yf') + cline
if line.startswith('CMake Warning'):
# CMake Warning...
cline = cline.replace('CMake Warning', '@{yf}@!CMake Warning@|')
if line.startswith('ERROR:'):
# ERROR:
cline = cline.replace('ERROR:', '@!@{rf}ERROR:@|')
if line.startswith('CMake Error'):
# CMake Error...
cline = cline.replace('CMake Error', '@{rf}@!CMake Error@|')
if line.startswith('Call Stack (most recent call first):'):
# CMake Call Stack
cline = cline.replace('Call Stack (most recent call first):',
'@{cf}@_Call Stack (most recent call first):@|')
return fmt(cline)
def print_command_banner(cmd, cwd, color):
if color:
# Prepare for printing
cmd_str = sanitize(' '.join(cmd))
cwd_str = sanitize(cwd)
# Print command notice
cprint('@{bf}####')
cprint('@{bf}#### Running command: @!"%s"@|@{bf} in @!"%s"' % (cmd_str, cwd_str))
cprint('@{bf}####')
else:
print('####')
print('#### Running command: "%s" in "%s"' % (' '.join(cmd), cwd))
print('####')
def run_command_colorized(cmd, cwd, quiet=False):
run_command(cmd, cwd, quiet=quiet, colorize=True)
def run_command(cmd, cwd, quiet=False, colorize=False):
capture = (quiet or colorize)
stdout_pipe = subprocess.PIPE if capture else None
stderr_pipe = subprocess.STDOUT if capture else None
try:
proc = subprocess.Popen(
cmd, cwd=cwd, shell=False,
stdout=stdout_pipe, stderr=stderr_pipe
)
except OSError as e:
raise OSError("Failed command '%s': %s" % (cmd, e))
out = io.StringIO() if quiet else sys.stdout
if capture:
while True:
line = unicode(proc.stdout.readline())
if proc.returncode is not None or not line:
break
try:
line = colorize_line(line) if colorize else line
except Exception as e:
import traceback
traceback.print_exc()
                print('<catkin_make> color formatting problem: ' + str(e),
file=sys.stderr)
out.write(line)
proc.wait()
if proc.returncode:
if quiet:
print(out.getvalue())
raise subprocess.CalledProcessError(proc.returncode, ' '.join(cmd))
return out.getvalue() if quiet else ''
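# Hypothetical usage sketch (not part of the original module; paths are
# illustrative):
#   quiet_output = run_command(['cmake', '--version'], cwd='/tmp', quiet=True)
#   run_command_colorized(['make', 'tests'], cwd='/tmp/build_pkg')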
blue_arrow = '@!@{bf}==>@|@!'
def _check_build_dir(name, workspace, buildspace):
package_build_dir = os.path.join(buildspace, name)
if not os.path.exists(package_build_dir):
cprint(
blue_arrow + ' Creating build directory: \'' +
os.path.relpath(package_build_dir, workspace) + '\'@|'
)
os.mkdir(package_build_dir)
return package_build_dir
def isolation_print_command(cmd, path=None):
cprint(
blue_arrow + " " + sanitize(cmd) + "@|" +
(" @!@{kf}in@| '@!" + sanitize(path) + "@|'" if path else '')
)
def get_python_install_dir():
# this function returns the same value as the CMake variable PYTHON_INSTALL_DIR from catkin/cmake/python.cmake
python_install_dir = 'lib'
if os.name != 'nt':
python_version_xdoty = str(sys.version_info[0]) + '.' + str(sys.version_info[1])
python_install_dir = os.path.join(python_install_dir, 'python' + python_version_xdoty)
python_use_debian_layout = os.path.exists('/etc/debian_version')
python_packages_dir = 'dist-packages' if python_use_debian_layout else 'site-packages'
python_install_dir = os.path.join(python_install_dir, python_packages_dir)
return python_install_dir
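# Illustrative return values (assumptions, not exhaustive): e.g. under Python
# 2.7 this yields 'lib/python2.7/dist-packages' on a Debian-based system,
# 'lib/python2.7/site-packages' on other POSIX systems, and simply 'lib' on
# Windows.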
def handle_make_arguments(input_make_args, force_single_threaded_when_running_tests=False):
make_args = list(input_make_args)
if force_single_threaded_when_running_tests:
# force single threaded execution when running test since rostest does not support multiple parallel runs
run_tests = [a for a in make_args if a.startswith('run_tests')]
if run_tests:
print('Forcing "-j1" for running unit tests.')
make_args.append('-j1')
# If no -j/--jobs/-l/--load-average flags are in make_args
if not extract_jobs_flags(' '.join(make_args)):
# If -j/--jobs/-l/--load-average are in MAKEFLAGS
if 'MAKEFLAGS' in os.environ and extract_jobs_flags(os.environ['MAKEFLAGS']):
# Do not extend make arguments, let MAKEFLAGS set things
pass
else:
# Else extend the make_arguments to include some jobs flags
# If ROS_PARALLEL_JOBS is set use those flags
if 'ROS_PARALLEL_JOBS' in os.environ:
# ROS_PARALLEL_JOBS is a set of make variables, not just a number
ros_parallel_jobs = os.environ['ROS_PARALLEL_JOBS']
make_args.extend(ros_parallel_jobs.split())
else:
# Else Use the number of CPU cores
try:
jobs = multiprocessing.cpu_count()
make_args.append('-j{0}'.format(jobs))
make_args.append('-l{0}'.format(jobs))
except NotImplementedError:
# If the number of cores cannot be determined, do not extend args
pass
return make_args
def extract_jobs_flags(mflags):
regex = r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))' + \
r'|' + \
r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
matches = re.findall(regex, mflags) or []
matches = [m[0] or m[1] for m in matches]
return ' '.join([m.strip() for m in matches]) if matches else None
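# Hypothetical illustration of the flag extraction above (not part of the
# original module):
#   extract_jobs_flags('-j4 -l4 --silent')   -> '-j4 -l4'
#   extract_jobs_flags('--jobs=2 VERBOSE=1') -> '--jobs=2'
#   extract_jobs_flags('VERBOSE=1')          -> None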
def build_catkin_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args
):
cprint(
"Processing @{cf}catkin@| package: '@!@{bf}" +
package.name + "@|'"
)
# Make the build dir
build_dir = _check_build_dir(package.name, workspace, buildspace)
# Check last_env
if last_env is not None:
cprint(
blue_arrow + " Building with env: " +
"'{0}'".format(last_env)
)
# Check for Makefile and maybe call cmake
makefile = os.path.join(build_dir, 'Makefile')
if not os.path.exists(makefile) or force_cmake:
package_dir = os.path.dirname(package.filename)
if not os.path.exists(os.path.join(package_dir, 'CMakeLists.txt')):
export_tags = [e.tagname for e in package.exports]
if 'metapackage' not in export_tags:
print(colorize_line('Error: Package "%s" does not have a CMakeLists.txt file' % package.name))
sys.exit('Can not build catkin package without CMakeLists.txt file')
            # generate CMakeLists.txt for metapackages without one
print(colorize_line('Warning: metapackage "%s" should have a CMakeLists.txt file' % package.name))
cmake_code = configure_file(
get_metapackage_cmake_template_path(),
{'name': package.name, 'metapackage_arguments': 'DIRECTORY "%s"' % package_dir})
cmakelists_txt = os.path.join(build_dir, 'CMakeLists.txt')
with open(cmakelists_txt, 'w') as f:
f.write(cmake_code)
package_dir = build_dir
# Run cmake
cmake_cmd = [
'cmake',
package_dir,
'-DCATKIN_DEVEL_PREFIX=' + develspace,
'-DCMAKE_INSTALL_PREFIX=' + installspace
]
cmake_cmd.extend(cmake_args)
isolation_print_command(' '.join(cmake_cmd))
if last_env is not None:
cmake_cmd = [last_env] + cmake_cmd
try:
run_command_colorized(cmake_cmd, build_dir, quiet)
except subprocess.CalledProcessError as e:
if os.path.exists(makefile):
# remove Makefile to force CMake invocation next time
os.remove(makefile)
raise
else:
print('Makefile exists, skipping explicit cmake invocation...')
# Check to see if cmake needs to be run via make
make_check_cmake_cmd = ['make', 'cmake_check_build_system']
isolation_print_command(' '.join(make_check_cmake_cmd), build_dir)
if last_env is not None:
make_check_cmake_cmd = [last_env] + make_check_cmake_cmd
run_command_colorized(
make_check_cmake_cmd, build_dir, quiet
)
# Run make
make_cmd = ['make']
make_cmd.extend(handle_make_arguments(make_args, force_single_threaded_when_running_tests=True))
isolation_print_command(' '.join(make_cmd), build_dir)
if last_env is not None:
make_cmd = [last_env] + make_cmd
run_command(make_cmd, build_dir, quiet)
# Make install
if install:
make_install_cmd = ['make', 'install']
isolation_print_command(' '.join(make_install_cmd), build_dir)
if last_env is not None:
make_install_cmd = [last_env] + make_install_cmd
run_command(make_install_cmd, build_dir, quiet)
def build_cmake_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args
):
# Notify the user that we are processing a plain cmake package
cprint(
"Processing @{cf}plain cmake@| package: '@!@{bf}" + package.name +
"@|'"
)
# Make the build dir
build_dir = _check_build_dir(package.name, workspace, buildspace)
# Check last_env
if last_env is not None:
cprint(blue_arrow + " Building with env: " +
"'{0}'".format(last_env))
# Check for Makefile and maybe call cmake
makefile = os.path.join(build_dir, 'Makefile')
install_target = installspace if install else develspace
if not os.path.exists(makefile) or force_cmake:
# Call cmake
cmake_cmd = [
'cmake',
os.path.dirname(package.filename),
'-DCMAKE_INSTALL_PREFIX=' + install_target
]
cmake_cmd.extend(cmake_args)
isolation_print_command(' '.join(cmake_cmd))
if last_env is not None:
cmake_cmd = [last_env] + cmake_cmd
run_command_colorized(cmake_cmd, build_dir, quiet)
else:
print('Makefile exists, skipping explicit cmake invocation...')
# Check to see if cmake needs to be run via make
make_check_cmake_cmd = ['make', 'cmake_check_build_system']
isolation_print_command(' '.join(make_check_cmake_cmd), build_dir)
if last_env is not None:
make_check_cmake_cmd = [last_env] + make_check_cmake_cmd
run_command_colorized(
make_check_cmake_cmd, build_dir, quiet
)
# Run make
make_cmd = ['make']
make_cmd.extend(handle_make_arguments(make_args))
isolation_print_command(' '.join(make_cmd), build_dir)
if last_env is not None:
make_cmd = [last_env] + make_cmd
run_command(make_cmd, build_dir, quiet)
# Make install
make_install_cmd = ['make', 'install']
isolation_print_command(' '.join(make_install_cmd), build_dir)
if last_env is not None:
make_install_cmd = [last_env] + make_install_cmd
run_command(make_install_cmd, build_dir, quiet)
# If we are installing, and a env.sh exists, don't overwrite it
if install and os.path.exists(os.path.join(installspace, 'env.sh')):
return
cprint(blue_arrow + " Generating an env.sh")
# Generate env.sh for chaining to catkin packages
new_env_path = os.path.join(install_target, 'env.sh')
variables = {
'SETUP_DIR': install_target,
'SETUP_FILENAME': 'setup'
}
with open(os.path.join(new_env_path), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'env.sh.in'), variables))
os.chmod(new_env_path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
# Generate setup.sh for chaining to catkin packages
new_setup_path = os.path.join(install_target, 'setup.sh')
subs = {}
subs['cmake_prefix_path'] = install_target + ":"
subs['ld_path'] = os.path.join(install_target, 'lib') + ":"
pythonpath = os.path.join(install_target, get_python_install_dir())
subs['pythonpath'] = pythonpath + ':'
subs['pkgcfg_path'] = os.path.join(install_target, 'lib', 'pkgconfig')
subs['pkgcfg_path'] += ":"
subs['path'] = os.path.join(install_target, 'bin') + ":"
if not os.path.exists(install_target):
os.mkdir(install_target)
with open(new_setup_path, 'w+') as file_handle:
file_handle.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
""")
if last_env is not None:
last_setup_env = os.path.join(os.path.dirname(last_env), 'setup.sh')
file_handle.write('. %s\n\n' % last_setup_env)
file_handle.write("""\
# detect if running on Darwin platform
UNAME=`which uname`
UNAME=`$UNAME`
IS_DARWIN=0
if [ "$UNAME" = "Darwin" ]; then
IS_DARWIN=1
fi
# Prepend to the environment
export CMAKE_PREFIX_PATH="{cmake_prefix_path}$CMAKE_PREFIX_PATH"
if [ $IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH="{ld_path}$LD_LIBRARY_PATH"
else
export DYLD_LIBRARY_PATH="{ld_path}$DYLD_LIBRARY_PATH"
fi
export PATH="{path}$PATH"
export PKG_CONFIG_PATH="{pkgcfg_path}$PKG_CONFIG_PATH"
export PYTHONPATH="{pythonpath}$PYTHONPATH"
""".format(**subs))
def build_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args, catkin_make_args,
number=None, of=None
):
cprint('@!@{gf}==>@| ', end='')
new_last_env = get_new_env(package, develspace, installspace, install, last_env)
build_type = _get_build_type(package)
if build_type == 'catkin':
build_catkin_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args + catkin_make_args
)
if not os.path.exists(new_last_env):
raise RuntimeError(
"No env.sh file generated at: '" + new_last_env +
"'\n This sometimes occurs when a non-catkin package is "
"interpreted as a catkin package.\n This can also occur "
"when the cmake cache is stale, try --force-cmake."
)
elif build_type == 'cmake':
build_cmake_package(
path, package,
workspace, buildspace, develspace, installspace,
install, force_cmake, quiet, last_env, cmake_args, make_args
)
else:
sys.exit('Can not build package with unknown build_type')
if number is not None and of is not None:
msg = ' [@{gf}@!' + str(number) + '@| of @!@{gf}' + str(of) + '@|]'
else:
msg = ''
cprint('@{gf}<==@| Finished processing package' + msg + ': \'@{bf}@!' +
package.name + '@|\'')
return new_last_env
def get_new_env(package, develspace, installspace, install, last_env):
new_env = None
build_type = _get_build_type(package)
if build_type in ['catkin', 'cmake']:
new_env = os.path.join(
installspace if install else develspace,
'env.sh'
)
return new_env
def _get_build_type(package):
build_type = 'catkin'
if 'build_type' in [e.tagname for e in package.exports]:
build_type = [e.content for e in package.exports if e.tagname == 'build_type'][0]
return build_type
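# Illustrative package.xml export that the lookup above would match (example
# only, not part of the original module):
#   <export>
#     <build_type>cmake</build_type>
#   </export>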
def build_workspace_isolated(
workspace='.',
sourcespace=None,
buildspace=None,
develspace=None,
installspace=None,
merge=False,
install=False,
force_cmake=False,
colorize=True,
build_packages=None,
quiet=False,
cmake_args=None,
make_args=None,
catkin_make_args=None
):
'''
Runs ``cmake``, ``make`` and optionally ``make install`` for all
catkin packages in sourcespace_dir. It creates several folders
in the current working directory. For non-catkin packages it runs
``cmake``, ``make`` and ``make install`` for each, installing it to
the devel space or install space if the ``install`` option is specified.
:param workspace: path to the current workspace, ``str``
:param sourcespace: workspace folder containing catkin packages, ``str``
:param buildspace: path to build space location, ``str``
:param develspace: path to devel space location, ``str``
:param installspace: path to install space (CMAKE_INSTALL_PREFIX), ``str``
:param merge: if True, build each catkin package into the same
devel space. does not work with non-catkin packages, ``bool``
:param install: if True, install all packages to the install space,
``bool``
:param force_cmake: (optional), if True calls cmake explicitly for each
package, ``bool``
:param colorize: if True, colorize cmake output and other messages,
``bool``
:param build_packages: specific packages to build (all parent packages
in the topological order must have been built before), ``str``
:param quiet: if True, hides some build output, ``bool``
:param cmake_args: additional arguments for cmake, ``[str]``
:param make_args: additional arguments for make, ``[str]``
:param catkin_make_args: additional arguments for make but only for catkin
packages, ``[str]``
'''
if not colorize:
disable_ANSI_colors()
    # Check workspace existence
if not os.path.exists(workspace):
sys.exit("Workspace path '{0}' does not exist.".format(workspace))
workspace = os.path.abspath(workspace)
    # Check source space existence
if sourcespace is None:
ws_sourcespace = os.path.join(workspace, 'src')
if not os.path.exists(ws_sourcespace):
sys.exit("Could not find source space: {0}".format(sourcespace))
sourcespace = ws_sourcespace
sourcespace = os.path.abspath(sourcespace)
print('Base path: ' + str(workspace))
print('Source space: ' + str(sourcespace))
# Check build space
if buildspace is None:
buildspace = os.path.join(workspace, 'build_isolated')
buildspace = os.path.abspath(buildspace)
if not os.path.exists(buildspace):
os.mkdir(buildspace)
print('Build space: ' + str(buildspace))
# Check devel space
if develspace is None:
develspace = os.path.join(workspace, 'devel_isolated')
develspace = os.path.abspath(develspace)
print('Devel space: ' + str(develspace))
# Check install space
if installspace is None:
installspace = os.path.join(workspace, 'install_isolated')
installspace = os.path.abspath(installspace)
print('Install space: ' + str(installspace))
if cmake_args:
print("Additional CMake Arguments: " + " ".join(cmake_args))
else:
cmake_args = []
if make_args:
print("Additional make Arguments: " + " ".join(make_args))
else:
make_args = []
if catkin_make_args:
print("Additional make Arguments for catkin packages: " + " ".join(catkin_make_args))
else:
catkin_make_args = []
# Find packages
packages = find_packages(sourcespace, exclude_subspaces=True)
if not packages:
print(fmt("@{yf}No packages found in source space: %s@|" % sourcespace))
# verify that specified package exists in workspace
if build_packages:
packages_by_name = {p.name: path for path, p in packages.iteritems()}
unknown_packages = [p for p in build_packages if p not in packages_by_name]
if unknown_packages:
sys.exit('Packages not found in the workspace: %s' % ', '.join(unknown_packages))
# Report topological ordering
ordered_packages = topological_order_packages(packages)
unknown_build_types = []
msg = []
msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
msg.append('@{pf}~~@| traversing %d packages in topological order:' % len(ordered_packages))
for path, package in ordered_packages:
export_tags = [e.tagname for e in package.exports]
if 'build_type' in export_tags:
build_type_tag = [e.content for e in package.exports if e.tagname == 'build_type'][0]
else:
build_type_tag = 'catkin'
if build_type_tag == 'catkin':
msg.append('@{pf}~~@| - @!@{bf}' + package.name + '@|')
elif build_type_tag == 'cmake':
msg.append(
'@{pf}~~@| - @!@{bf}' + package.name + '@|' +
' (@!@{cf}plain cmake@|)'
)
else:
msg.append(
'@{pf}~~@| - @!@{bf}' + package.name + '@|' +
' (@{rf}unknown@|)'
)
unknown_build_types.append(package)
msg.append('@{pf}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + ('~' * len(str(len(ordered_packages)))))
for index in range(len(msg)):
msg[index] = fmt(msg[index])
print('\n'.join(msg))
# Error if there are packages with unknown build_types
if unknown_build_types:
print(colorize_line('Error: Packages with unknown build types exist'))
sys.exit('Can not build workspace with packages of unknown build_type')
# Check to see if the workspace has changed
if not force_cmake:
force_cmake, install_toggled = cmake_input_changed(
packages,
buildspace,
install=install,
cmake_args=cmake_args,
filename='catkin_make_isolated'
)
if force_cmake:
print('The packages or cmake arguments have changed, forcing cmake invocation')
elif install_toggled:
print('The install argument has been toggled, forcing cmake invocation on plain cmake package')
# Build packages
pkg_develspace = None
last_env = None
for index, path_package in enumerate(ordered_packages):
path, package = path_package
if merge:
pkg_develspace = develspace
else:
pkg_develspace = os.path.join(develspace, package.name)
if not build_packages or package.name in build_packages:
try:
export_tags = [e.tagname for e in package.exports]
is_cmake_package = 'cmake' in [e.content for e in package.exports if e.tagname == 'build_type']
last_env = build_package(
path, package,
workspace, buildspace, pkg_develspace, installspace,
install, force_cmake or (install_toggled and is_cmake_package),
quiet, last_env, cmake_args, make_args, catkin_make_args,
number=index + 1, of=len(ordered_packages)
)
except Exception as e:
import traceback
traceback.print_exc()
cprint(
'@{rf}@!<==@| ' +
'Failed to process package \'@!@{bf}' +
package.name + '@|\': \n ' +
('KeyboardInterrupt' if isinstance(e, KeyboardInterrupt)
else str(e))
)
if isinstance(e, subprocess.CalledProcessError):
cmd = ' '.join(e.cmd) if isinstance(e.cmd, list) else e.cmd
print(fmt("\n@{rf}Reproduce this error by running:"))
print(fmt("@{gf}@!==> @|") + cmd + "\n")
sys.exit('Command failed, exiting.')
else:
cprint("Skipping package: '@!@{bf}" + package.name + "@|'")
last_env = get_new_env(package, pkg_develspace, installspace, install, last_env)
# Provide a top level devel space environment setup script
if not os.path.exists(develspace):
os.makedirs(develspace)
if not build_packages:
generated_env_sh = os.path.join(develspace, 'env.sh')
generated_setup_sh = os.path.join(develspace, 'setup.sh')
generated_setup_util_py = os.path.join(develspace, '_setup_util.py')
if not merge and pkg_develspace:
# generate env.sh and setup.sh which relay to last devel space
with open(generated_env_sh, 'w') as f:
f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
{0} "$@"
""".format(os.path.join(pkg_develspace, 'env.sh')))
os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
with open(generated_setup_sh, 'w') as f:
f.write("""\
#!/usr/bin/env sh
# generated from catkin.builder module
. "{0}/setup.sh"
""".format(pkg_develspace))
elif not pkg_develspace:
# generate env.sh and setup.sh for an empty devel space
if 'CMAKE_PREFIX_PATH' in os.environ.keys():
variables = {
'CATKIN_GLOBAL_BIN_DESTINATION': 'bin',
'CATKIN_GLOBAL_LIB_DESTINATION': 'lib',
'CMAKE_PREFIX_PATH_AS_IS': ';'.join(os.environ['CMAKE_PREFIX_PATH'].split(os.pathsep)),
'PYTHON_INSTALL_DIR': get_python_install_dir(),
'SETUP_DIR': '',
}
with open(generated_setup_util_py, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', '_setup_util.py.in'), variables))
os.chmod(generated_setup_util_py, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
else:
sys.exit("Unable to process CMAKE_PREFIX_PATH from environment. Cannot generate environment files.")
variables = {
'SETUP_DIR': develspace,
'SETUP_FILENAME': 'setup'
}
with open(generated_env_sh, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'env.sh.in'), variables))
os.chmod(generated_env_sh, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)
variables = {'SETUP_DIR': develspace}
with open(generated_setup_sh, 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.sh.in'), variables))
if not merge and pkg_develspace:
            # remove _setup_util.py file which might have been generated for an empty devel space
if os.path.exists(generated_setup_util_py):
os.remove(generated_setup_util_py)
if not merge or not pkg_develspace:
# generate setup.bash and setup.zsh for convenience
variables = {'SETUP_DIR': develspace}
with open(os.path.join(develspace, 'setup.bash'), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.bash.in'), variables))
with open(os.path.join(develspace, 'setup.zsh'), 'w') as f:
f.write(configure_file(os.path.join(get_cmake_path(), 'templates', 'setup.zsh.in'), variables))
def cmake_input_changed(packages, build_path, install=None, cmake_args=None, filename='catkin_make'):
# get current input
package_paths = os.pathsep.join(sorted(packages.keys()))
cmake_args = ' '.join(cmake_args) if cmake_args else ''
# file to store current input
changed = False
install_toggled = False
input_filename = os.path.join(build_path, '%s.cache' % filename)
if not os.path.exists(input_filename):
changed = True
else:
# compare with previously stored input
with open(input_filename, 'r') as f:
previous_package_paths = f.readline().rstrip()
previous_cmake_args = f.readline().rstrip()
previous_install = f.readline().rstrip() == str(True)
if package_paths != previous_package_paths:
changed = True
if cmake_args != previous_cmake_args:
changed = True
if install is not None and install != previous_install:
install_toggled = True
# store current input for next invocation
with open(input_filename, 'w') as f:
f.write('%s\n%s\n%s' % (package_paths, cmake_args, install))
return changed, install_toggled
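# Illustrative contents of the cache file written above (three lines: package
# paths, cmake args, install flag; the values are made up and assume a POSIX
# path separator):
#   pkg_a:pkg_b
#   -DCMAKE_BUILD_TYPE=Release
#   False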
|
py | 1a476a6215b71c7c78dd49148d2890f190c78842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for gluon.contenttype
"""
import unittest
from .fix_path import fix_sys_path
fix_sys_path(__file__)
from gluon.contenttype import contenttype
from gluon._compat import iteritems
class TestContentType(unittest.TestCase):
def testTypeRecognition(self):
rtn = contenttype('.png')
self.assertEqual(rtn, 'image/png')
rtn = contenttype('.gif')
self.assertEqual(rtn, 'image/gif')
rtn = contenttype('.tar.bz2')
self.assertEqual(rtn, 'application/x-bzip-compressed-tar')
# test overrides and additions
mapping = {
'.load': 'text/html; charset=utf-8',
'.json': 'application/json',
'.jsonp': 'application/jsonp',
'.pickle': 'application/python-pickle',
'.w2p': 'application/w2p',
'.md': 'text/x-markdown; charset=utf-8'
}
for k, v in iteritems(mapping):
self.assertEqual(contenttype(k), v)
# test without dot extension
rtn = contenttype('png')
self.assertEqual(rtn, 'text/plain; charset=utf-8')
if __name__ == '__main__':
unittest.main()
|
py | 1a476bb37b3af4f400d6f38c70dbdc156ed690b6 | # %%
from numpy import array, matrix, zeros, empty, delete, insert, matmul, divide, add, subtract
from numpy import nanmax, seterr, shape
from numpy.linalg import solve
from scipy.sparse.linalg import spsolve
from scipy.sparse import csc_matrix
from math import isclose
from PyNite.Node3D import Node3D
from PyNite.Spring3D import Spring3D
from PyNite.Member3D import Member3D
from PyNite.Quad3D import Quad3D
from PyNite.Plate3D import Plate3D
from PyNite.LoadCombo import LoadCombo
# %%
class FEModel3D():
'''
A class representing a 3D finite element model.
'''
#%%
def __init__(self):
'''
Initializes a new 3D finite element model.
'''
self.Nodes = [] # A list of the structure's nodes
self.auxNodes = [] # A list of the structure's auxiliary nodes
self.Springs = [] # A list of the structure's springs
self.Members = [] # A list of the structure's members
        self.Quads = []       # A list of the structure's quadrilaterals
self.Plates = [] # A list of the structure's rectangular plates
self.__D = {} # A dictionary of the structure's nodal displacements by load combination
self.LoadCombos = {} # A dictionary of the structure's load combinations
#%%
def AddNode(self, Name, X, Y, Z):
'''
Adds a new node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
'''
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.Nodes.append(newNode)
#%%
def AddAuxNode(self, Name, X, Y, Z):
'''
Adds a new auxiliary node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
'''
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.auxNodes.append(newNode)
#%%
def AddSpring(self, Name, iNode, jNode, ks, tension_only=False, comp_only=False):
'''
Adds a new spring to the model.
Parameters
----------
Name : string
            A unique user-defined name for the spring.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
ks : number
The spring constant (force/displacement).
        tension_only : bool, optional
            Indicates if the spring is tension-only. Default is False.
        comp_only : bool, optional
            Indicates if the spring is compression-only. Default is False.
'''
# Create a new spring
newSpring = Spring3D(Name, self.GetNode(iNode), self.GetNode(jNode), ks,
self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
# Add the new member to the list
self.Springs.append(newSpring)
#%%
def AddMember(self, Name, iNode, jNode, E, G, Iy, Iz, J, A, auxNode=None,
tension_only=False, comp_only=False):
'''
Adds a new member to the model.
Parameters
----------
Name : string
A unique user-defined name for the member.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
E : number
The modulus of elasticity of the member.
G : number
The shear modulus of the member.
Iy : number
The moment of inertia of the member about its local y-axis.
Iz : number
The moment of inertia of the member about its local z-axis.
J : number
The polar moment of inertia of the member.
A : number
The cross-sectional area of the member.
auxNode : string, optional
            The name of the auxiliary node used to define the local z-axis.
The default is for the program to define the axis instead of
using an auxiliary node.
tension_only : bool, optional
Indicates if the member is tension-only. Default is False.
comp_only : bool, optional
Indicates if the member is compression-only. Default is False.
'''
# Create a new member
if auxNode == None:
newMember = Member3D(Name, self.GetNode(iNode),
self.GetNode(jNode), E, G, Iy, Iz, J, A,
LoadCombos=self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
else:
newMember = Member3D(Name, self.GetNode(iNode),
self.GetNode(jNode), E, G, Iy, Iz, J, A, self.GetAuxNode(auxNode),
self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
# Add the new member to the list
self.Members.append(newMember)
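    # Hypothetical usage sketch (units and values are illustrative only, not
    # part of the original module):
    #   model = FEModel3D()
    #   model.AddNode('N1', 0, 0, 0)
    #   model.AddNode('N2', 14*12, 0, 0)
    #   model.AddMember('M1', 'N1', 'N2', E=29000, G=11200,
    #                   Iy=100, Iz=150, J=250, A=20)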
#%%
def AddPlate(self, Name, iNode, jNode, mNode, nNode, t, E, nu):
'''
Adds a new plate to the model.
        Plates will be deprecated in a future version. Quadrilaterals are more
        versatile and will replace them.
Parameters
----------
Name : string
A unique user-defined name for the plate.
iNode : string
            The name of the i-node (1st node defined in clockwise order).
jNode : string
The name of the j-node (2nd node defined in clockwise order).
mNode : string
The name of the m-node (3rd node defined in clockwise order).
nNode : string
The name of the n-node (4th node defined in clockwise order).
t : number
The thickness of the plate.
E : number
The modulus of elasticity of the plate.
        nu : number
            Poisson's ratio for the plate.
'''
# Create a new member
newPlate = Plate3D(Name, self.GetNode(iNode), self.GetNode(jNode), self.GetNode(mNode), self.GetNode(nNode), t, E, nu)
# Add the new member to the list
self.Plates.append(newPlate)
#%%
def AddQuad(self, Name, iNode, jNode, mNode, nNode, t, E, nu):
'''
Adds a new quadrilateral to the model.
Quadrilaterals are similar to plates, except they do not have to be
rectangular. Plates will be dapricated in a future version. Note that
quadrilateral nodes are defined in counter-clockwise order instead of
the clockwise order that plates have used up to this point.
Parameters
----------
Name : string
A unique user-defined name for the quadrilateral.
iNode : string
            The name of the i-node (1st node defined in counter-clockwise order).
jNode : string
The name of the j-node (2nd node defined in counter-clockwise order).
mNode : string
The name of the m-node (3rd node defined in counter-clockwise order).
nNode : string
The name of the n-node (4th node defined in counter-clockwise order).
t : number
The thickness of the quadrilateral.
E : number
The modulus of elasticity of the quadrilateral.
        nu : number
            Poisson's ratio for the quadrilateral.
'''
# Create a new member
newQuad = Quad3D(Name, self.GetNode(iNode), self.GetNode(jNode), self.GetNode(mNode), self.GetNode(nNode), t, E, nu)
# Add the new member to the list
self.Quads.append(newQuad)
#%%
def RemoveNode(self, Node):
'''
Removes a node from the model. All nodal loads associated with the
node and members attached to the node will also be removed.
Parameters
----------
Node : string
The name of the node to be removed.
'''
# Remove the node. Nodal loads are stored within the node, so they
# will be deleted automatically when the node is deleted.
self.Nodes.remove(self.GetNode(Node))
# Find any members attached to the node and remove them
self.Members = [member for member in self.Members if member.iNode.Name != Node and member.jNode.Name != Node]
#%%
def RemoveSpring(self, Spring):
'''
Removes a spring from the model.
Parameters
----------
Spring : string
The name of the spring to be removed.
'''
# Remove the spring.
self.Springs.remove(self.GetSpring(Spring))
#%%
def RemoveMember(self, Member):
'''
Removes a member from the model. All member loads associated with the
member will also be removed.
Parameters
----------
Member : string
The name of the member to be removed.
'''
# Remove the member. Member loads are stored within the member, so they
# will be deleted automatically when the member is deleted.
self.Members.remove(self.GetMember(Member))
#%%
def DefineSupport(self, Node, SupportDX=False, SupportDY=False, SupportDZ=False, SupportRX=False, SupportRY=False, SupportRZ=False):
'''
Defines the support conditions at a node.
Nodes will default to fully unsupported unless specified otherwise.
Parameters
----------
Node : string
The name of the node where the support is being defined
SupportDX : number
Indicates whether the node is supported against translation in the global X-direction.
SupportDY : number
Indicates whether the node is supported against translation in the global Y-direction.
SupportDZ : number
Indicates whether the node is supported against translation in the global Z-direction.
SupportRX : number
Indicates whether the node is supported against rotation about the global X-axis.
SupportRY : number
Indicates whether the node is supported against rotation about the global Y-axis.
SupportRZ : number
Indicates whether the node is supported against rotation about the global Z-axis.
'''
# Get the node to be supported
node = self.GetNode(Node)
# Set the node's support conditions
node.SupportDX = SupportDX
node.SupportDY = SupportDY
node.SupportDZ = SupportDZ
node.SupportRX = SupportRX
node.SupportRY = SupportRY
node.SupportRZ = SupportRZ
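    # Hypothetical usage sketch (illustrative only): a fully fixed support at
    # 'N1' and a pinned support at 'N2'.
    #   model.DefineSupport('N1', True, True, True, True, True, True)
    #   model.DefineSupport('N2', True, True, True, False, False, False)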
#%%
def AddNodeDisplacement (self, Node, Direction, Magnitude):
'''
Defines a nodal displacement at a node.
Node : string
The name of the node where the nodal displacement is being applied.
Direction : {'DX', 'DY', 'DZ', 'RX', 'RY', 'RZ'}
The global direction the nodal displacement is being applied in. Displacements are 'DX', 'DY', and 'DZ'. Rotations are 'RX', 'RY', and 'RZ'.
Sign convention follows the model's global coordinate system.
Magnitude : number
The magnitude of the displacement.
'''
# Validate the value of Direction
if Direction not in ('DX', 'DY', 'DZ', 'RX', 'RY', 'RZ'):
raise ValueError(f"Direction must be 'DX', 'DY', 'DZ', 'RX', 'RY', or 'RZ'. {Direction} was given.")
# Get the node
node = self.GetNode(Node)
if Direction == 'DX':
node.EnforcedDX = Magnitude
if Direction == 'DY':
node.EnforcedDY = Magnitude
if Direction == 'DZ':
node.EnforcedDZ = Magnitude
if Direction == 'RX':
node.EnforcedRX = Magnitude
if Direction == 'RY':
node.EnforcedRY = Magnitude
if Direction == 'RZ':
node.EnforcedRZ = Magnitude
#%%
def DefineReleases(self, Member, Dxi=False, Dyi=False, Dzi=False, Rxi=False, Ryi=False, Rzi=False, Dxj=False, Dyj=False, Dzj=False, Rxj=False, Ryj=False, Rzj=False):
'''
Defines member end releases.
All member end releases will default to unreleased unless specified otherwise.
Parameters
----------
Member : string
The name of the member to have its releases modified.
Dxi : boolean
Indicates whether the member is released axially at its start.
Dyi : boolean
Indicates whether the member is released for shear in the local y-axis at its start.
Dzi : boolean
Indicates whether the member is released for shear in the local z-axis at its start.
Rxi : boolean
Indicates whether the member is released for torsion at its start.
Ryi : boolean
Indicates whether the member is released for moment about the local y-axis at its start.
Rzi : boolean
Indicates whether the member is released for moment about the local z-axis at its start.
Dxj : boolean
Indicates whether the member is released axially at its end.
Dyj : boolean
Indicates whether the member is released for shear in the local y-axis at its end.
Dzj : boolean
            Indicates whether the member is released for shear in the local z-axis at its end.
Rxj : boolean
Indicates whether the member is released for torsion at its end.
Ryj : boolean
Indicates whether the member is released for moment about the local y-axis at its end.
Rzj : boolean
Indicates whether the member is released for moment about the local z-axis at its end.
'''
# Apply the end releases to the member
self.GetMember(Member).Releases = [Dxi, Dyi, Dzi, Rxi, Ryi, Rzi, Dxj, Dyj, Dzj, Rxj, Ryj, Rzj]
#%%
def AddLoadCombo(self, name, factors, combo_type='strength'):
'''
Adds a load combination to the model
Parameters
----------
name : string
A unique name for the load combination (e.g. '1.2D+1.6L+0.5S' or 'Gravity Combo').
factors : dictionary
A dictionary containing load cases and their corresponding factors (e.g. {'D':1.2, 'L':1.6, 'S':0.5}).
combo_type : string
A description of the type of load combination (e.g. 'strength', 'service'). Currently
this does nothing in the program, and is a placeholder for future features.
'''
# Create a new load combination object
new_combo = LoadCombo(name, combo_type, factors)
# Add the load combination to the dictionary of load combinations
self.LoadCombos[name] = new_combo
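    # Hypothetical usage sketch (illustrative only): a strength combination
    # built from 'D' and 'L' load cases.
    #   model.AddLoadCombo('1.2D+1.6L', factors={'D': 1.2, 'L': 1.6})
    #   model.AddNodeLoad('N2', 'FY', -5, case='D')
    #   model.AddNodeLoad('N2', 'FY', -12, case='L')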
#%%
def AddNodeLoad(self, Node, Direction, P, case='Case 1'):
'''
Adds a nodal load to the model.
Parameters
----------
Node : string
The name of the node where the load is being applied.
Direction : {'FX', 'FY', 'FZ', 'MX', 'MY', 'MZ'}
The global direction the load is being applied in. Forces are 'FX', 'FY', and 'FZ'. Moments are 'MX', 'MY', and 'MZ'.
P : number
The numeric value (magnitude) of the load.
case : string
The name of the load case the load belongs to.
'''
# Validate the value of Direction
if Direction not in ('FX', 'FY', 'FZ', 'MX', 'MY', 'MZ'):
raise ValueError(f"Direction must be 'FX', 'FY', 'FZ', 'MX', 'MY', or 'MZ'. {Direction} was given.")
# Add the node load to the model
self.GetNode(Node).NodeLoads.append((Direction, P, case))
#%%
def AddMemberPtLoad(self, Member, Direction, P, x, case='Case 1'):
'''
Adds a member point load to the model.
Parameters
----------
Member : string
The name of the member the load is being applied to.
Direction : {'Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'}
The direction in which the force is to be applied. Note that
typical beam sign convention is used. Transverse forces acting
toward the beam are positive. Moments are positive if they act
counter-clockwise relative to the beam's local coordinate system.
Torsional point loads follow the right hand rule for sign convention.
P : number
The numeric value (magnitude) of the load.
x : number
The load's location along the member's local x-axis.
'''
# Validate the value of Direction
if Direction not in ('Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'):
raise ValueError(f"Direction must be 'Fx', 'Fy', 'Fz', 'Mx', 'My', or 'Mz'. {Direction} was given.")
# Add the point load to the member
self.GetMember(Member).PtLoads.append((Direction, P, x, case))
#%%
def AddMemberDistLoad(self, Member, Direction, w1, w2, x1=None, x2=None, case='Case 1'):
'''
Adds a member distributed load to the model.
Parameters
----------
Member : string
            The name of the member the load is being applied to.
Direction : {'Fx', 'Fy', 'Fz'}
The direction in which the load is to be applied. Note that
typical beam sign convention is used. Forces acting toward the beam
are positive.
w1 : number
The starting value (magnitude) of the load.
w2 : number
The ending value (magnitude) of the load.
x1 : number
The load's start location along the member's local x-axis. If this argument
is not specified, the start of the member will be used.
x2 : number
The load's end location along the member's local x-axis. If this argument
is not specified, the end of the member will be used.
'''
# Validate the value of Direction
if Direction not in ('Fx', 'Fy', 'Fz'):
raise ValueError(f"Direction must be 'Fx', 'Fy', 'Fz'. {Direction} was given.")
# Determine if a starting and ending points for the load have been specified.
# If not, use the member start and end as defaults
if x1 == None:
start = 0
else:
start = x1
if x2 == None:
end = self.GetMember(Member).L()
else:
end = x2
# Add the distributed load to the member
self.GetMember(Member).DistLoads.append((Direction, w1, w2, start, end, case))
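    # Hypothetical usage sketch (illustrative only): a uniform load over the
    # full member length and a triangular load over the first 84 length units.
    #   model.AddMemberDistLoad('M1', 'Fy', w1=-0.25, w2=-0.25)
    #   model.AddMemberDistLoad('M1', 'Fy', w1=0, w2=-0.5, x1=0, x2=84)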
#%%
def AddPlateSurfacePressure(self, plate_ID, pressure, case='Case 1'):
'''
Adds a surface pressure to the rectangular plate element.
'''
# Add the surface pressure to the rectangle
self.GetPlate(plate_ID).pressures.append([pressure, case])
#%%
def AddQuadSurfacePressure(self, quad_ID, pressure, case='Case 1'):
'''
Adds a surface pressure to the quadrilateral element.
'''
# Add the surface pressure to the quadrilateral
self.GetQuad(quad_ID).pressures.append([pressure, case])
#%%
def ClearLoads(self):
'''
Clears all loads from the model along with any results based on the loads.
'''
# Clear out the member loads and the calculated internal forces
for member in self.Members:
member.DistLoads = []
member.PtLoads = []
member.SegmentsZ = []
member.SegmentsY = []
member.SegmentsX = []
# Clear out the nodal loads, calculated displacements, and calculated reactions
for node in self.Nodes:
node.NodeLoads = []
node.DX = {}
node.DY = {}
node.DZ = {}
node.RX = {}
node.RY = {}
node.RZ = {}
node.RxnFX = {}
node.RxnFY = {}
node.RxnFZ = {}
node.RxnMX = {}
node.RxnMY = {}
node.RxnMZ = {}
#%%
def GetNode(self, Name):
'''
Returns the node with the given name.
Parameters
----------
Name : string
The name of the node to be returned.
'''
# Step through each node in the 'Nodes' list
for node in self.Nodes:
# Check the name of the node
if node.Name == Name:
# Return the node of interest
return node
# if the node name is not found and loop finishes
raise ValueError(f"Node '{Name}' was not found in the model")
def GetAuxNode(self, Name):
'''
Returns the auxiliary node with the given name.
Parameters
----------
Name : string
The name of the auxiliary node to be returned.
'''
# Step through each node in the 'Nodes' list
for node in self.auxNodes:
# Check the name of the node
if node.Name == Name:
# Return the node of interest
return node
# If the node name is not found and loop finishes
raise ValueError(f"AuxNode '{Name}' was not found in the model")
#%%
def GetSpring(self, Name):
'''
Returns the spring with the given name.
Parameters
----------
Name : string
The name of the spring to be returned.
'''
# Step through each spring in the 'Springs' list
for spring in self.Springs:
# Check the name of the member
if spring.Name == Name:
# Return the spring of interest
return spring
# If the spring name is not found and loop finishes
raise ValueError(f"Spring '{Name}' was not found in the model")
#%%
def GetMember(self, Name):
'''
Returns the member with the given name.
Parameters
----------
Name : string
The name of the member to be returned.
'''
# Step through each member in the 'Members' list
for member in self.Members:
# Check the name of the member
if member.Name == Name:
# Return the member of interest
return member
# If the member name is not found and loop finishes
raise ValueError(f"Member '{Name}' was not found in the model")
#%%
def GetPlate(self, Name):
'''
Returns the plate with the given name.
Parameters
----------
Name : string
The name of the plate to be returned.
'''
# Step through each plate in the 'Plates' list
for plate in self.Plates:
# Check the name of the plate
if plate.Name == Name:
# Return the plate of interest
return plate
# Raise an exception if the plate name is not found and loop finishes
raise ValueError(f"Plate '{Name}' was not found in the model")
#%%
def GetQuad(self, Name):
'''
Returns the quadrilateral with the given name.
Parameters
----------
Name : string
The name of the quadrilateral to be returned.
'''
# Step through each quadrilateral in the 'Quads' list
for quad in self.Quads:
# Check the name of the quadrilateral
if quad.Name == Name:
# Return the quadrilateral of interest
return quad
        # Raise an exception if the quadrilateral name is not found and the loop
        # finishes
raise ValueError(f"Quadrilateral '{Name}' was not found in the model")
#%%
def __Renumber(self):
'''
        Assigns node, spring, member, plate, and quadrilateral ID numbers to be used internally
        by the program. Numbers are assigned according to the order in which the elements were
        added to the model.
'''
# Number each node in the model
i = 0
for node in self.Nodes:
node.ID = i
i += 1
# Number each spring in the model
i = 0
for spring in self.Springs:
spring.ID = i
i += 1
# Number each member in the model
i = 0
for member in self.Members:
member.ID = i
i += 1
# Number each plate in the model
i = 0
for plate in self.Plates:
plate.ID = i
i += 1
# Number each quadrilateral in the model
i = 0
for quad in self.Quads:
quad.ID = i
i += 1
#%%
def __AuxList(self):
'''
        Builds lists of the known nodal displacements and of the positions (indices) of the
        known and unknown nodal displacements in the global stiffness matrix.
        Returns
        -------
        D1_indices : list
            A list of the global matrix indices for the unknown nodal displacements
        D2_indices : list
            A list of the global matrix indices for the known nodal displacements
        D2 : list
            A list of the known nodal displacements
'''
D1_indices = [] # A list of the indices for the unknown nodal displacements
D2_indices = [] # A list of the indices for the known nodal displacements
D2 = [] # A list of the values of the known nodal displacements (D != None)
# Create the auxiliary table
for node in self.Nodes:
# Unknown displacement DX
if node.SupportDX == False and node.EnforcedDX == None:
D1_indices.append((node.ID*6) + 0)
# Known displacement DX
elif node.EnforcedDX != None:
D2_indices.append((node.ID*6) + 0)
D2.append(node.EnforcedDX)
# Support at DX
else:
D2_indices.append((node.ID*6) + 0)
D2.append(0.0)
# Unknown displacement DY
if node.SupportDY == False and node.EnforcedDY == None:
D1_indices.append((node.ID*6) + 1)
# Known displacement DY
elif node.EnforcedDY != None:
D2_indices.append((node.ID*6) + 1)
D2.append(node.EnforcedDY)
# Support at DY
else:
D2_indices.append((node.ID*6) + 1)
D2.append(0.0)
# Unknown displacement DZ
if node.SupportDZ == False and node.EnforcedDZ == None:
D1_indices.append((node.ID*6) + 2)
# Known displacement DZ
elif node.EnforcedDZ != None:
D2_indices.append((node.ID*6) + 2)
D2.append(node.EnforcedDZ)
# Support at DZ
else:
D2_indices.append((node.ID*6) + 2)
D2.append(0.0)
# Unknown displacement RX
if node.SupportRX == False and node.EnforcedRX == None:
D1_indices.append((node.ID*6) + 3)
# Known displacement RX
elif node.EnforcedRX != None:
D2_indices.append((node.ID*6) + 3)
D2.append(node.EnforcedRX)
# Support at RX
else:
D2_indices.append((node.ID*6) + 3)
D2.append(0.0)
# Unknown displacement RY
if node.SupportRY == False and node.EnforcedRY == None:
D1_indices.append((node.ID*6) + 4)
# Known displacement RY
elif node.EnforcedRY != None:
D2_indices.append((node.ID*6) + 4)
D2.append(node.EnforcedRY)
# Support at RY
else:
D2_indices.append((node.ID*6) + 4)
D2.append(0.0)
# Unknown displacement RZ
if node.SupportRZ == False and node.EnforcedRZ == None:
D1_indices.append((node.ID*6) + 5)
# Known displacement RZ
elif node.EnforcedRZ != None:
D2_indices.append((node.ID*6) + 5)
D2.append(node.EnforcedRZ)
# Support at RZ
else:
D2_indices.append((node.ID*6) + 5)
D2.append(0.0)
# Return the indices and the known displacements
return D1_indices, D2_indices, D2
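        # Note on the indexing scheme used above (worked example with a hypothetical node):
        # each node occupies 6 consecutive rows/columns in the global matrices, ordered
        # DX, DY, DZ, RX, RY, RZ. For a node with ID = 2, its DX degree of freedom maps to
        # global index 2*6 + 0 = 12 and its RZ degree of freedom maps to 2*6 + 5 = 17.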
#%%
    def K(self, combo_name='Combo 1'):
'''
Assembles and returns the global stiffness matrix.
'''
# Initialize a zero matrix to hold all the stiffness terms
K = zeros((len(self.Nodes)*6, len(self.Nodes)*6))
# Add stiffness terms for each spring in the model
print('...Adding spring stiffness terms to global stiffness matrix')
for spring in self.Springs:
if spring.active[combo_name] == True:
# Get the spring's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
spring_K = spring.K()
# Step through each term in the spring's stiffness matrix
# 'a' & 'b' below are row/column indices in the spring's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = spring.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = spring.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = spring.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = spring.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + spring_K.item((a, b)))
# Add stiffness terms for each member in the model
print('...Adding member stiffness terms to global stiffness matrix')
for member in self.Members:
if member.active[combo_name] == True:
# Get the member's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_K = member.K()
# Step through each term in the member's stiffness matrix
# 'a' & 'b' below are row/column indices in the member's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + member_K.item((a, b)))
# Add stiffness terms for each quadrilateral in the model
print('...Adding quadrilateral stiffness terms to global stiffness matrix')
for quad in self.Quads:
# Get the quadrilateral's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_K = quad.K()
# Step through each term in the quadrilateral's stiffness matrix
# 'a' & 'b' below are row/column indices in the quadrilateral's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(24):
# Determine which node the index 'a' is related to
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.mNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.nNode.ID*6 + (a-6)
elif a < 18:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.iNode.ID*6 + (a-12)
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.jNode.ID*6 + (a-18)
for b in range(24):
# Determine which node the index 'b' is related to
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.mNode.ID*6 + b
elif b < 12:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.nNode.ID*6 + (b-6)
elif b < 18:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.iNode.ID*6 + (b-12)
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.jNode.ID*6 + (b-18)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + quad_K.item((a, b)))
# Add stiffness terms for each plate in the model
print('...Adding plate stiffness terms to global stiffness matrix')
for plate in self.Plates:
# Get the plate's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_K = plate.K()
# Step through each term in the plate's stiffness matrix
# 'a' & 'b' below are row/column indices in the plate's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(24):
# Determine which node the index 'a' is related to
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.iNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.nNode.ID*6 + (a-6)
elif a < 18:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.mNode.ID*6 + (a-12)
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.jNode.ID*6 + (a-18)
for b in range(24):
# Determine which node the index 'b' is related to
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.iNode.ID*6 + b
elif b < 12:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.nNode.ID*6 + (b-6)
elif b < 18:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.mNode.ID*6 + (b-12)
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.jNode.ID*6 + (b-18)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + plate_K.item((a, b)))
# Return the global stiffness matrix
return K
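        # Optional sanity-check sketch (assumes NumPy's `allclose` is available through this
        # module's numpy import): the assembled global stiffness matrix should be symmetric,
        # so a debugging assertion such as
        #
        #     assert allclose(K, K.T)
        #
        # can help catch element-assembly errors.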
#%%
def Kg(self, combo_name='Combo 1'):
'''
Assembles and returns the global geometric stiffness matrix.
The model must have a static solution prior to obtaining the geometric stiffness matrix.
        The geometric stiffness of plates and quadrilaterals is not included; only member
        geometric stiffness is assembled.
Parameters
----------
combo_name : string
The name of the load combination to derive the matrix for (not the load combination itself).
'''
# Initialize a zero matrix to hold all the stiffness terms
Kg = zeros((len(self.Nodes)*6, len(self.Nodes)*6))
# Add stiffness terms for each member in the model
print('...Adding member geometric stiffness terms to global geometric stiffness matrix')
for member in self.Members:
if member.active[combo_name] == True:
# Calculate the axial force in the member
E = member.E
A = member.A
L = member.L()
d = member.d(combo_name)
P = E*A/L*(d[6, 0] - d[0, 0])
# Get the member's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_Kg = member.Kg(P)
# Step through each term in the member's stiffness matrix
# 'a' & 'b' below are row/column indices in the member's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
Kg.itemset((m, n), Kg.item((m, n)) + member_Kg.item((a, b)))
# Return the global geometric stiffness matrix
return Kg
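        # Worked example for the axial force used above (hypothetical numbers): with
        # E = 29000, A = 10, L = 120 and a relative axial displacement
        # d[6, 0] - d[0, 0] = 0.01, the member force is P = 29000*10/120*0.01, which is
        # about 24.2. A positive P here corresponds to elongation of the member.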
#%%
def FER(self, combo_name='Combo 1'):
'''
Assembles and returns the global fixed end reaction vector.
Parameters
----------
combo_name : string
The name of the load combination to get the fixed end reaction vector for (not the load combination itself).
'''
# Initialize a zero vector to hold all the terms
FER = zeros((len(self.Nodes) * 6, 1))
# Add terms for each member in the model
for member in self.Members:
# Get the member's global fixed end reaction vector
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_FER = member.FER(combo_name)
# Step through each term in the member's fixed end reaction vector
# 'a' below is the row index in the member's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.iNode.ID * 6 + a
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.jNode.ID * 6 + (a - 6)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + member_FER[a, 0])
        # Add terms for each plate in the model
        for plate in self.Plates:
            # Get the plate's global fixed end reaction vector
            # Storing it as a local variable eliminates the need to rebuild it every time a term is needed
            plate_FER = plate.FER(combo_name)
            # Step through each term in the plate's fixed end reaction vector
            # 'a' below is the row index in the plate's fixed end reaction vector
            # 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(24):
# Determine if index 'a' is related to the i-node, j-node, m-node, or n-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.iNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.nNode.ID*6 + (a - 6)
elif a < 18:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.mNode.ID*6 + (a - 12)
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = plate.jNode.ID*6 + (a - 18)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + plate_FER[a, 0])
# Add terms for each quadrilateral in the model
for quad in self.Quads:
# Get the quadrilateral's global fixed end reaction vector
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_FER = quad.FER(combo_name)
# Step through each term in the quadrilateral's fixed end reaction vector
# 'a' below is the row index in the quadrilateral's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(24):
# Determine if index 'a' is related to the i-node, j-node, m-node, or n-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.mNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.nNode.ID*6 + (a - 6)
elif a < 18:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.iNode.ID*6 + (a - 12)
else:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = quad.jNode.ID*6 + (a - 18)
# Now that 'm' is known, place the term in the global fixed end reaction vector
FER.itemset((m, 0), FER[m, 0] + quad_FER[a, 0])
# Return the global fixed end reaction vector
return FER
#%%
def P(self, combo_name='Combo 1'):
'''
Assembles and returns the global nodal force vector.
Parameters
----------
combo_name : string
The name of the load combination to get the force vector for (not the load combination itself).
'''
# Initialize a zero vector to hold all the terms
P = zeros((len(self.Nodes)*6, 1))
# Add terms for each node in the model
for node in self.Nodes:
# Get the node's ID
ID = node.ID
# Get the load combination for the given 'combo_name'
combo = self.LoadCombos[combo_name]
# Step through each load factor in the load combination
for case, factor in combo.factors.items():
# Add the node's loads to the global nodal load vector
for load in node.NodeLoads:
if load[2] == case:
if load[0] == 'FX':
P.itemset((ID*6 + 0, 0), P[ID*6 + 0, 0] + factor*load[1])
elif load[0] == 'FY':
P.itemset((ID*6 + 1, 0), P[ID*6 + 1, 0] + factor*load[1])
elif load[0] == 'FZ':
P.itemset((ID*6 + 2, 0), P[ID*6 + 2, 0] + factor*load[1])
elif load[0] == 'MX':
P.itemset((ID*6 + 3, 0), P[ID*6 + 3, 0] + factor*load[1])
elif load[0] == 'MY':
P.itemset((ID*6 + 4, 0), P[ID*6 + 4, 0] + factor*load[1])
elif load[0] == 'MZ':
P.itemset((ID*6 + 5, 0), P[ID*6 + 5, 0] + factor*load[1])
# Return the global nodal force vector
return P
#%%
def D(self, combo_name='Combo 1'):
'''
Returns the global displacement vector for the model.
Parameters
----------
combo_name : string
The name of the load combination to get the displacements for (not the load combination itself).
'''
# Return the global displacement vector
return self.__D[combo_name]
#%%
def __Partition(self, unp_matrix, D1_indices, D2_indices):
'''
        Partitions a matrix (or vector) into submatrices based on degree of freedom boundary conditions.
        Parameters
        ----------
        unp_matrix : matrix
            The unpartitioned matrix (or vector) to be partitioned.
        D1_indices : list
            The global matrix indices for the unknown nodal displacements.
        D2_indices : list
            The global matrix indices for the known nodal displacements.
        '''
if unp_matrix.shape[1] == 1:
m1 = unp_matrix[D1_indices, :]
m2 = unp_matrix[D2_indices, :]
return m1, m2
else:
m11 = unp_matrix[D1_indices, :][:, D1_indices]
m12 = unp_matrix[D1_indices, :][:, D2_indices]
m21 = unp_matrix[D2_indices, :][:, D1_indices]
m22 = unp_matrix[D2_indices, :][:, D2_indices]
return m11, m12, m21, m22
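        # Worked example (hypothetical indices): with D1_indices = [0, 2] and D2_indices = [1],
        # a 3x3 matrix is split into m11 (2x2, rows/columns 0 and 2), m12 (2x1), m21 (1x2) and
        # m22 (1x1), while a 3x1 vector is split into m1 (2x1) and m2 (1x1).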
#%%
def Analyze(self, check_statics=False, max_iter=30, sparse=True):
'''
Performs first-order static analysis.
Iterations are performed if tension-only members or
compression-only members are present.
Parameters
----------
check_statics : bool, optional
When set to True, causes a statics check to be performed
max_iter : number, optional
The maximum number of iterations to try to get convergence
for tension/compression-only analysis.
sparse : bool, optional
Indicates whether the sparse matrix solver should be used. A matrix can be considered
            sparse or dense depending on how many zero terms there are. Structural stiffness
matrices often contain many zero terms. The sparse solver can offer faster solutions
for such matrices. Using the sparse solver on dense matrices may lead to slower
solution times.
'''
print('+-----------+')
print('| Analyzing |')
print('+-----------+')
# Assign an ID to all nodes and elements in the model
self.__Renumber()
# Ensure there is at least 1 load combination to solve if the user didn't define any
if self.LoadCombos == {}:
# Create and add a default load combination to the dictionary of load combinations
self.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})
# Activate all springs and members for all load combinations
for spring in self.Springs:
for combo_name in self.LoadCombos.keys():
spring.active[combo_name] = True
for member in self.Members:
for combo_name in self.LoadCombos.keys():
member.active[combo_name] = True
# Get the auxiliary list used to determine how the matrices will be partitioned
D1_indices, D2_indices, D2 = self.__AuxList()
# Convert D2 from a list to a matrix
D2 = matrix(D2).T
# Step through each load combination
for combo in self.LoadCombos.values():
print('')
print('...Analyzing load combination ' + combo.name)
# Keep track of the number of iterations
iter_count = 1
convergence = False
divergence = False
# Iterate until convergence or divergence occurs
while convergence == False and divergence == False:
# Get the partitioned global stiffness matrix K11, K12, K21, K22
K11, K12, K21, K22 = self.__Partition(self.K(combo.name), D1_indices, D2_indices)
# Get the partitioned global fixed end reaction vector
FER1, FER2 = self.__Partition(self.FER(combo.name), D1_indices, D2_indices)
# Get the partitioned global nodal force vector
P1, P2 = self.__Partition(self.P(combo.name), D1_indices, D2_indices)
# Calculate the global displacement vector
print('...Calculating global displacement vector for load combination', combo.name)
if K11.shape == (0, 0):
# All displacements are known, so D1 is an empty vector
D1 = []
else:
try:
# Calculate the unknown displacements D1
if sparse == True:
D1 = spsolve(csc_matrix(K11), subtract(subtract(P1, FER1), matmul(K12, D2)))
D1 = D1.reshape(len(D1), 1)
else:
D1 = solve(K11, subtract(subtract(P1, FER1), matmul(K12, D2)))
except:
                        # Raise an exception if 'K' is singular, since that implies rigid body motion
raise Exception('The stiffness matrix is singular, which implies rigid body motion. The structure is unstable. Aborting analysis.')
# Form the global displacement vector, D, from D1 and D2
D = zeros((len(self.Nodes)*6, 1))
for node in self.Nodes:
if D2_indices.count(node.ID*6 + 0) == 1:
D.itemset((node.ID*6 + 0, 0), D2[D2_indices.index(node.ID*6 + 0), 0])
else:
D.itemset((node.ID*6 + 0, 0), D1[D1_indices.index(node.ID*6 + 0), 0])
if D2_indices.count(node.ID*6 + 1) == 1:
D.itemset((node.ID*6 + 1, 0), D2[D2_indices.index(node.ID*6 + 1), 0])
else:
D.itemset((node.ID*6 + 1, 0), D1[D1_indices.index(node.ID*6 + 1), 0])
if D2_indices.count(node.ID*6 + 2) == 1:
D.itemset((node.ID*6 + 2, 0), D2[D2_indices.index(node.ID*6 + 2), 0])
else:
D.itemset((node.ID*6 + 2, 0), D1[D1_indices.index(node.ID*6 + 2), 0])
if D2_indices.count(node.ID*6 + 3) == 1:
D.itemset((node.ID*6 + 3, 0), D2[D2_indices.index(node.ID*6 + 3), 0])
else:
D.itemset((node.ID*6 + 3, 0), D1[D1_indices.index(node.ID*6 + 3), 0])
if D2_indices.count(node.ID*6 + 4) == 1:
D.itemset((node.ID*6 + 4, 0), D2[D2_indices.index(node.ID*6 + 4), 0])
else:
D.itemset((node.ID*6 + 4, 0), D1[D1_indices.index(node.ID*6 + 4), 0])
if D2_indices.count(node.ID*6 + 5) == 1:
D.itemset((node.ID*6 + 5, 0), D2[D2_indices.index(node.ID*6 + 5), 0])
else:
D.itemset((node.ID*6 + 5, 0), D1[D1_indices.index(node.ID*6 + 5), 0])
# Save the global displacement vector
self.__D[combo.name] = D
# Store the calculated global nodal displacements into each node
for node in self.Nodes:
node.DX[combo.name] = D[node.ID*6 + 0, 0]
node.DY[combo.name] = D[node.ID*6 + 1, 0]
node.DZ[combo.name] = D[node.ID*6 + 2, 0]
node.RX[combo.name] = D[node.ID*6 + 3, 0]
node.RY[combo.name] = D[node.ID*6 + 4, 0]
node.RZ[combo.name] = D[node.ID*6 + 5, 0]
# Check for divergence
if iter_count > max_iter:
divergence = True
raise Exception('...Model diverged during tension/compression-only analysis')
# Assume the model has converged (to be checked below)
convergence = True
# Check tension-only and compression-only springs
print('...Checking for tension/compression-only spring convergence')
for spring in self.Springs:
if spring.active[combo.name] == True:
# Check if tension-only conditions exist
if spring.tension_only == True and spring.Axial(combo.name) > 0:
spring.active[combo.name] = False
convergence = False
# Check if compression-only conditions exist
elif spring.comp_only == True and spring.Axial(combo.name) < 0:
spring.active[combo.name] = False
convergence = False
# Check tension-only and compression-only members
print('...Checking for tension/compression-only member convergence')
for member in self.Members:
# Only run the tension/compression only check if the member is still active
if member.active[combo.name] == True:
# Check if tension-only conditions exist
if member.tension_only == True and member.MaxAxial(combo.name) > 0:
member.active[combo.name] = False
convergence = False
# Check if compression-only conditions exist
elif member.comp_only == True and member.MinAxial(combo.name) < 0:
member.active[combo.name] = False
convergence = False
if convergence == False:
print('...Tension/compression-only analysis did not converge. Adjusting stiffness matrix and reanalyzing.')
else:
print('...Tension/compression-only analysis converged after ' + str(iter_count) + ' iteration(s)')
# Keep track of the number of tension/compression only iterations
iter_count += 1
# Calculate reactions
self.__CalcReactions()
print('...Analysis complete')
print('')
# Check statics if requested
if check_statics == True:
self.__CheckStatics()
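        # Illustrative usage sketch (assumes 'model' is an instance of this class with nodes,
        # elements, supports, and loads already defined and no user-defined load combinations,
        # so the default 'Combo 1' is used; the node name 'N1' is hypothetical):
        #
        #     model.Analyze(check_statics=True, sparse=True)
        #     print(model.GetNode('N1').DZ['Combo 1'])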
#%%
def Analyze_PDelta(self, max_iter=30, tol=0.01, sparse=True):
'''
Performs second order (P-Delta) analysis.
Parameters
----------
max_iter : number
The maximum number of iterations permitted. If this value is exceeded the program will
report divergence.
tol : number
The deflection tolerance (as a percentage) between iterations that will be used to
define whether the model has converged (e.g. 0.01 = deflections must converge within 1%
between iterations).
sparse : bool, optional
Indicates whether the sparse matrix solver should be used. A matrix can be considered
            sparse or dense depending on how many zero terms there are. Structural stiffness
matrices often contain many zero terms. The sparse solver can offer faster solutions
for such matrices. Using the sparse solver on dense matrices may lead to slower
solution times.
'''
print('+--------------------+')
print('| Analyzing: P-Delta |')
print('+--------------------+')
# Assign an ID to all nodes and elements in the model
self.__Renumber()
# Ensure there is at least 1 load combination to solve if the user didn't define any
if self.LoadCombos == {}:
# Create and add a default load combination to the dictionary of load combinations
self.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})
# Activate all springs and members for all load combinations. They can be turned inactive
# during the course of the tension/compression-only analysis
for spring in self.Springs:
for combo_name in self.LoadCombos.keys():
spring.active[combo_name] = True
for member in self.Members:
for combo_name in self.LoadCombos.keys():
member.active[combo_name] = True
# Get the auxiliary list used to determine how the matrices will be partitioned
D1_indices, D2_indices, D2 = self.__AuxList()
# Convert D2 from a list to a matrix
D2 = array(D2, ndmin=2).T
# Step through each load combination
for combo in self.LoadCombos.values():
print('')
print('...Analyzing load combination ' + combo.name)
iter_count_TC = 1 # Tracks tension/compression-only iterations
iter_count_PD = 1 # Tracks P-Delta iterations
convergence_TC = False # Tracks tension/compression-only convergence
convergence_PD = False # Tracks P-Delta convergence
divergence_TC = False # Tracks tension/compression-only divergence
divergence_PD = False # Tracks P-Delta divergence
# Iterate until convergence or divergence occurs
while ((convergence_TC == False or convergence_PD == False)
and (divergence_TC == False and divergence_PD == False)):
# Inform the user which iteration we're on
print('...Beginning tension/compression-only iteration #' + str(iter_count_TC))
print('...Beginning P-Delta iteration #' + str(iter_count_PD))
# Get the partitioned global matrices
if iter_count_PD == 1:
K11, K12, K21, K22 = self.__Partition(self.K(combo.name), D1_indices, D2_indices) # Initial stiffness matrix
FER1, FER2 = self.__Partition(self.FER(combo.name), D1_indices, D2_indices) # Fixed end reactions
P1, P2 = self.__Partition(self.P(combo.name), D1_indices, D2_indices) # Nodal forces
else:
# Calculate the global stiffness matrices (partitioned)
K11, K12, K21, K22 = self.__Partition(self.K(combo.name), D1_indices, D2_indices) # Initial stiffness matrix
Kg11, Kg12, Kg21, Kg22 = self.__Partition(self.Kg(combo.name), D1_indices, D2_indices) # Geometric stiffness matrix
# Combine the stiffness matrices
K11 = add(K11, Kg11)
K12 = add(K12, Kg12)
K21 = add(K21, Kg21)
K22 = add(K22, Kg22)
# Calculate the global displacement vector
print('...Calculating the global displacement vector')
if K11.shape == (0, 0):
# All displacements are known, so D1 is an empty vector
D1 = []
else:
try:
# Calculate the global displacement vector
if sparse == True:
D1 = spsolve(csc_matrix(K11), subtract(subtract(P1, FER1), matmul(K12, D2)))
D1 = D1.reshape(len(D1), 1)
else:
D1 = solve(K11, subtract(subtract(P1, FER1), matmul(K12, D2)))
except:
                        # Raise an exception if 'K' is singular, since that implies rigid body motion
raise ValueError('The stiffness matrix is singular, which implies rigid body motion. The structure is unstable. Aborting analysis.')
D = zeros((len(self.Nodes)*6, 1))
for node in self.Nodes:
if D2_indices.count(node.ID*6 + 0) == 1:
D.itemset((node.ID*6 + 0, 0), D2[D2_indices.index(node.ID*6 + 0), 0])
else:
D.itemset((node.ID*6 + 0, 0), D1[D1_indices.index(node.ID*6 + 0), 0])
if D2_indices.count(node.ID*6 + 1) == 1:
D.itemset((node.ID*6 + 1, 0), D2[D2_indices.index(node.ID*6 + 1), 0])
else:
D.itemset((node.ID*6 + 1, 0), D1[D1_indices.index(node.ID*6 + 1), 0])
if D2_indices.count(node.ID*6 + 2) == 1:
D.itemset((node.ID*6 + 2, 0), D2[D2_indices.index(node.ID*6 + 2), 0])
else:
D.itemset((node.ID*6 + 2, 0), D1[D1_indices.index(node.ID*6 + 2), 0])
if D2_indices.count(node.ID*6 + 3) == 1:
D.itemset((node.ID*6 + 3, 0), D2[D2_indices.index(node.ID*6 + 3), 0])
else:
D.itemset((node.ID*6 + 3, 0), D1[D1_indices.index(node.ID*6 + 3), 0])
if D2_indices.count(node.ID*6 + 4) == 1:
D.itemset((node.ID*6 + 4, 0), D2[D2_indices.index(node.ID*6 + 4), 0])
else:
D.itemset((node.ID*6 + 4, 0), D1[D1_indices.index(node.ID*6 + 4), 0])
if D2_indices.count(node.ID*6 + 5) == 1:
D.itemset((node.ID*6 + 5, 0), D2[D2_indices.index(node.ID*6 + 5), 0])
else:
D.itemset((node.ID*6 + 5, 0), D1[D1_indices.index(node.ID*6 + 5), 0])
# Save the global displacement vector
self.__D[combo.name] = D
# Store the calculated global nodal displacements into each node
for node in self.Nodes:
node.DX[combo.name] = D[node.ID*6 + 0, 0]
node.DY[combo.name] = D[node.ID*6 + 1, 0]
node.DZ[combo.name] = D[node.ID*6 + 2, 0]
node.RX[combo.name] = D[node.ID*6 + 3, 0]
node.RY[combo.name] = D[node.ID*6 + 4, 0]
node.RZ[combo.name] = D[node.ID*6 + 5, 0]
# Assume the model has converged (to be checked below)
convergence_TC = True
# Check for tension/compression-only springs that need to be deactivated
print('...Checking for tension/compression-only spring convergence')
for spring in self.Springs:
# Only run the tension/compression only check if the spring is still active
if spring.active[combo.name] == True:
# Check if tension-only conditions exist
if spring.tension_only == True and spring.Axial(combo.name) > 0:
spring.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Check if compression-only conditions exist
elif spring.comp_only == True and spring.Axial(combo.name) < 0:
spring.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Check for tension/compression-only members that need to be deactivated
print('...Checking for tension/compression-only member convergence')
for member in self.Members:
# Only run the tension/compression only check if the member is still active
if member.active[combo.name] == True:
# Check if tension-only conditions exist
if member.tension_only == True and member.MaxAxial(combo.name) > 0:
member.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Check if compression-only conditions exist
elif member.comp_only == True and member.MinAxial(combo.name) < 0:
member.active[combo.name] = False
convergence_TC = False
# Reset the P-Delta analysis for the new geometry
iter_count_PD = 0
convergence_PD = False
# Report on convergence of tension/compression only analysis
if convergence_TC == False:
print('...Tension/compression-only analysis did not converge on this iteration')
print('...Stiffness matrix will be adjusted for newly deactivated elements')
print('...P-Delta analysis will be restarted')
# Increment the tension/compression-only iteration count
iter_count_TC += 1
else:
print('...Tension/compression-only analysis converged after ' + str(iter_count_TC) + ' iteration(s)')
# Check for divergence in the tension/compression-only analysis
if iter_count_TC > max_iter:
divergence_TC = True
raise Exception('...Model diverged during tension/compression-only analysis')
# Check for P-Delta convergence
if iter_count_PD > 1:
# Print a status update for the user
print('...Checking for convergence')
# Temporarily disable error messages for invalid values.
# We'll be dealing with some 'nan' values due to division by zero at supports with zero deflection.
seterr(invalid='ignore')
# Check for convergence
if abs(1 - nanmax(divide(prev_results, D1))) <= tol:
convergence_PD = True
print('...P-Delta analysis converged after ' + str(iter_count_PD) + ' iteration(s)')
# Check for divergence
elif iter_count_PD > max_iter:
divergence_PD = True
print('...P-Delta analysis failed to converge after ' + str(max_iter) + ' iteration(s)')
# Turn invalid value warnings back on
seterr(invalid='warn')
# Save the results for the next iteration
prev_results = D1
# Increment the P-Delta iteration count
iter_count_PD += 1
# Calculate reactions
self.__CalcReactions()
print('...Analysis complete')
print('')
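        # Illustrative usage sketch (assumes 'model' already has geometry, supports, and loads
        # defined): a second-order analysis that must converge within 0.5% between successive
        # iterations could be run as
        #
        #     model.Analyze_PDelta(max_iter=30, tol=0.005)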
#%%
def __CalcReactions(self):
'''
Calculates reactions once the model is solved.
'''
# Print a status update to the console
print('...Calculating reactions')
# Calculate the reactions, node by node
for node in self.Nodes:
# Step through each load combination
for combo in self.LoadCombos.values():
# Initialize reactions for this node and load combination
node.RxnFX[combo.name] = 0.0
node.RxnFY[combo.name] = 0.0
node.RxnFZ[combo.name] = 0.0
node.RxnMX[combo.name] = 0.0
node.RxnMY[combo.name] = 0.0
node.RxnMZ[combo.name] = 0.0
# Determine if the node has any supports
if (node.SupportDX == True) \
or (node.SupportDY == True) \
or (node.SupportDZ == True) \
or (node.SupportRX == True) \
or (node.SupportRY == True) \
or (node.SupportRZ == True):
# Sum the spring end forces at the node
for spring in self.Springs:
if spring.iNode == node and spring.active[combo.name] == True:
# Get the spring's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
spring_F = spring.F(combo.name)
node.RxnFX[combo.name] += spring_F[0, 0]
node.RxnFY[combo.name] += spring_F[1, 0]
node.RxnFZ[combo.name] += spring_F[2, 0]
node.RxnMX[combo.name] += spring_F[3, 0]
node.RxnMY[combo.name] += spring_F[4, 0]
node.RxnMZ[combo.name] += spring_F[5, 0]
elif spring.jNode == node and spring.active[combo.name] == True:
# Get the spring's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
spring_F = spring.F(combo.name)
node.RxnFX[combo.name] += spring_F[6, 0]
node.RxnFY[combo.name] += spring_F[7, 0]
node.RxnFZ[combo.name] += spring_F[8, 0]
node.RxnMX[combo.name] += spring_F[9, 0]
node.RxnMY[combo.name] += spring_F[10, 0]
node.RxnMZ[combo.name] += spring_F[11, 0]
# Sum the member end forces at the node
for member in self.Members:
if member.iNode == node and member.active[combo.name] == True:
# Get the member's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_F = member.F(combo.name)
node.RxnFX[combo.name] += member_F[0, 0]
node.RxnFY[combo.name] += member_F[1, 0]
node.RxnFZ[combo.name] += member_F[2, 0]
node.RxnMX[combo.name] += member_F[3, 0]
node.RxnMY[combo.name] += member_F[4, 0]
node.RxnMZ[combo.name] += member_F[5, 0]
elif member.jNode == node and member.active[combo.name] == True:
# Get the member's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_F = member.F(combo.name)
node.RxnFX[combo.name] += member_F[6, 0]
node.RxnFY[combo.name] += member_F[7, 0]
node.RxnFZ[combo.name] += member_F[8, 0]
node.RxnMX[combo.name] += member_F[9, 0]
node.RxnMY[combo.name] += member_F[10, 0]
node.RxnMZ[combo.name] += member_F[11, 0]
# Sum the plate forces at the node
for plate in self.Plates:
if plate.iNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[0, 0]
node.RxnFY[combo.name] += plate_F[1, 0]
node.RxnFZ[combo.name] += plate_F[2, 0]
node.RxnMX[combo.name] += plate_F[3, 0]
node.RxnMY[combo.name] += plate_F[4, 0]
node.RxnMZ[combo.name] += plate_F[5, 0]
elif plate.jNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[18, 0]
node.RxnFY[combo.name] += plate_F[19, 0]
node.RxnFZ[combo.name] += plate_F[20, 0]
node.RxnMX[combo.name] += plate_F[21, 0]
node.RxnMY[combo.name] += plate_F[22, 0]
node.RxnMZ[combo.name] += plate_F[23, 0]
elif plate.mNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[12, 0]
node.RxnFY[combo.name] += plate_F[13, 0]
node.RxnFZ[combo.name] += plate_F[14, 0]
node.RxnMX[combo.name] += plate_F[15, 0]
node.RxnMY[combo.name] += plate_F[16, 0]
node.RxnMZ[combo.name] += plate_F[17, 0]
elif plate.nNode == node:
# Get the plate's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_F = plate.F(combo.name)
node.RxnFX[combo.name] += plate_F[6, 0]
node.RxnFY[combo.name] += plate_F[7, 0]
node.RxnFZ[combo.name] += plate_F[8, 0]
node.RxnMX[combo.name] += plate_F[9, 0]
node.RxnMY[combo.name] += plate_F[10, 0]
node.RxnMZ[combo.name] += plate_F[11, 0]
# Sum the quad forces at the node
for quad in self.Quads:
if quad.iNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[12, 0]
node.RxnFY[combo.name] += quad_F[13, 0]
node.RxnFZ[combo.name] += quad_F[14, 0]
node.RxnMX[combo.name] += quad_F[15, 0]
node.RxnMY[combo.name] += quad_F[16, 0]
node.RxnMZ[combo.name] += quad_F[17, 0]
elif quad.jNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[18, 0]
node.RxnFY[combo.name] += quad_F[19, 0]
node.RxnFZ[combo.name] += quad_F[20, 0]
node.RxnMX[combo.name] += quad_F[21, 0]
node.RxnMY[combo.name] += quad_F[22, 0]
node.RxnMZ[combo.name] += quad_F[23, 0]
elif quad.mNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[0, 0]
node.RxnFY[combo.name] += quad_F[1, 0]
node.RxnFZ[combo.name] += quad_F[2, 0]
node.RxnMX[combo.name] += quad_F[3, 0]
node.RxnMY[combo.name] += quad_F[4, 0]
node.RxnMZ[combo.name] += quad_F[5, 0]
elif quad.nNode == node:
# Get the quad's global force matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_F = quad.F(combo.name)
node.RxnFX[combo.name] += quad_F[6, 0]
node.RxnFY[combo.name] += quad_F[7, 0]
node.RxnFZ[combo.name] += quad_F[8, 0]
node.RxnMX[combo.name] += quad_F[9, 0]
node.RxnMY[combo.name] += quad_F[10, 0]
node.RxnMZ[combo.name] += quad_F[11, 0]
# Sum the joint forces at the node
for load in node.NodeLoads:
if load[0] == 'FX':
node.RxnFX[combo.name] -= load[1]
elif load[0] == 'FY':
node.RxnFY[combo.name] -= load[1]
elif load[0] == 'FZ':
node.RxnFZ[combo.name] -= load[1]
elif load[0] == 'MX':
node.RxnMX[combo.name] -= load[1]
elif load[0] == 'MY':
node.RxnMY[combo.name] -= load[1]
elif load[0] == 'MZ':
node.RxnMZ[combo.name] -= load[1]
#%%
def __CheckStatics(self):
'''
        Checks static equilibrium and prints the results to the console. Sums of applied forces
        and reactions are reported to 3 significant figures for each load combination.
        '''
print('+----------------+')
print('| Statics Check: |')
print('+----------------+')
print('')
from prettytable import PrettyTable
# Start a blank table and create a header row
statics_table = PrettyTable()
statics_table.field_names = ['Load Combination', 'Sum FX', 'Sum RX', 'Sum FY', 'Sum RY', 'Sum FZ', 'Sum RZ', 'Sum MX', 'Sum RMX', 'Sum MY', 'Sum RMY', 'Sum MZ', 'Sum RMZ']
# Step through each load combination
for combo in self.LoadCombos.values():
# Initialize force and moment summations to zero
SumFX, SumFY, SumFZ = 0.0, 0.0, 0.0
SumMX, SumMY, SumMZ = 0.0, 0.0, 0.0
SumRFX, SumRFY, SumRFZ = 0.0, 0.0, 0.0
SumRMX, SumRMY, SumRMZ = 0.0, 0.0, 0.0
# Get the global force vector and the global fixed end reaction vector
P = self.P(combo.name)
FER = self.FER(combo.name)
# Step through each node and sum its forces
for node in self.Nodes:
# Get the node's coordinates
X = node.X
Y = node.Y
Z = node.Z
# Get the nodal forces
FX = P[node.ID*6+0][0] - FER[node.ID*6+0][0]
FY = P[node.ID*6+1][0] - FER[node.ID*6+1][0]
FZ = P[node.ID*6+2][0] - FER[node.ID*6+2][0]
MX = P[node.ID*6+3][0] - FER[node.ID*6+3][0]
MY = P[node.ID*6+4][0] - FER[node.ID*6+4][0]
MZ = P[node.ID*6+5][0] - FER[node.ID*6+5][0]
# Get the nodal reactions
RFX = node.RxnFX[combo.name]
RFY = node.RxnFY[combo.name]
RFZ = node.RxnFZ[combo.name]
RMX = node.RxnMX[combo.name]
RMY = node.RxnMY[combo.name]
RMZ = node.RxnMZ[combo.name]
# Sum the global forces
SumFX += FX
SumFY += FY
SumFZ += FZ
SumMX += MX - FY*Z + FZ*Y
SumMY += MY + FX*Z - FZ*X
SumMZ += MZ - FX*Y + FY*X
# Sum the global reactions
SumRFX += RFX
SumRFY += RFY
SumRFZ += RFZ
SumRMX += RMX - RFY*Z + RFZ*Y
SumRMY += RMY + RFX*Z - RFZ*X
SumRMZ += RMZ - RFX*Y + RFY*X
# Add the results to the table
statics_table.add_row([combo.name, '{:.3g}'.format(SumFX), '{:.3g}'.format(SumRFX),
'{:.3g}'.format(SumFY), '{:.3g}'.format(SumRFY),
'{:.3g}'.format(SumFZ), '{:.3g}'.format(SumRFZ),
'{:.3g}'.format(SumMX), '{:.3g}'.format(SumRMX),
'{:.3g}'.format(SumMY), '{:.3g}'.format(SumRMY),
'{:.3g}'.format(SumMZ), '{:.3g}'.format(SumRMZ)])
# Print the static check table
print(statics_table)
print('')
|
py | 1a476bdc613d9ff3df8c999095969ee363e078df | import pytest
from bispy.utilities.graph_entities import (
_QBlock,
_Vertex,
_Edge,
)
from typing import Set, Tuple, List
import networkx as nx
from bispy.saha.ranked_pta import ranked_split
from bispy.paige_tarjan.paige_tarjan import paige_tarjan
from bispy.saha.saha import add_edge
from bispy.utilities.graph_decorator import decorate_nx_graph
def partition_to_integer(partition: List[_QBlock]) -> Set[Set[int]]:
return set(
frozenset(vertex.label for vertex in block.vertexes)
for block in filter(lambda b: b.vertexes.size > 0, partition)
)
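# Example (illustrative): a partition whose blocks contain vertexes labelled {0} and {1, 2}
# is mapped to {frozenset({0}), frozenset({1, 2})}; empty blocks are dropped by the filter above.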
def integer_to_partition(
partition: List[Tuple], vertexes: List[_Vertex]
) -> List[_QBlock]:
qblocks = []
for block in partition:
qblocks.append(_QBlock([vertexes[i] for i in block], None))
return qblocks
def test_resets_aux_count():
g = nx.DiGraph()
g.add_nodes_from(range(5))
g.add_edges_from([(0, 1), (0, 2), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)])
vertexes, _ = decorate_nx_graph(g)
integer_partition = paige_tarjan(g)
q_partition = integer_to_partition(integer_partition, vertexes)
# now we modify the graph
add_edge(vertexes[3], vertexes[0])
# find [v]
modified_destination_block = None
for block in q_partition:
for vertex in block.vertexes:
if vertex.label == 0:
modified_destination_block = block
break
ranked_split(q_partition, modified_destination_block, 2)
for vx in vertexes:
assert not hasattr(vx, "aux_count") or vx.aux_count is None
def test_ranked_split():
g = nx.DiGraph()
g.add_nodes_from(range(5))
g.add_edges_from([(0, 1), (0, 2), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)])
vertexes, _ = decorate_nx_graph(g)
integer_partition = paige_tarjan(g)
q_partition = integer_to_partition(integer_partition, vertexes)
# now we modify the graph
add_edge(vertexes[3], vertexes[0])
# find [v]
modified_destination_block = None
for block in q_partition:
for vertex in block.vertexes:
if vertex.label == 0:
modified_destination_block = block
break
ranked_split(q_partition, modified_destination_block, 2)
final_integer_partition = partition_to_integer(q_partition)
assert final_integer_partition == set(
[frozenset([0]), frozenset([1, 2]), frozenset([3]), frozenset([4])]
)
|
py | 1a476d496790ee6a0b38b1aa74ea4d16157645f8 | # -*- coding: utf-8 -*-
"""
Fast Kalman Filter attitude estimation
======================================
References
----------
.. [Guo] Siwen Guo, Jin Wu, Zuocai Wang, and Jide Qian, "Novel MARG-Sensor
Orientation Estimation Algorithm Using Fast Kalman Filter." Journal of
Sensors, vol. 2017, Article ID 8542153, 12 pages.
https://doi.org/10.1155/2017/8542153 and https://github.com/zarathustr/FKF
"""
import numpy as np
from ahrs.common.orientation import *
from ahrs.common import DEG2RAD
class FKF:
"""
Class of Fast Kalman Filter algorithm
Parameters
----------
acc : array
Sample of tri-axial Accelerometer.
mag : array
Sample of tri-axial Magnetometer.
"""
def __init__(self, acc: np.ndarray = None, mag: np.ndarray = None, **kwargs):
self.q = np.array([1.0, 0.0, 0.0, 0.0])
self.Ar = np.array([0.0, 0.0, 1.0])
self.Mr = np.array([0.0, 0.0, 1.0])
def update(self, acc, mag):
"""
FKF algorithm with a 6-axis Accelerometer-Magnetometer architecture.
Parameters
----------
acc : array
Sample of tri-axial Accelerometer.
mag : array
Sample of tri-axial Magnetometer.
Returns
-------
q : array
Estimated quaternion.
"""
Ab = acc.copy()
Mb = mag.copy()
return self.q
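        # Illustrative usage sketch (sensor readings are hypothetical; note that this update()
        # is currently a stub -- Ab and Mb are copied but unused, so the initial quaternion is
        # returned unchanged):
        #
        #     fkf = FKF()
        #     q = fkf.update(np.array([0.0, 0.0, 9.81]), np.array([0.2, 0.0, 0.4]))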
|
py | 1a476db7cd76d7ae200f6910321aa02a2137810d | import cv2
import numpy as np
def detect_face(net, frame, conf_threshold=0.7):
    # Prepare the input image
h, w, c = frame.shape
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123], False, False)
# Feedforward
    # the SSD prediction produces an output of shape (1, 1, N, 7)
    # the 7 values are: image_id, label, conf, x_min, y_min, x_max, y_max
net.setInput(blob)
detections = net.forward()
    # Filter out low-confidence predictions
bbox = []
for _, _, conf, x1, y1, x2, y2 in detections[0, 0]:
if conf > conf_threshold:
box = np.array([x1, y1, x2, y2]) * [w, h, w, h]
bbox.append(box.astype(int))
return bbox
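# Illustrative usage sketch (the model file names below are assumptions, not part of this
# module -- any OpenCV-compatible SSD face detector in Caffe format would do):
#
#     net = cv2.dnn.readNetFromCaffe('deploy.prototxt', 'res10_300x300_ssd_iter_140000.caffemodel')
#     frame = cv2.imread('photo.jpg')
#     for x1, y1, x2, y2 in detect_face(net, frame):
#         face = frame[y1:y2, x1:x2]
#         print(calculate_skin_percent(face))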
def normalize_image(img):
mean = img.reshape(-1, 3).mean(0).reshape(1, 1, -1)
std = img.reshape(-1, 3).std(0).reshape(1, 1, -1)
img = (img - mean) / std
img = (np.clip(img, [-4, -4, -4], [4, 4, 4]) + 4) / 8
img = (img*255).astype(np.uint8)
return img
def calculate_skin_percent(face, min_val=(90, 100, 110), max_val=(150, 150, 150)):
face = normalize_image(face)
min_val = np.array(min_val, dtype=np.uint8)
max_val = np.array(max_val, dtype=np.uint8)
skin = ((face >= min_val) & (face <= max_val)).all(2)
skin_percent = skin.mean()
return skin_percent |
py | 1a476de9dfd09f30c3371f3dbb98e85f16cc8a05 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-08-11 17:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0005_option_votes'),
]
operations = [
migrations.AlterField(
model_name='option',
name='details',
field=models.CharField(help_text='Episodes watched (Ep.1, Eps. 1-3), etc.', max_length=200, null=True),
),
migrations.AlterField(
model_name='poll',
name='description',
field=models.CharField(help_text='Description of the poll', max_length=50, null=True),
),
migrations.AlterField(
model_name='poll',
name='name',
field=models.CharField(help_text='Enter name of poll', max_length=30),
),
]
|
py | 1a476fae70439fa39efdf14392cf089b1cc5cdd7 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import sys
import unittest
import fundamental_tester_base
from pyplusplus import code_creators
class tester_t(fundamental_tester_base.fundamental_tester_base_t):
EXTENSION_NAME = 'duplicate_aliases'
def __init__( self, *args ):
fundamental_tester_base.fundamental_tester_base_t.__init__(
self
, tester_t.EXTENSION_NAME
, *args )
def customize(self, mb):
classes = mb.classes( lambda decl: 'duplicate_aliases' in decl.name )
classes.alias = 'duplicate_aliases'
classes.wrapper_alias = 'wrapper_duplicate_aliases'
def run_tests( self, module):
#check compilation
pass
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
|
py | 1a4770867afeb94a76d8dd47245efaad59b444e3 | from easydict import EasyDict
cartpole_iqn_config = dict(
env=dict(
collector_env_num=8,
evaluator_env_num=5,
n_evaluator_episode=5,
stop_value=195,
),
policy=dict(
cuda=False,
on_policy=False,
priority=True,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[128, 128, 64],
num_quantiles=32,
),
discount_factor=0.97,
nstep=3,
learn=dict(
update_per_collect=3,
batch_size=64,
learning_rate=0.001,
target_update_freq=100,
kappa=1.0,
),
collect=dict(
n_sample=80,
unroll_len=1,
),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=10000,
), replay_buffer=dict(replay_buffer_size=20000, )
),
),
)
cartpole_iqn_config = EasyDict(cartpole_iqn_config)
main_config = cartpole_iqn_config
cartpole_iqn_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='base'),
policy=dict(type='iqn'),
)
cartpole_iqn_create_config = EasyDict(cartpole_iqn_create_config)
create_config = cartpole_iqn_create_config
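# A common way to launch this config in DI-engine-style projects (sketch; assumes the `ding`
# package providing `serial_pipeline` is installed -- this entry point is not part of this file):
#
#     if __name__ == '__main__':
#         from ding.entry import serial_pipeline
#         serial_pipeline((main_config, create_config), seed=0)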
|
py | 1a4770909ed65743f4eb3301f9568472c62f85b0 | #!/usr/bin/env python
#
# This example can be used to demonstrate pvaPy server/client channel
# monitoring
#
# Run server.py in one window, and client.py in another one.
#
import sys
import time
from pvaccess import Channel
from collections import OrderedDict
class ClientMonitor:
def __init__(self, name):
self.name = name
self.value = 0
self.nReceived = 0
self.nMissed = 0
self.percentageMissed = 0
self.startTime = 0
        self.receiveRateKHz = 0.0
def toString(self):
return '%6s: Received: %7d (%6.2f [kHz]); Missed: %7d (%6.2f%%)' % (self.name, self.nReceived, self.receiveRateKHz, self.nMissed, self.percentageMissed)
def monitor(self, pv):
oldValue = self.value
self.value = pv['c']
self.nReceived += 1
diff = self.value - oldValue
if oldValue > 0:
self.nMissed += diff-1
else:
self.startTime = time.time()
if self.nReceived % 10000 == 0:
currentTime = time.time()
deltaT = currentTime - self.startTime
self.receiveRateKHz = self.nReceived/deltaT/1000.0
self.percentageMissed = (self.nMissed*100.0)/(self.nReceived+self.nMissed)
if self.nReceived % 100000 == 0:
print(self.toString())
if __name__ == '__main__':
runtime = 60
if len(sys.argv) > 1:
runtime = float(sys.argv[1])
channelName = 'counter'
c = Channel(channelName)
#print('CONNECT TO %s:\n%s\n' % (channelName, c.get()))
m = ClientMonitor(channelName)
t0 = time.time()
print('STARTING MONITOR for %s at %s\n' % (channelName, t0))
#c.monitor(m.monitor)
c.monitor(m.monitor, 'field(c)')
time.sleep(runtime)
c.stopMonitor()
t1 = time.time()
deltaT = t1-t0
print('STOP MONITOR at %s\n' % t1)
print('FINAL STATS:')
print(m.toString())
print('')
print('RUNTIME: %.2f [s]' % (deltaT))
print('\nDONE')
|
py | 1a47713a0531c57bcc413e90552d434bfc7a3455 | # -*- coding: utf-8 -*-
"""
drftoolbox.views
~~~~~~~~~~~~~~~~
This module defines view classes used by the API
:copyright: (c) 2018 by Medical Decisions LLC
"""
import functools
import json
import logging
import re
from django.contrib.auth import get_user_model
from rest_framework import generics
from jose import jwt as jose_jwt, exceptions as jose_exceptions
from drftoolbox.serializers import UserKMSKeySerializer
LOGGER = logging.getLogger(__name__)
class BaseUserKMSKeyView(generics.RetrieveAPIView):
queryset = get_user_model().objects.filter(is_active=True)
serializer_class = UserKMSKeySerializer
def http_sign_class(self):
raise NotImplementedError
class RequestLoggingViewMixin(object):
REQUEST_LOGGING_LOGGER = LOGGER
REQUEST_LOGGING_LEVEL = logging.INFO
REQUEST_LOGGING_OBFUSCATE_PATTERN = re.compile(r'.*(authorization|cookie)$', re.I)
@classmethod
def obfuscate(cls, value):
result = []
for section in str(value).split('; '):
# try handling the value as a cookie, and if so see if we can
# only obfuscate the value parts of that cookie, however if not
# a cookie just fall back to obfuscating everything after the
# first 6 chars
parts = section.split('=', 1)
k = parts[0] if len(parts) > 1 else ''
v = parts[-1]
result.append(f'{k} {v[:6]}...'.strip())
return ' '.join(result)
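        # Worked examples (illustrative): obfuscate('Bearer abc123xyz789') returns
        # 'Bearer...', while a cookie header such as 'sessionid=abc123xyz; csrftoken=def456uvw'
        # becomes 'sessionid abc123... csrftoken def456...'.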
@classmethod
def request_logging(cls, request):
"""
utility method to log the details of a request
"""
log = functools.partial(cls.REQUEST_LOGGING_LOGGER.log, cls.REQUEST_LOGGING_LEVEL)
pattern = cls.REQUEST_LOGGING_OBFUSCATE_PATTERN
data, headers = {}, {}
for k, v in request.data.items():
if pattern.match(k):
v = cls.obfuscate(v)
data[k] = v
for k, v in request._request.headers.items(): # pylint: disable=protected-access
if pattern.match(k):
try:
token = v.split()[-1]
v = {
'jwt_headers': jose_jwt.get_unverified_header(token),
'jwt_claims': jose_jwt.get_unverified_claims(token),
}
except (jose_exceptions.JOSEError, IndexError):
v = cls.obfuscate(v)
headers[k] = v
msg = {
'path': request._request.path, # pylint: disable=protected-access
'query params': dict(request.query_params),
'data': data,
'headers': headers,
}
log(f'REQUEST => {json.dumps(msg, indent=2)}')
def initialize_request(self, *args, **kwargs):
request = super().initialize_request(*args, **kwargs)
self.request_logging(request)
return request
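# Illustrative usage sketch (hypothetical view, queryset, and serializer -- not part of this
# module): mixing RequestLoggingViewMixin into a DRF generic view logs each incoming request
# with sensitive headers obfuscated.
#
#     class WidgetListView(RequestLoggingViewMixin, generics.ListAPIView):
#         queryset = Widget.objects.all()
#         serializer_class = WidgetSerializer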
|
py | 1a477177db1fc6daea92f3094a8f6b4bfb1ddcc2 | """
Dialog for building Tkinter accelerator key bindings
"""
from Tkinter import *
import tkMessageBox
import string, os
class GetKeysDialog(Toplevel):
def __init__(self,parent,title,action,currentKeySequences):
"""
action - string, the name of the virtual event these keys will be
mapped to
currentKeys - list, a list of all key sequence lists currently mapped
to virtual events, for overlap checking
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE,width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.action=action
self.currentKeySequences=currentKeySequences
self.result=''
self.keyString=StringVar(self)
self.keyString.set('')
self.SetModifiersForPlatform() # set self.modifiers, self.modifier_label
self.modifier_vars = []
for modifier in self.modifiers:
variable = StringVar(self)
variable.set('')
self.modifier_vars.append(variable)
self.advanced = False
self.CreateWidgets()
self.LoadFinalKeyList()
self.withdraw() #hide while setting geometry
self.update_idletasks()
self.geometry("+%d+%d" %
((parent.winfo_rootx()+((parent.winfo_width()/2)
-(self.winfo_reqwidth()/2)),
parent.winfo_rooty()+((parent.winfo_height()/2)
-(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
self.deiconify() #geometry set, unhide
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
frameButtons=Frame(self)
frameButtons.pack(side=BOTTOM,fill=X)
self.buttonOK = Button(frameButtons,text='OK',
width=8,command=self.OK)
self.buttonOK.grid(row=0,column=0,padx=5,pady=5)
self.buttonCancel = Button(frameButtons,text='Cancel',
width=8,command=self.Cancel)
self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
self.frameKeySeqBasic = Frame(frameMain)
self.frameKeySeqAdvanced = Frame(frameMain)
self.frameControlsBasic = Frame(frameMain)
self.frameHelpAdvanced = Frame(frameMain)
self.frameKeySeqAdvanced.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5)
self.frameKeySeqBasic.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5)
self.frameKeySeqBasic.lift()
self.frameHelpAdvanced.grid(row=1,column=0,sticky=NSEW,padx=5)
self.frameControlsBasic.grid(row=1,column=0,sticky=NSEW,padx=5)
self.frameControlsBasic.lift()
self.buttonLevel = Button(frameMain,command=self.ToggleLevel,
text='Advanced Key Binding Entry >>')
self.buttonLevel.grid(row=2,column=0,sticky=EW,padx=5,pady=5)
labelTitleBasic = Label(self.frameKeySeqBasic,
text="New keys for '"+self.action+"' :")
labelTitleBasic.pack(anchor=W)
labelKeysBasic = Label(self.frameKeySeqBasic,justify=LEFT,
textvariable=self.keyString,relief=GROOVE,borderwidth=2)
labelKeysBasic.pack(ipadx=5,ipady=5,fill=X)
self.modifier_checkbuttons = {}
column = 0
for modifier, variable in zip(self.modifiers, self.modifier_vars):
label = self.modifier_label.get(modifier, modifier)
check=Checkbutton(self.frameControlsBasic,
command=self.BuildKeyString,
text=label,variable=variable,onvalue=modifier,offvalue='')
check.grid(row=0,column=column,padx=2,sticky=W)
self.modifier_checkbuttons[modifier] = check
column += 1
labelFnAdvice=Label(self.frameControlsBasic,justify=LEFT,
text=\
"Select the desired modifier keys\n"+
"above, and the final key from the\n"+
"list on the right.\n\n" +
"Use upper case Symbols when using\n" +
"the Shift modifier. (Letters will be\n" +
"converted automatically.)")
labelFnAdvice.grid(row=1,column=0,columnspan=4,padx=2,sticky=W)
self.listKeysFinal=Listbox(self.frameControlsBasic,width=15,height=10,
selectmode=SINGLE)
self.listKeysFinal.bind('<ButtonRelease-1>',self.FinalKeySelected)
self.listKeysFinal.grid(row=0,column=4,rowspan=4,sticky=NS)
scrollKeysFinal=Scrollbar(self.frameControlsBasic,orient=VERTICAL,
command=self.listKeysFinal.yview)
self.listKeysFinal.config(yscrollcommand=scrollKeysFinal.set)
scrollKeysFinal.grid(row=0,column=5,rowspan=4,sticky=NS)
self.buttonClear=Button(self.frameControlsBasic,
text='Clear Keys',command=self.ClearKeySeq)
self.buttonClear.grid(row=2,column=0,columnspan=4)
labelTitleAdvanced = Label(self.frameKeySeqAdvanced,justify=LEFT,
text="Enter new binding(s) for '"+self.action+"' :\n"+
"(These bindings will not be checked for validity!)")
labelTitleAdvanced.pack(anchor=W)
self.entryKeysAdvanced=Entry(self.frameKeySeqAdvanced,
textvariable=self.keyString)
self.entryKeysAdvanced.pack(fill=X)
labelHelpAdvanced=Label(self.frameHelpAdvanced,justify=LEFT,
text="Key bindings are specified using Tkinter keysyms as\n"+
"in these samples: <Control-f>, <Shift-F2>, <F12>,\n"
"<Control-space>, <Meta-less>, <Control-Alt-Shift-X>.\n"
"Upper case is used when the Shift modifier is present!\n\n" +
"'Emacs style' multi-keystroke bindings are specified as\n" +
"follows: <Control-x><Control-y>, where the first key\n" +
"is the 'do-nothing' keybinding.\n\n" +
"Multiple separate bindings for one action should be\n"+
"separated by a space, eg., <Alt-v> <Meta-v>." )
labelHelpAdvanced.grid(row=0,column=0,sticky=NSEW)
def SetModifiersForPlatform(self):
"""Determine list of names of key modifiers for this platform.
The names are used to build Tk bindings -- it doesn't matter if the
keyboard has these keys, it matters if Tk understands them. The
order is also important: key binding equality depends on it, so
config-keys.def must use the same ordering.
"""
import sys
if sys.platform == 'darwin' and sys.argv[0].count('.app'):
self.modifiers = ['Shift', 'Control', 'Option', 'Command']
else:
self.modifiers = ['Control', 'Alt', 'Shift']
self.modifier_label = {'Control': 'Ctrl'} # short name
def ToggleLevel(self):
if self.buttonLevel.cget('text')[:8]=='Advanced':
self.ClearKeySeq()
self.buttonLevel.config(text='<< Basic Key Binding Entry')
self.frameKeySeqAdvanced.lift()
self.frameHelpAdvanced.lift()
self.entryKeysAdvanced.focus_set()
self.advanced = True
else:
self.ClearKeySeq()
self.buttonLevel.config(text='Advanced Key Binding Entry >>')
self.frameKeySeqBasic.lift()
self.frameControlsBasic.lift()
self.advanced = False
def FinalKeySelected(self,event):
self.BuildKeyString()
def BuildKeyString(self):
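# e.g. with Control and Shift checked and final key 'x' selected, this builds '<Control-Shift-Key-X>'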
keyList = modifiers = self.GetModifiers()
finalKey = self.listKeysFinal.get(ANCHOR)
if finalKey:
finalKey = self.TranslateKey(finalKey, modifiers)
keyList.append(finalKey)
self.keyString.set('<' + string.join(keyList,'-') + '>')
def GetModifiers(self):
modList = [variable.get() for variable in self.modifier_vars]
return filter(None, modList)
def ClearKeySeq(self):
self.listKeysFinal.select_clear(0,END)
self.listKeysFinal.yview(MOVETO, '0.0')
for variable in self.modifier_vars:
variable.set('')
self.keyString.set('')
def LoadFinalKeyList(self):
#these tuples are also available for use in validity checks
self.functionKeys=('F1','F2','F3','F4','F5','F6','F7','F8','F9',
'F10','F11','F12')
self.alphanumKeys=tuple(string.ascii_lowercase+string.digits)
self.punctuationKeys=tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
self.whitespaceKeys=('Tab','Space','Return')
self.editKeys=('BackSpace','Delete','Insert')
self.moveKeys=('Home','End','Page Up','Page Down','Left Arrow',
'Right Arrow','Up Arrow','Down Arrow')
#make a tuple of most of the useful common 'final' keys
keys=(self.alphanumKeys+self.punctuationKeys+self.functionKeys+
self.whitespaceKeys+self.editKeys+self.moveKeys)
self.listKeysFinal.insert(END, *keys)
def TranslateKey(self, key, modifiers):
"Translate from keycap symbol to the Tkinter keysym"
translateDict = {'Space':'space',
'~':'asciitilde','!':'exclam','@':'at','#':'numbersign',
'%':'percent','^':'asciicircum','&':'ampersand','*':'asterisk',
'(':'parenleft',')':'parenright','_':'underscore','-':'minus',
'+':'plus','=':'equal','{':'braceleft','}':'braceright',
'[':'bracketleft',']':'bracketright','|':'bar',';':'semicolon',
':':'colon',',':'comma','.':'period','<':'less','>':'greater',
'/':'slash','?':'question','Page Up':'Prior','Page Down':'Next',
'Left Arrow':'Left','Right Arrow':'Right','Up Arrow':'Up',
'Down Arrow': 'Down', 'Tab':'Tab'}
if key in translateDict.keys():
key = translateDict[key]
if 'Shift' in modifiers and key in string.ascii_lowercase:
key = key.upper()
key = 'Key-' + key
return key
def OK(self, event=None):
if self.advanced or self.KeysOK(): # doesn't check advanced string yet
self.result=self.keyString.get()
self.destroy()
def Cancel(self, event=None):
self.result=''
self.destroy()
def KeysOK(self):
'''Validity check on user's 'basic' keybinding selection.
Doesn't check the string produced by the advanced dialog because
'modifiers' isn't set.
'''
keys = self.keyString.get()
keys = keys.strip()
finalKey = self.listKeysFinal.get(ANCHOR)
modifiers = self.GetModifiers()
# create a key sequence list for overlap check:
keySequence = keys.split()
keysOK = False
title = 'Key Sequence Error'
if not keys:
tkMessageBox.showerror(title=title, parent=self,
message='No keys specified.')
elif not keys.endswith('>'):
tkMessageBox.showerror(title=title, parent=self,
message='Missing the final Key')
elif (not modifiers
and finalKey not in self.functionKeys + self.moveKeys):
tkMessageBox.showerror(title=title, parent=self,
message='No modifier key(s) specified.')
elif (modifiers == ['Shift']) \
and (finalKey not in
self.functionKeys + self.moveKeys + ('Tab', 'Space')):
msg = 'The shift modifier by itself may not be used with'\
' this key symbol.'
tkMessageBox.showerror(title=title, parent=self, message=msg)
elif keySequence in self.currentKeySequences:
msg = 'This key combination is already in use.'
tkMessageBox.showerror(title=title, parent=self, message=msg)
else:
keysOK = True
return keysOK
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
keySeq=''
dlg=GetKeysDialog(root,'Get Keys','find-again',[])
print dlg.result
Button(root,text='Dialog',command=run).pack()
root.mainloop()
|
py | 1a4771f0bcc082903de994caed18c8cf0ee234fe | from django.db.models import CharField, Expression
from psycopg2.sql import Identifier, Literal, SQL
from usaspending_api.common.helpers.sql_helpers import convert_composable_query_to_string
from usaspending_api.recipient.models import RecipientLookup, RecipientProfile
from usaspending_api.recipient.v2.lookups import SPECIAL_CASES
def obtain_recipient_uri(recipient_name, recipient_unique_id, parent_recipient_unique_id, is_parent_recipient=False):
""" Return a valid string to be used for api/v2/recipient/duns/<recipient-hash>/ (or None)
Keyword Arguments:
recipient_name -- Legal Entity Name from the record
recipient_unique_id -- DUNS from the record
parent_recipient_unique_id -- parent DUNS from the record
is_parent_recipient -- boolean flag to force the recipient level to be "P" (default False)
By the nature of transaction records, the listed recipient can only be "R" or "C"
This flag is for the parent recipient link (as appropriate)
Return example string: 11fcdf15-3490-cdad-3df4-3b410f3d9b20-C
"""
if (is_parent_recipient and not recipient_unique_id) or not (recipient_unique_id or recipient_name):
return None
if recipient_unique_id:
recipient_hash = fetch_recipient_hash_using_duns(recipient_unique_id)
else:
recipient_hash = None
if recipient_hash is None:
recipient_hash = generate_missing_recipient_hash(recipient_unique_id, recipient_name)
recipient_level = obtain_recipient_level(
{
"duns": recipient_unique_id,
"parent_duns": parent_recipient_unique_id,
"is_parent_recipient": is_parent_recipient,
}
)
# Confirm that a recipient profile exists for the recipient information we have collected/generated.
if RecipientProfile.objects.filter(recipient_hash=recipient_hash, recipient_level=recipient_level).exists():
return combine_recipient_hash_and_level(recipient_hash, recipient_level)
return None
def generate_missing_recipient_hash(recipient_unique_id, recipient_name):
# SQL: MD5(UPPER(
# CASE
# WHEN awardee_or_recipient_uniqu IS NOT NULL THEN CONCAT('duns-', awardee_or_recipient_uniqu)
# ELSE CONCAT('name-', awardee_or_recipient_legal) END
# ))::uuid AS recipient_hash,
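# e.g. (hypothetical) a record with no DUNS and legal name "ACME CORP" is hashed from "NAME-ACME CORP"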
import hashlib
import uuid
if recipient_unique_id is None:
prefix = "name"
value = recipient_name
else:
prefix = "duns"
value = recipient_unique_id
return str(uuid.UUID(hashlib.md5(f"{prefix}-{value}".upper().encode("utf-8")).hexdigest()))
def fetch_recipient_hash_using_duns(recipient_unique_id):
recipient = RecipientLookup.objects.filter(duns=recipient_unique_id).values("recipient_hash").first()
return recipient["recipient_hash"] if recipient else None
def obtain_recipient_level(recipient_record: dict) -> str:
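# "P" = parent recipient, "R" = standalone recipient (no parent DUNS), "C" = child of a parent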
level = None
if recipient_is_parent(recipient_record):
level = "P"
elif recipient_is_standalone(recipient_record):
level = "R"
elif recipient_is_child(recipient_record):
level = "C"
return level
def recipient_is_parent(recipient_record: dict) -> bool:
return recipient_record["is_parent_recipient"]
def recipient_is_standalone(recipient_record: dict) -> bool:
return recipient_record["parent_duns"] is None
def recipient_is_child(recipient_record: dict) -> bool:
return recipient_record["parent_duns"] is not None
def combine_recipient_hash_and_level(recipient_hash, recipient_level):
return f"{recipient_hash}-{recipient_level.upper()}"
def _annotate_recipient_id(field_name, queryset, annotation_sql):
"""
Add recipient id (recipient hash + recipient level) to a queryset. The assumption here is that
the queryset is based on a data source that contains recipient_unique_id and
parent_recipient_unique_id which, currently, all of our advanced search materialized views do.
"""
class RecipientId(Expression):
"""
Used to graft a subquery into a queryset that can build recipient ids.
This is a bit less than ideal, but I just couldn't construct an ORM query to mimic this
logic. There are several issues including but not limited to:
- There are currently no relations between these tables in the Django ORM which makes
joining them... challenging.
- Adding relations to the ORM changes how the fields behave making this a much bigger
enhancement than originally planned.
- When I did add relations to the ORM, I couldn't figure out how to make the Django
OuterRef expression check for nulls since the subquery needs to check to see if the
parent_recipient_unique_id in the outer query is null.
Anyhow, this works and is encapsulated so if someone smart figures out how to use pure ORM,
it should be easy to patch in.
"""
def __init__(self):
super(RecipientId, self).__init__(CharField())
def as_sql(self, compiler, connection):
return (
convert_composable_query_to_string(
SQL(annotation_sql).format(
outer_table=Identifier(compiler.query.model._meta.db_table),
special_cases=Literal(tuple(sc for sc in SPECIAL_CASES)),
)
),
[],
)
return queryset.annotate(**{field_name: RecipientId()})
def annotate_recipient_id(field_name, queryset):
return _annotate_recipient_id(
field_name,
queryset,
"""(
select
rp.recipient_hash || '-' || rp.recipient_level
from
recipient_profile rp
inner join recipient_lookup rl on rl.recipient_hash = rp.recipient_hash
where
(
(
{outer_table}.recipient_unique_id is null
and rl.duns is null
and {outer_table}.recipient_name = rl.legal_business_name
) or (
{outer_table}.recipient_unique_id is not null
and rl.duns is not null
and rl.duns = {outer_table}.recipient_unique_id
)
)
and rp.recipient_level = case
when {outer_table}.parent_recipient_unique_id is null then 'R'
else 'C' end
and rp.recipient_name not in {special_cases}
)""",
)
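# Example usage (sketch; the model name below is hypothetical, but any queryset whose underlying
# table has recipient_unique_id, parent_recipient_unique_id and recipient_name should work):
#   qs = annotate_recipient_id("recipient_id", AwardSearchView.objects.all())
#   qs.values("recipient_id")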
def annotate_prime_award_recipient_id(field_name, queryset):
return _annotate_recipient_id(
field_name,
queryset,
"""(
select
rp.recipient_hash || '-' || rp.recipient_level
from
broker_subaward bs
inner join recipient_lookup rl on rl.duns = bs.awardee_or_recipient_uniqu
inner join recipient_profile rp on rp.recipient_hash = rl.recipient_hash
where
bs.id = {outer_table}.subaward_id and
rp.recipient_level = case
when bs.ultimate_parent_unique_ide is null or bs.ultimate_parent_unique_ide = '' then 'R'
else 'C'
end and
rp.recipient_name not in {special_cases}
)""",
)
|
py | 1a47724112d7cd66bbf02a7fb8c1ced501c0fc20 | #!/usr/bin/env python3
import sys
import asyncio
from electrum_spero.util import json_encode, print_msg, create_and_start_event_loop, log_exceptions
from electrum_spero.simple_config import SimpleConfig
from electrum_spero.network import Network
from electrum_spero.keystore import bip39_to_seed
from electrum_spero.bip32 import BIP32Node
from electrum_spero.bip39_recovery import account_discovery
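# Command-line usage (see the usage message below): bip39_recovery <mnemonic> [<passphrase>]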
try:
mnemonic = sys.argv[1]
passphrase = sys.argv[2] if len(sys.argv) > 2 else ""
except Exception:
print("usage: bip39_recovery <mnemonic> [<passphrase>]")
sys.exit(1)
loop, stopping_fut, loop_thread = create_and_start_event_loop()
config = SimpleConfig()
network = Network(config)
network.start()
@log_exceptions
async def f():
try:
def get_account_xpub(account_path):
root_seed = bip39_to_seed(mnemonic, passphrase)
root_node = BIP32Node.from_rootseed(root_seed, xtype="standard")
account_node = root_node.subkey_at_private_derivation(account_path)
account_xpub = account_node.to_xpub()
return account_xpub
active_accounts = await account_discovery(network, get_account_xpub)
print_msg(json_encode(active_accounts))
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
|