ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---
py | 7dfc0cc505a4d4639138842b1d1a2e6ba2d5d295 | n=int(input(' number : '))
num=1
inc=1
for i in range(n):
print(' '*(n-i-1) + str(num)*inc,end=' ')
num+=2#odd number
inc+=2
print() |
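For illustration, the same pattern logic with a fixed `n` instead of `input()` (the trailing space from `end=' '` is dropped here); this is just a sketch of the expected output:

```python
n = 3
num, inc = 1, 1
for i in range(n):
    print(' ' * (n - i - 1) + str(num) * inc)
    num += 2  # next odd digit
    inc += 2
# prints:
#   1
#  333
# 55555
```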
py | 7dfc0d648a8040d463d08391229784abf73cea3d | # -*- coding: utf-8 -*-
"""Top-level package for dicom-wsi."""
__author__ = """Steven N. Hart"""
__email__ = '[email protected]'
__version__ = '1.0.0'
|
py | 7dfc0d67154e95927768036781897aed1ad22d26 | import os
import trio
from kivy.config import Config
Config.set('graphics', 'width', '1600')
Config.set('graphics', 'height', '900')
Config.set('modules', 'touchring', '')
for items in Config.items('input'):
Config.remove_option('input', items[0])
from ceed.main import CeedApp
from kivy.tests.async_common import UnitKivyApp
class CeedTestApp(CeedApp, UnitKivyApp):
def __init__(self, ini_file, **kwargs):
self._ini_config_filename = ini_file
self._data_path = os.path.dirname(ini_file)
super(CeedTestApp, self).__init__(**kwargs)
async def async_sleep(self, dt):
await trio.sleep(dt)
def check_close(self):
super(CeedTestApp, self).check_close()
return True
def handle_exception(self, msg, exc_info=None,
level='error', *largs):
super(CeedApp, self).handle_exception(
msg, exc_info, level, *largs)
if isinstance(exc_info, str):
self.get_logger().error(msg)
self.get_logger().error(exc_info)
elif exc_info is not None:
tp, value, tb = exc_info
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
elif level in ('error', 'exception'):
raise Exception(msg)
|
py | 7dfc0db12a3be3f1d5e466c2064d37a091c3db49 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetEncodersCount
# Returns the count of users who have shortened a specified Bitly link.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetEncodersCount(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetEncodersCount Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetEncodersCount, self).__init__(temboo_session, '/Library/Bitly/LinkMetrics/GetEncodersCount')
def new_input_set(self):
return GetEncodersCountInputSet()
def _make_result_set(self, result, path):
return GetEncodersCountResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetEncodersCountChoreographyExecution(session, exec_id, path)
class GetEncodersCountInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetEncodersCount
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The OAuth access token provided by Bitly.)
"""
super(GetEncodersCountInputSet, self)._set_input('AccessToken', value)
def set_Link(self, value):
"""
Set the value of the Link input for this Choreo. ((required, string) A Bitly link.)
"""
super(GetEncodersCountInputSet, self)._set_input('Link', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that you want the response to be in. Accepted values are "json" or "xml". Defaults to "json".)
"""
super(GetEncodersCountInputSet, self)._set_input('ResponseFormat', value)
class GetEncodersCountResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetEncodersCount Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Bitly.)
"""
return self._output.get('Response', None)
class GetEncodersCountChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetEncodersCountResultSet(response, path)
|
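A hedged usage sketch for the Choreo above, assuming the standard Temboo session API (`TembooSession`, `execute_with_results`); the account name, app key and Bitly values are placeholders, not real credentials:

```python
from temboo.core.session import TembooSession

# Assumes GetEncodersCount from the module above is importable.
session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')  # placeholders
choreo = GetEncodersCount(session)

inputs = choreo.new_input_set()
inputs.set_AccessToken('BITLY_OAUTH_TOKEN')  # placeholder token
inputs.set_Link('http://bit.ly/example')     # placeholder link
inputs.set_ResponseFormat('json')

results = choreo.execute_with_results(inputs)  # runs the Choreo and waits for the result
print(results.get_Response())
```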
py | 7dfc0db4ab16f08fd5d287d9605d8644274cac62 | from pulser.devices import Chadoq2
import networkx as nx
from scipy.spatial import distance_matrix
import numpy as np
from numpy.linalg import eigh
import math
def compute_rydberg(G):
pos_list =[]
for n in G.nodes():
pos_list.append(G.nodes[n]['pos'])
pos=np.array(pos_list)
# find the rydberg blockade radius
dist_matrix = distance_matrix(pos, pos)
A = nx.to_numpy_matrix(G)
blockade_radius = dist_matrix[A==1].max()
rabi_freq = Chadoq2.rabi_from_blockade(blockade_radius)
return rabi_freq, blockade_radius
def _check_maximal(A, x):
not_selected_nodes = np.where(x == 0)[0]
maximal_set = True
for node in not_selected_nodes:
x_copy = x.copy()
x_copy[node]=1
if x_copy.T@A@x_copy==0:
maximal_set = False
break
return maximal_set
def is_MIS(x,G):
A = nx.to_numpy_matrix(G)
num_conflicts = int(x.T@A@x)
maximal_set = _check_maximal(A, x)
is_MIS = (num_conflicts == 0 and maximal_set)
return is_MIS
def compute_subgraph(x, G):
MIS_set = []
node_set = list(G.nodes())
for node in range(len(x)):
if x[node] == 1:
MIS_set.append(node_set[node])
remaining_nodes = set(node_set).difference(set(MIS_set))
H = G.subgraph(remaining_nodes)
return H, MIS_set
def NetworkxGC(G):
strategies = ['largest_first', 'random_sequential', 'smallest_last', 'independent_set', 'connected_sequential_bfs', 'connected_sequential_dfs', 'saturation_largest_first']
for strategy in strategies:
coloring = nx.coloring.greedy_color(G, strategy=strategy)
num_colors=max(coloring.values())+1
print('Networkx solution with {} strategy uses {} colors'.format(strategy, num_colors))
def compute_obj(G, colors_used, previous_coloring):
if len(G.edges())>0:
# coloring starts with index 0
obj = colors_used+len(G.nodes())
# for greedy_color_node in coloring.keys():
greedy_color = colors_used
for remaining_node in G.nodes():
greedy_color += 1
previous_coloring[remaining_node]=greedy_color
else:
for node in G.nodes():
previous_coloring[node]=colors_used+1
obj = colors_used+1
return obj, previous_coloring
def compute_LB(x, G):
H, MIS_set = compute_subgraph(x, G)
num_edges = len(H.edges())
A = nx.to_numpy_matrix(H)
if num_edges>0:
remaining_nodes = len(H.nodes())
# this method is aware that matrix A is symmetric
eigs, _ = eigh(A)
eigs=np.sort(eigs)
lambda_1=eigs[-1]
lambda_n=eigs[0]
n_plus=sum(eigs>0)
n_minus=sum(eigs<0)
# compute lower bound
HoffmanLB = math.floor(1-lambda_1/lambda_n)
ElphicLB = math.floor(1+max(n_minus/n_plus, n_plus/n_minus))
EdwardsLB = math.floor(remaining_nodes/(remaining_nodes-lambda_1))
LB=max([HoffmanLB, EdwardsLB, ElphicLB])
else:
LB=1
return H, LB, MIS_set
def compute_UB(H):
A = nx.to_numpy_matrix(H)
degrees = sum(A)
degrees=-np.sort(-degrees)
max_degree = degrees[0,0]
UB_array = np.zeros((degrees.shape[1]))
for i in range(degrees.shape[1]):
UB_array[i] = min(degrees[0,i]+1,i)
UB_chr_number = np.max(UB_array)
UB = int(min(max_degree+1, UB_chr_number))
return UB
def fingerprint(vertexes):
fp = -1
for i in vertexes:
fp+=2**i
return fp
def print_BB_history(story_dict):
hist_str='Best solution history:\n'
for step, idx in story_dict.items():
if step !=0:
hist_str+='\t Step {} -> MIS solution at position {}\n'.format(step, idx)
print(hist_str)
def print_itMIS(coloring_dict):
MIS_dict={}
#initializing an empty dictionary for inverting
for node, color in coloring_dict.items():
#iterative step for each key value pair in books_copies dictionary
if color in MIS_dict:
MIS_dict[color].append(node)
else:
MIS_dict[color] = [node]
for color, node_set in MIS_dict.items():
print('Step {} -> nodes in MIS solution {}'.format(color+1, node_set))
|
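As a self-contained illustration of the maximal-independent-set check that `is_MIS` performs above, here is the same test on a 4-node path graph; it only assumes `networkx` and `numpy`:

```python
import networkx as nx
import numpy as np

G = nx.path_graph(4)          # nodes 0-1-2-3
A = nx.to_numpy_array(G)      # adjacency matrix
x = np.array([1, 0, 1, 0])    # candidate independent set {0, 2}

conflicts = int(x @ A @ x)    # counts (twice) the edges inside the set
maximal = True
for v in np.where(x == 0)[0]:  # try adding each excluded node
    x_try = x.copy()
    x_try[v] = 1
    if int(x_try @ A @ x_try) == 0:
        maximal = False        # v could be added without a conflict
        break

print(conflicts == 0 and maximal)  # True: {0, 2} is a maximal independent set
```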
py | 7dfc0df9cc3900864184b79a8a5bc9af48082bca | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db.models.manager import EmptyManager
from google.appengine.ext import db
from django.contrib.auth.models import *
AUTH_USER_MODULE = getattr(settings, 'AUTH_USER_MODULE', None)
if AUTH_USER_MODULE:
User = __import__(AUTH_USER_MODULE, {}, {}, ['']).User
if User._meta.app_label != 'auth':
# Remove ragendja's auth.user model registration from Django
from django.db.models.loading import cache
del cache.app_models['auth']['user']
else:
from ragendja.auth.models import User
class Message(db.Model):
"""User message model"""
user = db.ReferenceProperty(User)
message = db.TextProperty()
class Group(db.Model):
"""Group model not fully implemented yet."""
# TODO: Implement this model, requires contenttypes
name = db.StringProperty()
permissions = EmptyManager()
class Permission(db.Model):
"""Permission model not fully implemented yet."""
# TODO: Implement this model, requires contenttypes
name = db.StringProperty()
|
py | 7dfc0e52a30af0818e95401d5fbb8c3584752a3f | """Purpose: generate profiles for obs and models.
Author: Michel Zeller
Date: 09/03/2022.
"""
# Standard library
from pprint import pprint
# Third-party
import matplotlib.pyplot as plt
# First-party
from plot_profile.utils.stations import sdf
from plot_profile.utils.utils import colour_dict
from plot_profile.utils.utils import save_fig
from plot_profile.utils.variables import vdf
def create_plot(
data_dict,
lt_dict,
multi_axes,
location,
date,
xlims,
ylims,
colours,
grid,
show_marker,
datatypes,
outpath,
appendix,
verbose=True,
):
# get location dataframe
loc = sdf[location]
devices = data_dict.keys()
# get ymin, ymax
ymin = ylims[0]
ymax = ylims[1]
# determine ymin_dynamic from data & apply if ymin=None
ymin_dynamic = None
# overwrite colour-dict with user-specified colours
if isinstance(colours, tuple):
for i, colour in enumerate(colours):
colour_dict[i] = colour
# prepare figure
fig, ax = plt.subplots(1, 1, figsize=(5, 8), tight_layout=True)
if multi_axes:
top_ax = ax.twiny()
if grid:
ax.grid(which="major", color="#DDDDDD", linewidth=0.8)
ax.grid(which="minor", color="#EEEEEE", linestyle=":", linewidth=0.5)
ax.minorticks_on()
# xlims = (xmin, xmax) & xmin = (xmin1, xmin2) & xmax = (xmax1, xmax2)
if xlims:
xmins = xlims[0]
xmaxs = xlims[1]
if len(xmins) == len(xmaxs):
if len(xmins) == 1:
ax.set_xlim(xmins[0], xmaxs[0])
if len(xmins) == 2: # have xmins for two x-axes
ax.set_xlim(xmins[0], xmaxs[0])
top_ax.set_xlim(xmins[1], xmaxs[1])
else:
print(
f"Check xmin/xmax values again. Got {len(xmins)} x-min values and {len(xmaxs)} x-max values."
)
print(f"Warning: No x-limits have been applied.")
title = f"{loc.long_name}, {date.strftime('%d. %b, %Y, %H:%M')} UTC"
if multi_axes:
ax.set_title(label=title, bbox=dict(facecolor="none"), x=0.5, y=1.07)
else:
ax.set_title(label=title, bbox=dict(facecolor="none"), x=0.5, y=1.02)
ax.set_ylabel(f"Altitude [m asl]")
first_unit, second_unit = None, None
# plotting
colour_index = 0
for i, device in enumerate(devices):
model = False
# 1) retrieve df
df = data_dict[device]
if verbose:
print(i, device)
pprint(df)
# y-axis information: altitude
altitude = df["height"]
altitude_min = altitude.min()
if ymin_dynamic == None:
ymin_dynamic = altitude_min
elif (ymin_dynamic is not None) and (altitude_min < ymin_dynamic):
ymin_dynamic = altitude_min
# check if there are more than one variable in this dataframe
if verbose:
if len(df.columns) > 2:
print(f"More than one variable in the df for {device}")
else:
print(f"Only one variable in the df for {device}")
# iterate over the columns
for (variable, columnData) in df.iteritems():
if variable == "height":
continue
if verbose:
print(f" Variable: {variable}")
# extract current variable
variable = vdf[variable]
unit = variable.unit
var_long = variable.long_name
x = columnData.values
if device in lt_dict:
lt = lt_dict[device]
if "~" in device: # this means it must be a model (model~model_id)
if device.split("~")[1] != "0":
label = f"{var_long}: {device.split('~')[0].upper()} {device.split('~')[1].upper()} ({lt}h)"
else:
label = f"{var_long}: {device.split('~')[0].upper()} ({lt}h)"
else: # else it is a device
label = f"{var_long}: {device.upper()}"
# define unit for the bottom axis
if not first_unit:
first_unit = unit
ax.set_xlabel(f"{first_unit}")
# define unit for the right axes
if (not second_unit) and (unit is not first_unit):
second_unit = unit
top_ax.set_xlabel(f"{second_unit}")
# specify marker
if ("icon" in device) and show_marker:
marker = "d"
else:
marker = None
# choose correct axes for the current variable and plot data
if unit == first_unit:
ax.plot(
x,
altitude,
color=colour_dict[colour_index],
linestyle="-",
marker=marker,
label=label,
)
if unit == second_unit:
top_ax.plot(
x,
altitude,
color=colour_dict[colour_index],
linestyle="-",
marker=marker,
label=label,
)
colour_index += 1
# add ylim
if ymin == None:
ax.set_ylim(ymin_dynamic, ymax)
else:
ax.set_ylim(ymin, ymax)
# add legends
if multi_axes:
h1, l1 = ax.get_legend_handles_labels()
h2, l2 = top_ax.get_legend_handles_labels()
ax.legend(h1 + h2, l1 + l2, fontsize="small")
else:
ax.legend(fontsize="small")
# filename
start_str = date.strftime("%y%m%d_%H")
var_dev = ""
for key, df in data_dict.items():
# a) keys: "icon~0", "icon~1", "2m", "2m_tower"
# remove "0" for model-levels
if "~0" in key:
key = key.split(sep="~")[0]
var_dev += f"_{key}"
# b) columns: "clct", "sw_up", "temp"
columns = df.columns
for column in columns:
if column != "height":
var_dev += f"_{column}"
filename = f"profiles_{start_str}_{loc.short_name}{var_dev}"
save_fig(filename, datatypes, outpath, fig=fig)
plt.clf()
first_unit, second_unit = None, None
return
|
py | 7dfc1081830c060b6bf7fc55511683985f358f4a | """Debug menu.
The debug menu is for developer-focused functionality that we want to be
easy-to-use and discoverable, but which is not for the average user.
Current Items
-------------
Trace File -> Start Tracing...
Trace File -> Stop Tracing
"""
from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QAction, QFileDialog
from ..utils import perf
def _ensure_extension(filename: str, extension: str):
"""Add the extension if needed."""
if filename.endswith(extension):
return filename
return filename + extension
class DebugMenu:
def __init__(self, main_window):
"""Create the debug menu.
Parameters
----------
main_window : qtpy.QtWidgets.QMainWindow
The main window whose menu bar we add ourselves to.
"""
self.debug_menu = main_window.main_menu.addMenu('&Debug')
self.perf = PerformanceSubMenu(
main_window, self.debug_menu.addMenu("Performance Trace")
)
class PerformanceSubMenu:
"""The flyout menu to start/stop recording a trace file.
"""
def __init__(self, main_window, sub_menu):
self.main_window = main_window
self.sub_menu = sub_menu
self.start = self._add_start()
self.stop = self._add_stop()
self._set_recording(False)
def _set_recording(self, recording: bool):
"""Toggle which are enabled/disabled.
Parameters
----------
recording : bool
Are we currently recording a trace file.
"""
self.start.setEnabled(not recording)
self.stop.setEnabled(recording)
def _add_start(self):
"""Add Start Recording action.
"""
start = QAction('Start Recording...', self.main_window._qt_window)
start.setShortcut('Alt+T')
start.setStatusTip('Start recording a trace file')
start.triggered.connect(self._start_trace)
self.sub_menu.addAction(start)
return start
def _add_stop(self):
"""Add Stop Recording action.
"""
stop = QAction('Stop Recording', self.main_window._qt_window)
stop.setShortcut('Shift+Alt+T')
stop.setStatusTip('Stop recording a trace file')
stop.triggered.connect(self._stop_trace)
self.sub_menu.addAction(stop)
return stop
def _start_trace(self):
"""Start recording a trace file."""
viewer = self.main_window.qt_viewer
filename, _ = QFileDialog.getSaveFileName(
parent=viewer,
caption='Record performance trace file',
directory=viewer._last_visited_dir,
filter="Trace Files (*.json)",
)
if filename:
filename = _ensure_extension(filename, '.json')
def start_trace():
perf.timers.start_trace_file(filename)
self._set_recording(True)
# Schedule this to avoid bogus "MetaCall" event for the entire
# time the file dialog was up.
QTimer.singleShot(0, start_trace)
def _stop_trace(self):
"""Stop recording a trace file.
"""
perf.timers.stop_trace_file()
self._set_recording(False)
|
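The `QTimer.singleShot(0, ...)` call in `_start_trace` defers the work until the event loop is idle again, so the file dialog's own events are not captured in the trace. A minimal standalone sketch of that deferral pattern (assumes qtpy with a Qt binding installed):

```python
from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QApplication

app = QApplication([])

def deferred():
    # Runs on the next event-loop iteration, after the triggering event
    # (e.g. a dialog closing) has fully unwound.
    print("deferred work runs now")
    app.quit()

QTimer.singleShot(0, deferred)
app.exec_()
```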
py | 7dfc10ecb579b7a5165e890354e8a8335da30ba9 | a = list(map(int,input().split()))
def heapsort(arr,e):
i=1
while i<e:
upheap(arr,i)
i+=1
i-=1
while i>0:
tmp=arr[0]
arr[0]=arr[i]
arr[i]=tmp
downheap(arr,i)  # sift down over the remaining heap of size i (fixes an off-by-one)
i-=1
def leftC(i):
return int(((i) + 1) * 2 - 1)
def rightC(i):
return int(((i) + 1) * 2)
def parent(i):
return int(((i) + 1) / 2 - 1)
def upheap(arr,n):
while n>0:
m = parent(n)
if arr[m]<arr[n]:
tmp=arr[m]
arr[m]=arr[n]
arr[n]=tmp
else:
break
n=m
def downheap(arr,n):
m=0
tmp=0
while True:
lc=leftC(m)
rc=rightC(m)
if lc>=n:
break
if arr[lc]>arr[tmp]:
tmp=lc
if rc<n and arr[rc]>arr[tmp]:
tmp=rc
if tmp==m:
break
swp=arr[tmp]
arr[tmp]=arr[m]
arr[m]=swp
m=tmp
heapsort(a,len(a))
#print(a)
|
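For comparison, a reference heapsort built on the standard library's `heapq` (a min-heap, so popping yields ascending order directly); the last line uses the input that exposes the sift-down ordering issue fixed above:

```python
import heapq
import random

def heapsort_ref(values):
    heap = list(values)
    heapq.heapify(heap)            # O(n) heap construction
    return [heapq.heappop(heap) for _ in range(len(heap))]

data = [random.randint(0, 99) for _ in range(20)]
assert heapsort_ref(data) == sorted(data)
print(heapsort_ref([5, 1, 4, 0]))  # [0, 1, 4, 5]
```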
py | 7dfc116d315aeb167a7417a34cb3356c037b72b1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectedClusterIdentity(Model):
"""Identity for the connected cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The principal id of connected cluster identity. This
property will only be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id associated with the connected cluster. This
property will only be provided for a system assigned identity.
:vartype tenant_id: str
:param type: Required. The type of identity used for the connected
cluster. The type 'SystemAssigned' includes a system created identity. The
type 'None' means no identity is assigned to the connected cluster.
Possible values include: 'None', 'SystemAssigned'
:type type: str or
~azure.mgmt.hybridkubernetes.v2020_01_01_preview.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'ResourceIdentityType'},
}
def __init__(self, **kwargs):
super(ConnectedClusterIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
|
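A brief usage sketch for the msrest model above (assuming the class is importable); `'SystemAssigned'` is one of the documented values, and the read-only ids stay `None` until the service populates them:

```python
identity = ConnectedClusterIdentity(type='SystemAssigned')
print(identity.type)          # 'SystemAssigned'
print(identity.principal_id)  # None -- read-only, filled in by the server
print(identity.tenant_id)     # None -- read-only, filled in by the server
```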
py | 7dfc145774e67ec750ebf37f98c3545b49600dbe | """
Discussion:
(1) Does the general shape of the inter-switch interval distribution
change or does it stay relatively the same?
(2) How does the bar graph of system states change based on these values?
Answers:
(1) The shape of the distribution remains the same, but larger values of either
c2o or o2c shift the distribution towards shorter intervals.
(2) If c2o is larger than o2c, then the channel tends to be open a larger
fraction of the time.
"""; |
py | 7dfc14f3aed08c9829587581d50933322fb5d14f | """The most commonly used constructors are available from this toplevel module.
The rest are in the subpackages: core, draw, evolve, format, maths, parse
and phylo.
"""
import os
import pickle
import re
import sys
import warnings
import numpy
from cogent3.app import available_apps
from cogent3.core.alignment import (
Alignment,
ArrayAlignment,
SequenceCollection,
)
from cogent3.core.genetic_code import available_codes, get_code
# note that moltype has to be imported last, because it sets the moltype in
# the objects created by the other modules.
from cogent3.core.moltype import (
ASCII,
DNA,
PROTEIN,
RNA,
STANDARD_CODON,
CodonAlphabet,
available_moltypes,
get_moltype,
)
from cogent3.core.tree import PhyloNode, TreeBuilder, TreeError, TreeNode
from cogent3.evolve.fast_distance import (
available_distances,
get_distance_calculator,
)
from cogent3.evolve.models import available_models, get_model
from cogent3.parse.cogent3_json import load_from_json
from cogent3.parse.newick import parse_string as newick_parse_string
from cogent3.parse.sequence import FromFilenameParser
from cogent3.parse.table import load_delimited
from cogent3.parse.tree_xml import parse_string as tree_xml_parse_string
from cogent3.util.io import get_format_suffixes, open_
from cogent3.util.table import Table as _Table
from cogent3.util.table import cast_str_to_array
__author__ = ""
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = [
"Gavin Huttley",
"Rob Knight",
"Peter Maxwell",
"Jeremy Widmann",
"Catherine Lozupone",
"Matthew Wakefield",
"Edward Lang",
"Greg Caporaso",
"Mike Robeson",
"Micah Hamady",
"Sandra Smit",
"Zongzhi Liu",
"Andrew Butterfield",
"Amanda Birmingham",
"Brett Easton",
"Hua Ying",
"Jason Carnes",
"Raymond Sammut",
"Helen Lindsay",
"Daniel McDonald",
]
__license__ = "BSD-3"
__version__ = "2021.10.12a1"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"
_min_version = (3, 7)
if sys.version_info < _min_version:
PY_VERSION = ".".join([str(n) for n in sys.version_info])
_min_version = ".".join(_min_version)
raise RuntimeError(
f"Python-{_min_version} or greater is required, Python-{PY_VERSION} used."
)
version = __version__
version_info = tuple([int(v) for v in version.split(".") if v.isdigit()])
warn_env = "COGENT3_WARNINGS"
if warn_env in os.environ:
warnings.simplefilter(os.environ[warn_env])
def make_seq(seq, name=None, moltype=None):
"""
Parameters
----------
seq : str
raw string to be converted to sequence object
name : str
sequence name
moltype
name of a moltype or moltype instance
Returns
-------
returns a sequence object
"""
moltype = moltype or "text"
moltype = get_moltype(moltype)
seq = moltype.make_seq(seq)
if name is not None:
seq.name = name
return seq
def make_unaligned_seqs(
data, moltype=None, label_to_name=None, info=None, source=None, **kw
):
"""Initialize an unaligned collection of sequences.
Parameters
----------
data
sequences
moltype
the moltype, eg DNA, PROTEIN, 'dna', 'protein'
label_to_name
function for converting original name into another name.
info
a dict from which to make an info object
source
origins of this data, defaults to 'unknown'
**kw
other keyword arguments passed to SequenceCollection
"""
if moltype is not None:
moltype = get_moltype(moltype)
info = info or {}
for other_kw in ("constructor_kw", "kw"):
other_kw = kw.pop(other_kw, None) or {}
kw.update(other_kw)
assert isinstance(info, dict), "info must be a dict"
info["source"] = source or info.get("source", "unknown")
return SequenceCollection(
data=data, moltype=moltype, label_to_name=label_to_name, info=info, **kw
)
def make_aligned_seqs(
data,
moltype=None,
array_align=True,
label_to_name=None,
info=None,
source=None,
**kw,
):
"""Initialize an aligned collection of sequences.
Parameters
----------
data
sequences
moltype
the moltype, eg DNA, PROTEIN, 'dna', 'protein'
array_align : bool
if True, returns ArrayAlignment, otherwise an annotatable Alignment
label_to_name
function for converting original name into another name.
info
a dict from which to make an info object
source
origins of this data, defaults to 'unknown'
**kw
other keyword arguments passed to SequenceCollection
"""
if moltype is not None:
moltype = get_moltype(moltype)
info = info or {}
for other_kw in ("constructor_kw", "kw"):
other_kw = kw.pop(other_kw, None) or {}
kw.update(other_kw)
assert isinstance(info, dict), "info must be a dict"
info["source"] = source or info.get("source", "unknown")
klass = ArrayAlignment if array_align else Alignment
return klass(
data=data, moltype=moltype, label_to_name=label_to_name, info=info, **kw
)
def load_unaligned_seqs(
filename,
format=None,
moltype=None,
label_to_name=None,
parser_kw=None,
info=None,
**kw,
):
"""
loads unaligned sequences from file
Parameters
----------
filename : str
path to sequence file
format : str
sequence file format, if not specified tries to guess from the path suffix
moltype
the moltype, eg DNA, PROTEIN, 'dna', 'protein'
label_to_name
function for converting original name into another name.
parser_kw : dict
optional arguments for the parser
Returns
-------
``SequenceCollection``
"""
file_format, _ = get_format_suffixes(filename)
if file_format == "json":
return load_from_json(filename, (SequenceCollection,))
format = format or file_format
if not format:
msg = "could not determined file format, set using the format argument"
raise ValueError(msg)
parser_kw = parser_kw or {}
for other_kw in ("constructor_kw", "kw"):
other_kw = kw.pop(other_kw, None) or {}
kw.update(other_kw)
data = list(FromFilenameParser(filename, format, **parser_kw))
return make_unaligned_seqs(
data,
label_to_name=label_to_name,
moltype=moltype,
source=filename,
info=info,
**kw,
)
def load_aligned_seqs(
filename,
format=None,
array_align=True,
moltype=None,
label_to_name=None,
parser_kw=None,
info=None,
**kw,
):
"""
loads aligned sequences from file
Parameters
----------
filename : str
path to sequence file
format : str
sequence file format, if not specified tries to guess from the path suffix
moltype
the moltype, eg DNA, PROTEIN, 'dna', 'protein'
array_align : bool
if True, returns ArrayAlignment, otherwise an annotatable Alignment
label_to_name
function for converting original name into another name.
parser_kw : dict
optional arguments for the parser
Returns
-------
``ArrayAlignment`` or ``Alignment`` instance
"""
file_format, _ = get_format_suffixes(filename)
if file_format == "json":
return load_from_json(filename, (Alignment, ArrayAlignment))
format = format or file_format
if not format:
msg = "could not determined file format, set using the format argument"
raise ValueError(msg)
parser_kw = parser_kw or {}
for other_kw in ("constructor_kw", "kw"):
other_kw = kw.pop(other_kw, None) or {}
kw.update(other_kw)
data = list(FromFilenameParser(filename, format, **parser_kw))
return make_aligned_seqs(
data,
array_align=array_align,
label_to_name=label_to_name,
moltype=moltype,
source=filename,
info=info,
**kw,
)
def make_table(
header=None,
data=None,
row_order=None,
digits=4,
space=4,
title="",
max_width=1e100,
index_name=None,
legend="",
missing_data="",
column_templates=None,
data_frame=None,
format="simple",
**kwargs,
):
"""
Parameters
----------
header
column headings
data
a 2D dict, list or tuple. If a dict, it must have column
headings as top level keys, and common row labels as keys in each
column.
row_order
the order in which rows will be pulled from the twoDdict
digits
floating point resolution
space
number of spaces between columns or a string
title
as implied
max_width
maximum column width for printing
index_name
column name with values to be used as row identifiers and keys
for slicing. All column values must be unique.
legend
table legend
missing_data
replace missing data with this
column_templates
dict of column headings
or a function that will handle the formatting.
limit
exits after this many lines. Only applied for non pickled data
file types.
data_frame
a pandas DataFrame, supersedes header/rows
format
output format when using str(Table)
"""
if any(isinstance(a, str) for a in (header, data)):
raise TypeError("str type invalid, if it's a path use load_table()")
data = kwargs.get("rows", data)
if data_frame is not None:
from pandas import DataFrame
if not isinstance(data_frame, DataFrame):
raise TypeError(f"expecting a DataFrame, got{type(data_frame)}")
data = {c: data_frame[c].to_numpy() for c in data_frame}
return _Table(
header=header,
data=data,
digits=digits,
row_order=row_order,
title=title,
column_templates=column_templates,
space=space,
missing_data=missing_data,
max_width=max_width,
index_name=index_name,
legend=legend,
data_frame=data_frame,
format=format,
)
def load_table(
filename,
sep=None,
reader=None,
digits=4,
space=4,
title="",
missing_data="",
max_width=1e100,
index_name=None,
legend="",
column_templates=None,
static_column_types=False,
limit=None,
format="simple",
skip_inconsistent=False,
**kwargs,
):
"""
Parameters
----------
filename
path to file containing a tabular data
sep
the delimiting character between columns
reader
a parser for reading filename. This approach assumes the first
row returned by the reader will be the header row.
static_column_types
if True, and reader is None, identifies columns
with a numeric/bool data types from the first non-header row.
This assumes all subsequent entries in that column are of the same type.
Default is False.
digits
floating point resolution
space
number of spaces between columns or a string
title
as implied
missing_data
character assigned if a row has no entry for a column
max_width
maximum column width for printing
index_name
column name with values to be used as row identifiers and keys
for slicing. All column values must be unique.
legend
table legend
column_templates
dict of column headings
or a function that will handle the formatting.
limit
exits after this many lines. Only applied for non pickled data
file types.
format
output format when using str(Table)
skip_inconsistent
skips rows that have different length to header row
"""
import pathlib
if not any(isinstance(filename, t) for t in (str, pathlib.PurePath)):
raise TypeError(
"filename must be string or Path, perhaps you want make_table()"
)
sep = sep or kwargs.pop("delimiter", None)
file_format, compress_format = get_format_suffixes(filename)
if file_format == "json":
return load_from_json(filename, (_Table,))
elif file_format in ("pickle", "pkl"):
with open_(filename, mode="rb") as f:
loaded_table = pickle.load(f)
r = _Table()
r.__setstate__(loaded_table)
return r
if reader:
with open_(filename, newline=None) as f:
data = [row for row in reader(f)]
header = data[0]
data = {column[0]: column[1:] for column in zip(*data)}
else:
if file_format == "csv":
sep = sep or ","
elif file_format == "tsv":
sep = sep or "\t"
header, rows, loaded_title, legend = load_delimited(
filename, sep=sep, limit=limit, **kwargs
)
if skip_inconsistent:
num_fields = len(header)
rows = [r for r in rows if len(r) == num_fields]
else:
lengths = set(map(len, [header] + rows))
if len(lengths) != 1:
msg = f"inconsistent number of fields {lengths}"
raise ValueError(msg)
title = title or loaded_title
data = {column[0]: column[1:] for column in zip(header, *rows)}
for key, value in data.items():
data[key] = cast_str_to_array(value, static_type=static_column_types)
return make_table(
header=header,
data=data,
digits=digits,
title=title,
column_templates=column_templates,
space=space,
missing_data=missing_data,
max_width=max_width,
index_name=index_name,
legend=legend,
format=format,
)
def make_tree(treestring=None, tip_names=None, format=None, underscore_unmunge=False):
"""Initialises a tree.
Parameters
----------
treestring
a newick or xml formatted tree string
tip_names
a list of tip names, returns a "star" topology tree
format : str
indicates treestring is either newick or xml formatted, default
is newick
underscore_unmunge : bool
replace underscores with spaces in all names read, i.e. "sp_name"
becomes "sp name"
Notes
-----
Underscore unmunging is turned off by default, although it is part
of the Newick format.
Returns
-------
PhyloNode
"""
assert treestring or tip_names, "must provide either treestring or tip_names"
if tip_names:
tree_builder = TreeBuilder().create_edge
tips = [tree_builder([], tip_name, {}) for tip_name in tip_names]
tree = tree_builder(tips, "root", {})
return tree
if format is None and treestring.startswith("<"):
format = "xml"
parser = tree_xml_parse_string if format == "xml" else newick_parse_string
tree_builder = TreeBuilder().create_edge
# FIXME: More general strategy for underscore_unmunge
if parser is newick_parse_string:
tree = parser(treestring, tree_builder, underscore_unmunge=underscore_unmunge)
else:
tree = parser(treestring, tree_builder)
if not tree.name_loaded:
tree.name = "root"
return tree
def load_tree(filename, format=None, underscore_unmunge=False):
"""Constructor for tree.
Parameters
----------
filename : str
a file path containing a newick or xml formatted tree.
format : str
either xml or json, all other values default to newick. Overrides
file name suffix.
underscore_unmunge : bool
replace underscores with spaces in all names read, i.e. "sp_name"
becomes "sp name".
Notes
-----
Underscore unmunging is turned off by default, although it is part
of the Newick format. Only the cogent3 json and xml tree formats are
supported.
Returns
-------
PhyloNode
"""
file_format, _ = get_format_suffixes(filename)
format = format or file_format
if format == "json":
return load_from_json(filename, (TreeNode, PhyloNode))
with open_(filename) as tfile:
treestring = tfile.read()
return make_tree(treestring, format=format, underscore_unmunge=underscore_unmunge)
|
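A short usage sketch for the constructors defined above (`make_seq`, `make_aligned_seqs`, `make_tree`); the sequences and tree are made-up examples:

```python
from cogent3 import make_aligned_seqs, make_seq, make_tree

seq = make_seq("ACGGT", name="seq1", moltype="dna")
aln = make_aligned_seqs({"a": "ACG-T", "b": "ACGGT"}, moltype="dna")
tree = make_tree("((a,b),c);")

print(seq)
print(aln)
print(tree.get_newick())
```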
py | 7dfc1591a9d612ae51eeb8153ddeb8ee3fbec3d2 | import asyncio
import unittest
from typing import Awaitable
from unittest.mock import patch, MagicMock
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.hummingbot_application import HummingbotApplication
from test.mock.mock_cli import CLIMockingAssistant
class BalanceCommandTest(unittest.TestCase):
@patch("hummingbot.core.utils.trading_pair_fetcher.TradingPairFetcher")
def setUp(self, _: MagicMock) -> None:
super().setUp()
self.ev_loop = asyncio.get_event_loop()
self.app = HummingbotApplication()
self.cli_mock_assistant = CLIMockingAssistant()
self.cli_mock_assistant.start()
def tearDown(self) -> None:
self.cli_mock_assistant.stop()
super().tearDown()
@staticmethod
def get_async_sleep_fn(delay: float):
async def async_sleep(*_, **__):
await asyncio.sleep(delay)
return async_sleep
def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
def async_run_with_timeout_coroutine_must_raise_timeout(self, coroutine: Awaitable, timeout: float = 1):
class DesiredError(Exception):
pass
async def run_coro_that_raises(coro: Awaitable):
try:
await coro
except asyncio.TimeoutError:
raise DesiredError
try:
self.async_run_with_timeout(run_coro_that_raises(coroutine), timeout)
except DesiredError: # the coroutine raised an asyncio.TimeoutError as expected
raise asyncio.TimeoutError
except asyncio.TimeoutError: # the coroutine did not finish on time
raise RuntimeError
@patch("hummingbot.user.user_balances.UserBalances.all_balances_all_exchanges")
def test_show_balances_handles_network_timeouts(
self, all_balances_all_exchanges_mock
):
all_balances_all_exchanges_mock.side_effect = self.get_async_sleep_fn(delay=0.02)
global_config_map["other_commands_timeout"].value = 0.01
with self.assertRaises(asyncio.TimeoutError):
self.async_run_with_timeout_coroutine_must_raise_timeout(self.app.show_balances())
self.assertTrue(
self.cli_mock_assistant.check_log_called_with(
msg="\nA network error prevented the balances to update. See logs for more details."
)
)
|
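The `async_run_with_timeout_coroutine_must_raise_timeout` helper above re-tags a `TimeoutError` raised inside the coroutine so it cannot be confused with the outer `wait_for` timing out. A standalone sketch of the same pattern (names are illustrative only):

```python
import asyncio

class DesiredError(Exception):
    """Sentinel meaning: the coroutine raised TimeoutError itself."""

async def target():
    raise asyncio.TimeoutError  # the behaviour under test

async def run_and_retag(coro):
    try:
        await coro
    except asyncio.TimeoutError:
        raise DesiredError  # re-tag so it survives the outer wait_for

try:
    asyncio.run(asyncio.wait_for(run_and_retag(target()), timeout=1))
except DesiredError:
    print("coroutine raised TimeoutError as expected")
except asyncio.TimeoutError:
    print("coroutine did not finish in time")
```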
py | 7dfc15d576ad15bcdbdbad6fbce866390f8e2e94 | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon.k8s import k8s_schemas
from polyaxon.polypod.common.accelerators import (
has_tpu_annotation,
requests_gpu,
requests_tpu,
)
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.polypod_mark
class TestTPUs(BaseTestCase):
def test_has_tpu_annotation(self):
assert has_tpu_annotation(None) is False
assert has_tpu_annotation({}) is False
assert has_tpu_annotation({"foo": "bar"}) is False
assert has_tpu_annotation({"tf-version.cloud-tpus.google.com": "1.13"}) is True
def test_requests_tpu(self):
assert (
requests_tpu(k8s_schemas.V1ResourceRequirements(limits={"cpu": 1})) is False
)
assert (
requests_tpu(
k8s_schemas.V1ResourceRequirements(
limits={"cloud-tpus.google.com/v2": 1}
)
)
is True
)
assert (
requests_tpu(
k8s_schemas.V1ResourceRequirements(
requests={"cloud-tpus.google.com/v2:": 32}
)
)
is True
)
def test_requests_gpu(self):
assert (
requests_gpu(k8s_schemas.V1ResourceRequirements(limits={"cpu": 1})) is False
)
assert (
requests_gpu(k8s_schemas.V1ResourceRequirements(limits={"amd.com/gpu": 1}))
is True
)
assert (
requests_gpu(
k8s_schemas.V1ResourceRequirements(requests={"nvidia.com/gpu": 1})
)
is True
)
|
py | 7dfc17651b6f2828b6e6d2c9e6530ddda063b1b2 | from datetime import datetime, timezone
import disnake
from disnake.ext import commands
from cogs.mixins import AceMixin
from utils.time import pretty_timedelta, pretty_datetime
from utils.string import po
class WhoIs(AceMixin, commands.Cog):
'''View info about a member.'''
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def info(self, ctx, *, member: disnake.Member = None):
'''Display information about user or self.'''
member = member or ctx.author
e = disnake.Embed(description='')
if member.bot:
e.description = 'This account is a bot.\n\n'
e.description += member.mention
e.add_field(name='Status', value=member.status)
if member.activity:
e.add_field(name='Activity', value=member.activity.name)
e.set_author(name=str(member), icon_url=member.display_avatar.url)
now = datetime.now(timezone.utc)
created = member.created_at
joined = member.joined_at
e.add_field(
name='Account age',
value='{0} • Created <t:{1}:F>'.format(pretty_timedelta(now - created), round(created.timestamp())),
inline=False
)
e.add_field(
name='Member for',
value='{0} • Joined <t:{1}:F>'.format(pretty_timedelta(now - joined), round(joined.timestamp()))
)
if len(member.roles) > 1:
e.add_field(name='Roles', value=' '.join(role.mention for role in reversed(member.roles[1:])), inline=False)
e.set_footer(text='ID: ' + str(member.id))
await ctx.send(embed=e)
@commands.command(aliases=['newmembers'])
@commands.bot_has_permissions(embed_links=True)
async def newusers(self, ctx, *, count=5):
'''List newly joined members.'''
count = min(max(count, 5), 25)
now = datetime.now(timezone.utc)
e = disnake.Embed()
for idx, member in enumerate(sorted(ctx.guild.members, key=lambda m: m.joined_at, reverse=True)):
if idx >= count:
break
value = 'Joined {0} ago\nCreated {1} ago'.format(pretty_timedelta(now - member.joined_at), pretty_timedelta(now - member.created_at))
e.add_field(name=po(member), value=value, inline=False)
await ctx.send(embed=e)
@commands.command()
async def avatar(self, ctx, *, member: disnake.Member):
'''Show an enlarged version of a member's avatar.'''
await ctx.send(member.display_avatar.url)
def setup(bot):
bot.add_cog(WhoIs(bot))
|
py | 7dfc17f9aaad38e6990c6cbc2a2d2c2cc21577f4 | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('charts.urls')),
]
|
py | 7dfc18abadd2795d4ebc02b9fb0dd94fbd5a6915 | # -*- coding: utf-8 -*-
"""
Defines the function :func:`repr2`, which allows for a bit more customization than
:func:`repr` or :func:`pprint`. See the docstring for more details.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import collections
def repr2(data, **kwargs):
"""
Makes a pretty and easy-to-doctest string representation!
This is an alternative to repr, and :func:`pprint.pformat` that attempts to be
both more configurable and generate output that is consistent between
python versions.
Notes:
This function has many keyword arguments that can be used to customize
the final representation. For convenience some of the more frequently
used kwargs have short aliases. See "Args" for more details.
Args:
data (object): an arbitrary python object
**kwargs: see "the Kwargs" section
Kwargs:
si, stritems, (bool):
dict/list items use str instead of repr
strkeys, sk (bool):
dict keys use str instead of repr
strvals, sv (bool):
dict values use str instead of repr
nl, newlines (int | bool):
number of top level nestings to place a newline after. If true all
items are followed by newlines regardless of nesting level.
Defaults to 1 for lists and True for dicts.
nobr, nobraces (bool, default=False):
if True, text will not contain outer braces for containers
cbr, compact_brace (bool, default=False):
if True, braces are compactified (i.e. they will not have newlines
placed directly after them, think java / K&R / 1TBS)
trailsep, trailing_sep (bool):
if True, a separator is placed after the last item in a sequence.
By default this is True if there are any ``nl > 0``.
explicit (bool, default=False):
changes dict representation from ``{k1: v1, ...}`` to
``dict(k1=v1, ...)``.
precision (int, default=None):
if specified floats are formatted with this precision
kvsep (str, default=': '):
separator between keys and values
itemsep (str, default=' '):
separator between items
sort (bool | callable, default=None):
if None, then sort unordered collections, but keep the ordering of
ordered collections. This option attempts to be deterministic in
most cases.
New in 0.8.0: if ``sort`` is callable, it will be used as a
key-function to sort all collections.
if False, then nothing will be sorted, and the representation of
unordered collections will be arbitrary and possibly
non-deterministic.
if True, attempts to sort all collections in the returned text.
Currently if True this WILL sort lists.
Currently if True this WILL NOT sort OrderedDicts.
NOTE:
The previous behavior may not be intuitive, as such the
behavior of this arg is subject to change.
suppress_small (bool):
passed to :func:`numpy.array2string` for ndarrays
max_line_width (int):
passed to :func:`numpy.array2string` for ndarrays
with_dtype (bool):
only relevant to ndarrays. if True includes the dtype.
extensions (FormatterExtensions):
a custom :class:`FormatterExtensions` instance that can overwrite or
define how different types of objects are formatted.
Returns:
str: outstr - output string
Notes:
There are also internal kwargs, which should not be used:
_return_info (bool): return information about child context
_root_info (depth): information about parent context
CommandLine:
python -m ubelt.util_format repr2:0
python -m ubelt.util_format repr2:1
Example:
>>> from ubelt.util_format import *
>>> import ubelt as ub
>>> dict_ = {
... 'custom_types': [slice(0, 1, None), 1/3],
... 'nest_dict': {'k1': [1, 2, {3: {4, 5}}],
... 'key2': [1, 2, {3: {4, 5}}],
... 'key3': [1, 2, {3: {4, 5}}],
... },
... 'nest_dict2': {'k': [1, 2, {3: {4, 5}}]},
... 'nested_tuples': [tuple([1]), tuple([2, 3]), frozenset([4, 5, 6])],
... 'one_tup': tuple([1]),
... 'simple_dict': {'spam': 'eggs', 'ham': 'jam'},
... 'simple_list': [1, 2, 'red', 'blue'],
... 'odict': ub.odict([(1, '1'), (2, '2')]),
... }
>>> # In the interest of saving space we are only going to show the
>>> # output for the first example.
>>> result = repr2(dict_, nl=1, precision=2)
>>> print(result)
{
'custom_types': [slice(0, 1, None), 0.33],
'nest_dict': {'k1': [1, 2, {3: {4, 5}}], 'key2': [1, 2, {3: {4, 5}}], 'key3': [1, 2, {3: {4, 5}}]},
'nest_dict2': {'k': [1, 2, {3: {4, 5}}]},
'nested_tuples': [(1,), (2, 3), {4, 5, 6}],
'odict': {1: '1', 2: '2'},
'one_tup': (1,),
'simple_dict': {'ham': 'jam', 'spam': 'eggs'},
'simple_list': [1, 2, 'red', 'blue'],
}
>>> # You can try the rest yourself.
>>> result = repr2(dict_, nl=3, precision=2); print(result)
>>> result = repr2(dict_, nl=2, precision=2); print(result)
>>> result = repr2(dict_, nl=1, precision=2, itemsep='', explicit=True); print(result)
>>> result = repr2(dict_, nl=1, precision=2, nobr=1, itemsep='', explicit=True); print(result)
>>> result = repr2(dict_, nl=3, precision=2, cbr=True); print(result)
>>> result = repr2(dict_, nl=3, precision=2, si=True); print(result)
>>> result = repr2(dict_, nl=3, sort=True); print(result)
>>> result = repr2(dict_, nl=3, sort=False, trailing_sep=False); print(result)
>>> result = repr2(dict_, nl=3, sort=False, trailing_sep=False, nobr=True); print(result)
Example:
>>> from ubelt.util_format import *
>>> def _nest(d, w):
... if d == 0:
... return {}
... else:
... return {'n{}'.format(d): _nest(d - 1, w + 1), 'm{}'.format(d): _nest(d - 1, w + 1)}
>>> dict_ = _nest(d=4, w=1)
>>> result = repr2(dict_, nl=6, precision=2, cbr=1)
>>> print('---')
>>> print(result)
>>> result = repr2(dict_, nl=-1, precision=2)
>>> print('---')
>>> print(result)
"""
custom_extensions = kwargs.get('extensions', None)
_return_info = kwargs.get('_return_info', False)
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
outstr = None
_leaf_info = None
if custom_extensions:
func = custom_extensions.lookup(data)
if func is not None:
outstr = func(data, **kwargs)
if outstr is None:
if isinstance(data, dict):
outstr, _leaf_info = _format_dict(data, **kwargs)
elif isinstance(data, (list, tuple, set, frozenset)):
outstr, _leaf_info = _format_list(data, **kwargs)
if outstr is None:
# check any globally registered functions for special formatters
func = _FORMATTER_EXTENSIONS.lookup(data)
if func is not None:
outstr = func(data, **kwargs)
else:
outstr = _format_object(data, **kwargs)
if _return_info:
_leaf_info = _rectify_leaf_info(_leaf_info)
return outstr, _leaf_info
else:
return outstr
def _rectify_root_info(_root_info):
if _root_info is None:
_root_info = {
'depth': 0,
}
return _root_info
def _rectify_leaf_info(_leaf_info):
if _leaf_info is None:
_leaf_info = {
'max_height': 0,
'min_height': 0,
}
return _leaf_info
class FormatterExtensions(object):
"""
Helper class for managing non-builtin (e.g. numpy) format types.
This module (:mod:`ubelt.util_format`) maintains a global set of basic
extensions, but it is also possible to create a locally scoped set of
extensions and explicilty pass it to repr2. The following example
demonstrates this.
Example:
>>> import ubelt as ub
>>> class MyObject(object):
>>> pass
>>> data = {'a': [1, 2.2222, MyObject()], 'b': MyObject()}
>>> # Create a custom set of extensions
>>> extensions = ub.FormatterExtensions()
>>> # Register a function to format your specific type
>>> @extensions.register(MyObject)
>>> def format_myobject(data, **kwargs):
>>> return 'I can do anything here'
>>> # Repr2 will now respect the passed custom extensions
>>> # Note that the global extensions will still be respected
>>> # unless they are overloaded.
>>> print(ub.repr2(data, nl=-1, precision=1, extensions=extensions))
{
'a': [1, 2.2, I can do anything here],
'b': I can do anything here
}
>>> # Overload the formatter for float and int
>>> @extensions.register((float, int))
>>> def format_myobject(data, **kwargs):
>>> return str((data + 10) // 2)
>>> print(ub.repr2(data, nl=-1, precision=1, extensions=extensions))
{
'a': [5, 6.0, I can do anything here],
'b': I can do anything here
}
"""
# set_types = [set, frozenset]
# list_types = [list, tuple]
# dict_types = [dict]
# custom_types = {
# 'numpy': [],
# 'pandas': [],
# }
# @classmethod
# def sequence_types(cls):
# return cls.list_types + cls.set_types
def __init__(self):
self.func_registry = {}
self.lazy_init = []
# self._lazy_registrations = [
# self._register_numpy_extensions,
# self._register_builtin_extensions,
# ]
def register(self, type):
"""
Registers a custom formatting function with ub.repr2
"""
def _decorator(func):
if isinstance(type, tuple):
for t in type:
self.func_registry[t] = func
else:
self.func_registry[type] = func
return func
return _decorator
def lookup(self, data):
"""
Returns an appropriate function to format ``data`` if one has been
registered.
"""
for func in self.lazy_init:
func()
for type, func in self.func_registry.items():
if isinstance(data, type):
return func
# def _register_pandas_extensions(self):
# # import numpy as np
# # @self.register(pd.DataFrame)
# def format_pandas(data, **kwargs):
# precision = kwargs.get('precision', None)
# float_format = (None if precision is None
# else '%.{}f'.format(precision))
# formatted = data.to_string(float_format=float_format)
# return formatted
def _register_numpy_extensions(self):
"""
CommandLine:
python -m ubelt.util_format FormatterExtensions._register_numpy_extensions
Example:
>>> import sys
>>> import pytest
>>> import ubelt as ub
>>> if not ub.modname_to_modpath('numpy'):
... raise pytest.skip()
>>> # xdoctest: +IGNORE_WHITESPACE
>>> import numpy as np
>>> data = np.array([[.2, 42, 5], [21.2, 3, .4]])
>>> print(ub.repr2(data))
np.array([[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]], dtype=np.float64)
>>> print(ub.repr2(data, with_dtype=False))
np.array([[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]])
>>> print(ub.repr2(data, strvals=True))
[[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]]
>>> data = np.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=False))
np.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=True))
[]
>>> data = np.ma.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=False))
np.ma.empty((0, 10), dtype=np.float64)
"""
import numpy as np
@self.register(np.ndarray)
def format_ndarray(data, **kwargs):
import re
strvals = kwargs.get('sv', kwargs.get('strvals', False))
itemsep = kwargs.get('itemsep', ' ')
precision = kwargs.get('precision', None)
suppress_small = kwargs.get('suppress_small', kwargs.get('supress_small', None))  # accept the historical misspelling too
max_line_width = kwargs.get('max_line_width', None)
with_dtype = kwargs.get('with_dtype', kwargs.get('dtype', not strvals))
newlines = kwargs.pop('nl', kwargs.pop('newlines', 1))
# if with_dtype and strvals:
# raise ValueError('cannot format with strvals and dtype')
separator = ',' + itemsep
if strvals:
prefix = ''
suffix = ''
else:
modname = type(data).__module__
# substitute shorthand for numpy module names
np_nice = 'np'
modname = re.sub('\\bnumpy\\b', np_nice, modname)
modname = re.sub('\\bma.core\\b', 'ma', modname)
class_name = type(data).__name__
if class_name == 'ndarray':
class_name = 'array'
prefix = modname + '.' + class_name + '('
if with_dtype:
dtype_repr = data.dtype.name
# dtype_repr = np.core.arrayprint.dtype_short_repr(data.dtype)
suffix = ',{}dtype={}.{})'.format(itemsep, np_nice, dtype_repr)
else:
suffix = ')'
if not strvals and data.size == 0 and data.shape != (0,):
# Special case for displaying empty data
prefix = modname + '.empty('
body = repr(tuple(map(int, data.shape)))
else:
body = np.array2string(data, precision=precision,
separator=separator,
suppress_small=suppress_small,
prefix=prefix,
max_line_width=max_line_width)
if not newlines:
# remove newlines if we need to
body = re.sub('\n *', '', body)
formatted = prefix + body + suffix
return formatted
# Hack, make sure we also register numpy floats
self.register(np.float32)(self.func_registry[float])
def _register_builtin_extensions(self):
@self.register(float)
def format_float(data, **kwargs):
precision = kwargs.get('precision', None)
if precision is None:
return six.text_type(data)
else:
return ('{:.%df}' % precision).format(data)
@self.register(slice)
def format_slice(data, **kwargs):
if kwargs.get('itemsep', ' ') == '':
return 'slice(%r,%r,%r)' % (data.start, data.stop, data.step)
else:
return _format_object(data, **kwargs)
_FORMATTER_EXTENSIONS = FormatterExtensions()
_FORMATTER_EXTENSIONS._register_builtin_extensions()
@_FORMATTER_EXTENSIONS.lazy_init.append
def _lazy_init():
"""
Only called in the case where we encounter an unknown type that a commonly
used external library might have. For now this is just numpy. Numpy is
ubiquitous.
"""
try:
# TODO: can we use lazy loading to prevent trying to import numpy until
# some attribute of _FORMATTER_EXTENSIONS is used?
_FORMATTER_EXTENSIONS._register_numpy_extensions()
# TODO: register pandas by default if available
except ImportError: # nocover
pass
def _format_object(val, **kwargs):
stritems = kwargs.get('si', kwargs.get('stritems', False))
strvals = stritems or kwargs.get('sv', kwargs.get('strvals', False))
base_valfunc = six.text_type if strvals else repr
itemstr = base_valfunc(val)
# Remove unicode repr from python2 to agree with python3 output
if six.PY2 and isinstance(val, six.string_types): # nocover
if itemstr.startswith(("u'", 'u"')):
itemstr = itemstr[1:]
return itemstr
def _format_list(list_, **kwargs):
"""
Makes a pretty printable / human-readable string representation of a
sequence. In most cases this string could be evaled.
Args:
list_ (list): input list
**kwargs: nl, newlines, packed, nobr, nobraces, itemsep, trailing_sep,
strvals indent_, precision, use_numpy, with_dtype, force_dtype,
stritems, strkeys, explicit, sort, key_order, maxlen
Returns:
Tuple[str, Dict] : retstr, _leaf_info
Example:
>>> print(_format_list([])[0])
[]
>>> print(_format_list([], nobr=True)[0])
[]
>>> print(_format_list([1], nl=0)[0])
[1]
>>> print(_format_list([1], nobr=True)[0])
1,
"""
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
kwargs['_root_info']['depth'] += 1
newlines = kwargs.pop('nl', kwargs.pop('newlines', 1))
kwargs['nl'] = _rectify_countdown_or_bool(newlines)
nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
itemsep = kwargs.get('itemsep', ' ')
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
# kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
itemstrs, _leaf_info = _list_itemstrs(list_, **kwargs)
if len(itemstrs) == 0:
nobraces = False # force braces to prevent empty output
is_tuple = isinstance(list_, tuple)
is_set = isinstance(list_, (set, frozenset,))
if nobraces:
lbr, rbr = '', ''
elif is_tuple:
lbr, rbr = '(', ')'
elif is_set:
lbr, rbr = '{', '}'
else:
lbr, rbr = '[', ']'
# Doesn't actually put in trailing comma if on same line
trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0 and len(itemstrs)))
# The trailing separator is always needed for single item tuples
if is_tuple and len(list_) <= 1:
trailing_sep = True
if len(itemstrs) == 0:
newlines = False
retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr)
return retstr, _leaf_info
def _format_dict(dict_, **kwargs):
"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict): a dictionary
**kwargs: si, stritems, strkeys, strvals, sk, sv, nl, newlines, nobr,
nobraces, cbr, compact_brace, trailing_sep,
explicit, itemsep, precision, kvsep, sort
Kwargs:
sort (None): if True, sorts ALL collections and subcollections,
note, collections with undefined orders (e.g. dicts, sets) are
sorted by default. (default = None)
nl (int): preferred alias for newline. can be a countdown variable
(default = None)
explicit (int): can be a countdown variable. if True, uses
dict(a=b) syntax instead of {'a': b}
nobr (bool): removes outer braces (default = False)
Returns:
Tuple[str, Dict] : retstr, _leaf_info
"""
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
kwargs['_root_info']['depth'] += 1
stritems = kwargs.pop('si', kwargs.pop('stritems', False))
if stritems:
kwargs['strkeys'] = True
kwargs['strvals'] = True
kwargs['strkeys'] = kwargs.pop('sk', kwargs.pop('strkeys', False))
kwargs['strvals'] = kwargs.pop('sv', kwargs.pop('strvals', False))
newlines = kwargs.pop('nl', kwargs.pop('newlines', True))
kwargs['nl'] = _rectify_countdown_or_bool(newlines)
nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
# kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
# Doesn't actually put in trailing comma if on same line
trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0))
explicit = kwargs.get('explicit', False)
itemsep = kwargs.get('itemsep', ' ')
if len(dict_) == 0:
retstr = 'dict()' if explicit else '{}'
_leaf_info = None
else:
itemstrs, _leaf_info = _dict_itemstrs(dict_, **kwargs)
if nobraces:
lbr, rbr = '', ''
elif explicit:
lbr, rbr = 'dict(', ')'
else:
lbr, rbr = '{', '}'
retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr)
return retstr, _leaf_info
def _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr):
"""
Joins string-ified items with separators newlines and container-braces.
"""
# positive newlines means start counting from the root
use_newline = newlines > 0
# negative countdown values mean start counting from the leafs
# if compact_brace < 0:
# compact_brace = (-compact_brace) >= _leaf_info['max_height']
if newlines < 0:
use_newline = (-newlines) < _leaf_info['max_height']
if use_newline:
sep = ',\n'
if nobraces:
body_str = sep.join(itemstrs)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
retstr = body_str
else:
if compact_brace:
# Why must we modify the indentation below and not here?
# prefix = ''
# rest = [ub.indent(s, prefix) for s in itemstrs[1:]]
# indented = itemstrs[0:1] + rest
indented = itemstrs
else:
import ubelt as ub
prefix = ' ' * 4
indented = [ub.indent(s, prefix) for s in itemstrs]
body_str = sep.join(indented)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
if compact_brace:
# Why can we modify the indentation here but not above?
braced_body_str = (lbr + body_str.replace('\n', '\n ') + rbr)
else:
braced_body_str = (lbr + '\n' + body_str + '\n' + rbr)
retstr = braced_body_str
else:
sep = ',' + itemsep
body_str = sep.join(itemstrs)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
retstr = (lbr + body_str + rbr)
return retstr
def _dict_itemstrs(dict_, **kwargs):
"""
Create a string representation for each item in a dict.
Args:
dict_ (dict): the dict
**kwargs: explicit, precision, kvsep, strkeys, _return_info, cbr,
compact_brace, sort
Ignore:
import xinspect
', '.join(xinspect.get_kwargs(_dict_itemstrs, max_depth=0).keys())
Example:
>>> from ubelt.util_format import *
>>> dict_ = {'b': .1, 'l': 'st', 'g': 1.0, 's': 10, 'm': 0.9, 'w': .5}
>>> kwargs = {'strkeys': True}
>>> itemstrs, _ = _dict_itemstrs(dict_, **kwargs)
>>> char_order = [p[0] for p in itemstrs]
>>> assert char_order == ['b', 'g', 'l', 'm', 's', 'w']
"""
import ubelt as ub
explicit = kwargs.get('explicit', False)
kwargs['explicit'] = _rectify_countdown_or_bool(explicit)
precision = kwargs.get('precision', None)
kvsep = kwargs.get('kvsep', ': ')
if explicit:
kvsep = '='
def make_item_str(key, val):
if explicit or kwargs.get('strkeys', False):
key_str = six.text_type(key)
else:
key_str = repr2(key, precision=precision, newlines=0)
prefix = key_str + kvsep
kwargs['_return_info'] = True
val_str, _leaf_info = repr2(val, **kwargs)
# If the first line does not end with an open nest char
# (e.g. for ndarrays), otherwise we need to worry about
# residual indentation.
pos = val_str.find('\n')
first_line = val_str if pos == -1 else val_str[:pos]
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
if compact_brace or not first_line.rstrip().endswith(tuple('([{<')):
rest = '' if pos == -1 else val_str[pos:]
val_str = first_line.lstrip() + rest
if '\n' in prefix:
# Fix issue with keys that span new lines
item_str = prefix + val_str
else:
item_str = ub.hzcat([prefix, val_str])
else:
item_str = prefix + val_str
return item_str, _leaf_info
items = list(six.iteritems(dict_))
_tups = [make_item_str(key, val) for (key, val) in items]
itemstrs = [t[0] for t in _tups]
max_height = max([t[1]['max_height'] for t in _tups]) if _tups else 0
_leaf_info = {
'max_height': max_height + 1,
}
sort = kwargs.get('sort', None)
if sort is None:
# if sort is None, force orderings on unordered collections like dicts,
# but keep ordering of ordered collections like OrderedDicts.
sort = True
if isinstance(dict_, collections.OrderedDict):
# never sort ordered dicts; they are perfect just the way they are!
sort = False
if sort:
key = sort if callable(sort) else None
itemstrs = _sort_itemstrs(items, itemstrs, key)
return itemstrs, _leaf_info
def _list_itemstrs(list_, **kwargs):
"""
Create a string representation for each item in a list.
Args:
list_ (Sequence):
**kwargs: _return_info, sort
Ignore:
import xinspect
', '.join(xinspect.get_kwargs(_list_itemstrs, max_depth=0).keys())
"""
items = list(list_)
kwargs['_return_info'] = True
_tups = [repr2(item, **kwargs) for item in items]
itemstrs = [t[0] for t in _tups]
max_height = max([t[1]['max_height'] for t in _tups]) if _tups else 0
_leaf_info = {
'max_height': max_height + 1,
}
sort = kwargs.get('sort', None)
if sort is None:
# if sort is None, force orderings on unordered collections like sets,
# but keep ordering of ordered collections like lists.
sort = isinstance(list_, (set, frozenset))
if sort:
key = sort if callable(sort) else None
itemstrs = _sort_itemstrs(items, itemstrs, key)
return itemstrs, _leaf_info
def _sort_itemstrs(items, itemstrs, key=None):
"""
Equivalent to ``sorted(items)`` except if ``items`` are unorderable, then
string values are used to define an ordering.
"""
# First try to sort items by their normal values
    # If that doesn't work, then sort by their string values
import ubelt as ub
try:
# Set ordering is not unique. Sort by strings values instead.
if _peek_isinstance(items, (set, frozenset)):
raise TypeError
sortx = ub.argsort(items, key=key)
except TypeError:
sortx = ub.argsort(itemstrs, key=key)
itemstrs = [itemstrs[x] for x in sortx]
return itemstrs
def _peek_isinstance(items, types):
return len(items) > 0 and isinstance(items[0], types)
def _rectify_countdown_or_bool(count_or_bool):
"""
used by recursive functions to specify which level to turn a bool on in
counting down yields True, True, ..., False
counting up yields False, False, False, ... True
Args:
count_or_bool (bool or int): if positive and an integer, it will count
down, otherwise it will remain the same.
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> from ubelt.util_format import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> a8 = (_rectify_countdown_or_bool(None))
>>> result = [a1, a2, a3, a4, a5, a6, a7, a8]
>>> print(result)
[1, 0, 0, -1, -2, True, False, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
elif count_or_bool > 0:
count_or_bool_ = count_or_bool - 1
else:
# We dont countup negatives anymore
count_or_bool_ = count_or_bool
else:
count_or_bool_ = False
return count_or_bool_
|
py | 7dfc18dbf50eab47438c9a86d41816466aaf0d56 | """
@created: Dec 29, 2017
@Edited: May 5, 2018
@author: Doron Veltzer
"""
import functools
import fractions
import math
import re
from collections import OrderedDict
import sys
# define useful methods
# read a line from the file, split it according to the separator, and convert it to the given type
def process_input_line(input_file,
input_mapping=int,
input_number=None,
force_list=False,
separator=' '):
input_line = input_file.readline().rstrip()
if input_number is None:
input_vector = input_line.split(separator)
else:
input_vector = input_line.split(separator, input_number)
output_vector = list(map(input_mapping, input_vector))
if len(output_vector) == 1 and not force_list:
return output_vector[0]
else:
return output_vector
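# Illustrative sketch (added, not part of the original solution): with an
# in-memory stream standing in for the input file, a line like "3 5 7" maps
# to [3, 5, 7], while a single token collapses to a scalar unless
# force_list=True is passed:
#   import io
#   process_input_line(io.StringIO("3 5 7"))               # -> [3, 5, 7]
#   process_input_line(io.StringIO("8"))                    # -> 8
#   process_input_line(io.StringIO("8"), force_list=True)   # -> [8]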
# print debug output to standard error file (since we are using standard input and output)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def gcd(a, b):
a = abs(a)
b = abs(b)
while a:
a, b = b % a, a
return b
def lcm(a, b):
return (a * b) // gcd(a, b)
def gcd_list(v):
return functools.reduce(gcd, v)
def lcm_list(v):
return functools.reduce(lcm, v)
def identity(x):
return x
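# Added sanity-check note (not in the original): for example,
# gcd_list([12, 18, 30]) evaluates to 6 and lcm_list([4, 6, 10]) to 60.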
# define useful constants
def solve(input_file, output_file, error_file):
# read case number
t = process_input_line(input_file)
# iterate on all cases
for i in range(t):
# error_file.write('Solving problem #{0}\n'.format(i + 1))
# read input
n = process_input_line(input_file)
ws = process_input_line(input_file)
# print input
# error_file.write('input {0} sized weights {1}\n'.format(n, ws))
# check input
# calculate output
stacks = [(0, 0)]
for j in range(n):
next_stacks = stacks[:]
for w, l in stacks:
if w <= 6 * ws[j]:
# remove tuples where w1 > w2 and l1 < l2
w1, l1 = ws[j] + w, l + 1
# error_file.write('Trying {0}, {1}\n'.format(w1, l1))
for w2, l2 in stacks:
if w2 <= w1 and l2 >= l1:
# error_file.write('Found {0}, {1}\n'.format(w2, l2))
# error_file.write('While adding {0}, {1}\n'.format(w1, l1))
break
if w2 >= w1 and l2 <= l1:
# error_file.write('Removing {0}, {1}\n'.format(w2, l2))
next_stacks.remove((w2, l2))
else:
# error_file.write('Added {0}, {1}\n'.format(w1, l1))
next_stacks.append((w1, l1))
stacks = next_stacks
# set output
output = str(max([l for _, l in stacks]))
# print output
output_file.write('Case #{}: {}\n'.format(i + 1, output))
output_file.flush()
if __name__ == "__main__":
solve(sys.stdin, sys.stdout, sys.stderr)
|
py | 7dfc1a53de702a47fb6233452be4d3b21ff166cb | """
Configuration for Datasets
"""
import os
RAW_DATASET_FILENAME = "GrammarDataset.csv"
DATASET_FOLDER = "data"
RAW_DATASET_FOLDER = "raw"
PROCESSED_DATASET_FOLDER = "processed"
PROCESSED_DATASET_TRAIN_FILENAME = "train.tsv"
PROCESSED_DATASET_TEST_FILENAME = "test.tsv"
RAW_DATASET = os.path.join(DATASET_FOLDER, RAW_DATASET_FOLDER, RAW_DATASET_FILENAME)
PROCESSED_DATASET = {
"train": os.path.join(
DATASET_FOLDER, PROCESSED_DATASET_FOLDER, PROCESSED_DATASET_TRAIN_FILENAME
),
"test": os.path.join(
DATASET_FOLDER, PROCESSED_DATASET_FOLDER, PROCESSED_DATASET_TEST_FILENAME
),
}
TEMP_DIR = ".temp"
|
py | 7dfc1b14724fd112a8dfdfc58f21be3213dfbfc4 | from .timex import Timex
class TimexSet:
timex: Timex
def __init__(self, timex):
self.timex = Timex(timex)
|
py | 7dfc1c4504a59c700f08e58ca070548b081b80a3 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsageOperations(object):
"""UsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ListUsagesResult"]
"""Gets, for the specified location, the current compute resource usage information as well as the
limits for compute resources under the subscription.
:param location: The location for which resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListUsagesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_04_01.models.ListUsagesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListUsagesResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListUsagesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'} # type: ignore
|
py | 7dfc1cb6909771e7c627cf33ee5bd0b2d5d51e57 | """Fdscanner view"""
__docformat__ = "numpy"
import os
import pandas as pd
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import export_data
from gamestonk_terminal.stocks.options import fdscanner_model
def display_options(num: int, sort_column: pd.Timestamp, export: str, ascending: bool):
"""Displays the unusual options table
Parameters
----------
num: int
Number of rows to show
    sort_column: pd.Timestamp
Data column to sort on
export: str
File type to export
ascending: bool
Whether to sort in ascending order
"""
data, last_update = fdscanner_model.unusual_options(num, sort_column, ascending)
print(f"Last Updated: {last_update}")
print(
tabulate(
data[:num], headers=data.columns, tablefmt="fancy_grid", showindex=False
)
)
print("")
if export:
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"unu_",
data,
)
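# Hypothetical usage sketch (added for illustration; the sort column shown is
# a placeholder, not a value taken from fdscanner_model):
#   display_options(num=20, sort_column="Vol/OI", export="csv", ascending=False)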
|
py | 7dfc1d5ea84b1b7184d9eeb8b24db84238daa1d0 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'temp_28451.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 7dfc1e31220e8ddd45ffd1bd91723e5bc1354eb0 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`~sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
"""
# %%
# Python package and dataset imports, load dataset
# ---------------------------------------------------
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# %%
# Timing and accuracy plots
# --------------------------------------------------
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.0
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[: n_samples // 2], digits.target[: n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2 :], digits.target[n_samples // 2 :])
# data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=0.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=0.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=0.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline(
[("feature_map", feature_map_fourier), ("svm", svm.LinearSVC())]
)
nystroem_approx_svm = pipeline.Pipeline(
[("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC())]
)
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(16, 4))
accuracy = plt.subplot(121)
# second y axis for timings
timescale = plt.subplot(122)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, "--", label="Nystroem approx. kernel")
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, "--", label="Fourier approx. kernel")
# horizontal lines for exact rbf and linear kernels:
accuracy.plot(
[sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score],
label="linear svm",
)
timescale.plot(
[sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time],
"--",
label="linear svm",
)
accuracy.plot(
[sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score],
label="rbf svm",
)
timescale.plot(
[sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time],
"--",
label="rbf svm",
)
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc="best")
timescale.legend(loc="best")
plt.tight_layout()
plt.show()
# %%
# Decision Surfaces of RBF Kernel SVM and Linear SVM
# --------------------------------------------------------
# The second plot visualizes the decision surfaces of the RBF kernel SVM and
# the linear SVM with approximate kernel maps.
# The plot shows decision surfaces of the classifiers projected onto
# the first two principal components of the data. This visualization should
# be taken with a grain of salt since it is just an interesting slice through
# the decision surface in 64 dimensions. In particular note that
# a datapoint (represented as a dot) is not necessarily classified
# into the region it is lying in, since it will not lie on the plane
# that the first two principal components span.
# The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
# in :ref:`kernel_approximation`.
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = [
"SVC with rbf kernel",
"SVC (linear kernel)\n with Fourier rbf feature map\nn_components=100",
"SVC (linear kernel)\n with Nystroem rbf feature map\nn_components=100",
]
plt.figure(figsize=(18, 7.5))
plt.rcParams.update({"font.size": 14})
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis("off")
# Plot also the training points
plt.scatter(
X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired, edgecolors=(0, 0, 0)
)
plt.title(titles[i])
plt.tight_layout()
plt.show()
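# %%
# Optional speed-up sketch
# --------------------------------------------------
# Added illustration (not part of the original example): the introduction
# notes that the linear and approximate-kernel SVMs could be accelerated with
# stochastic gradient descent. A minimal variant, assuming hinge loss and 300
# random Fourier features are acceptable choices:
from sklearn.linear_model import SGDClassifier
sgd_fourier_svm = pipeline.Pipeline(
    [
        ("feature_map", RBFSampler(gamma=0.2, n_components=300, random_state=1)),
        ("sgd", SGDClassifier(loss="hinge", max_iter=1000, tol=1e-3, random_state=1)),
    ]
)
sgd_fourier_svm.fit(data_train, targets_train)
print("SGD + Fourier approx. accuracy:", sgd_fourier_svm.score(data_test, targets_test))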
|
py | 7dfc1e9d943cffa9ddbb49e706d0627f6c01424f | # Generated by Django 3.2.5 on 2021-07-20 05:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("food", "0005_auto_20210719_1232"),
]
operations = [
migrations.RemoveField(
model_name="restaurant",
name="background",
),
migrations.AddField(
model_name="restaurant",
name="photo",
field=models.ImageField(null=True, upload_to="bg"),
),
]
|
py | 7dfc1eabc800ca3e76b1723a4a5b81a0a822c2a8 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
import unittest
from ebcli.lib import cloudwatch
from .. import mock_responses
class TestCloudWatch(unittest.TestCase):
@mock.patch('ebcli.lib.cloudwatch.aws.make_api_call')
def test_get_all_stream_names(self, make_api_call_mock):
make_api_call_mock.return_value = mock_responses.DESCRIBE_LOG_STREAMS_RESPONSE
self.assertEqual(
[
'archive-health-2018-03-26',
'archive-health-2018-03-27',
'archive-health-2018-03-28',
],
cloudwatch.get_all_stream_names('some-log-group')
)
@mock.patch('ebcli.lib.cloudwatch.aws.make_api_call')
def test_get_log_events(self, make_api_call_mock):
cloudwatch.get_log_events(
'environment-health.log',
'archive-health-2018-03-26',
next_token='1234123412341234',
start_time='4567456745674567',
end_time='7890789078907890',
limit=10
)
make_api_call_mock.assert_called_once_with(
'logs',
'get_log_events',
endTime='7890789078907890',
limit=10,
logGroupName='environment-health.log',
logStreamName='archive-health-2018-03-26',
nextToken='1234123412341234',
startFromHead=False,
startTime='4567456745674567'
)
@mock.patch('ebcli.lib.cloudwatch.aws.make_api_call')
def test_log_group_exists(
self,
make_api_call_mock
):
make_api_call_mock.return_value = mock_responses.DESCRIBE_LOG_GROUPS_RESPONSE
self.assertTrue(cloudwatch.log_group_exists('my-log-group-name'))
@mock.patch('ebcli.lib.cloudwatch.aws.make_api_call')
def test_log_group_exists__log_group_does_not_exist(
self,
make_api_call_mock
):
make_api_call_mock.return_value = {
'logGroups': []
}
self.assertFalse(cloudwatch.log_group_exists('my-log-group-name'))
|
py | 7dfc1f0f51a7c1b8a0b14db5658cb0eb90ee3a60 | # %%
import sys
from pathlib2 import Path
root_folder = Path(__file__).resolve().parents[0]
if str(root_folder) not in sys.path:  # sys.path entries are strings, so compare as str
sys.path.insert(0, str(root_folder))
print(root_folder)
# %%
|
py | 7dfc2089a0be3d6e845f28e60ee0724010ea1d0c | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf import lib
from pyscf.ci import cisd
from pyscf.ci import ucisd
from pyscf.ci import gcisd
from pyscf.pbc import mp
class RCISD(cisd.RCISD):
def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None):
if abs(mf.kpt).max() > 1e-9:
raise NotImplementedError
from pyscf.pbc.df.df_ao2mo import warn_pbc2d_eri
warn_pbc2d_eri(mf)
cisd.RCISD.__init__(self, mf, frozen, mo_coeff, mo_occ)
def ao2mo(self, mo_coeff=None):
from pyscf.cc import rccsd
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
ao2mofn = mp.mp2._gen_ao2mofn(self._scf)
with lib.temporary_env(self._scf, exxdiv=None):
eris = rccsd._make_eris_incore(self, mo_coeff, ao2mofn=ao2mofn)
if mo_coeff is self._scf.mo_coeff:
eris.mo_energy = self._scf.mo_energy[self.get_frozen_mask()]
else:
madelung = tools.madelung(self._scf.cell, self._scf.kpt)
eris.mo_energy = _adjust_occ(eris.mo_energy, eris.nocc, -madelung)
return eris
class UCISD(ucisd.UCISD):
def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None):
if abs(mf.kpt).max() > 1e-9:
raise NotImplementedError
from pyscf.pbc.df.df_ao2mo import warn_pbc2d_eri
warn_pbc2d_eri(mf)
ucisd.UCISD.__init__(self, mf, frozen, mo_coeff, mo_occ)
def ao2mo(self, mo_coeff=None):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
ao2mofn = mp.mp2._gen_ao2mofn(self._scf)
with lib.temporary_env(self._scf, exxdiv=None):
eris = ucisd.uccsd._make_eris_incore(self, mo_coeff, ao2mofn=ao2mofn)
if mo_coeff is self._scf.mo_coeff:
idxa, idxb = self.get_frozen_mask()
mo_e_a, mo_e_b = self._scf.mo_energy
eris.mo_energy = (mo_e_a[idxa], mo_e_b[idxb])
else:
nocca, noccb = eris.nocc
madelung = tools.madelung(self._scf.cell, self._scf.kpt)
eris.mo_energy = (_adjust_occ(eris.mo_energy[0], nocca, -madelung),
_adjust_occ(eris.mo_energy[1], noccb, -madelung))
return eris
class GCISD(gcisd.GCISD):
def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None):
from pyscf.pbc.df.df_ao2mo import warn_pbc2d_eri
warn_pbc2d_eri(mf)
gcisd.GCISD.__init__(self, mf, frozen, mo_coeff, mo_occ)
def ao2mo(self, mo_coeff=None):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
with_df = self._scf.with_df
kpt = self._scf.kpt
def ao2mofn(mo_coeff):
nao, nmo = mo_coeff.shape
mo_a = mo_coeff[:nao//2]
mo_b = mo_coeff[nao//2:]
orbspin = getattr(mo_coeff, 'orbspin', None)
if orbspin is None:
eri = with_df.ao2mo(mo_a, kpt, compact=False)
eri += with_df.ao2mo(mo_b, kpt, compact=False)
eri1 = with_df.ao2mo((mo_a,mo_a,mo_b,mo_b), kpt, compact=False)
eri += eri1
eri += eri1.T
eri = eri.reshape([nmo]*4)
else:
mo = mo_a + mo_b
eri = with_df.ao2mo(mo, kpt, compact=False).reshape([nmo]*4)
sym_forbid = (orbspin[:,None] != orbspin)
eri[sym_forbid,:,:] = 0
eri[:,:,sym_forbid] = 0
return eri
with lib.temporary_env(self._scf, exxdiv=None):
eris = gcisd.gccsd._make_eris_incore(self, mo_coeff, ao2mofn=ao2mofn)
if mo_coeff is self._scf.mo_coeff:
eris.mo_energy = self._scf.mo_energy[self.get_frozen_mask()]
else:
madelung = tools.madelung(self._scf.cell, self._scf.kpt)
eris.mo_energy = _adjust_occ(eris.mo_energy, eris.nocc, -madelung)
return eris
from pyscf.pbc import scf
scf.hf.RHF.CISD = lib.class_as_method(RCISD)
scf.uhf.UHF.CISD = lib.class_as_method(UCISD)
scf.ghf.GHF.CISD = lib.class_as_method(GCISD)
scf.rohf.ROHF.CISD = None
|
py | 7dfc21562cc5d09170ce3fedb4a974a5c847c0b5 | from datetime import date, datetime
import subprocess
import sys
import numpy as np
import pytest
from pandas.compat import u
from pandas.compat.numpy import np_datetime64_compat
from pandas import Index, Period, Series, Timestamp, date_range
import pandas.core.config as cf
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Micro, Milli, Second
converter = pytest.importorskip('pandas.plotting._converter')
from pandas.plotting import (deregister_matplotlib_converters, # isort:skip
register_matplotlib_converters)
def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestRegistration(object):
def test_register_by_default(self):
# Run in subprocess to ensure a clean state
code = ("'import matplotlib.units; "
"import pandas as pd; "
"units = dict(matplotlib.units.registry); "
"assert pd.Timestamp in units)'")
call = [sys.executable, '-c', code]
assert subprocess.check_call(call) == 0
def test_warns(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
_, ax = plt.subplots()
# Set to the "warning" state, in case this isn't the first test run
converter._WARN = True
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
ax.plot(s.index, s.values)
plt.close()
assert len(w) == 1
assert "Using an implicitly registered datetime converter" in str(w[0])
def test_registering_no_warning(self):
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
_, ax = plt.subplots()
# Set to the "warn" state, in case this isn't the first test run
converter._WARN = True
register_matplotlib_converters()
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
assert len(w) == 0
def test_pandas_plots_register(self):
pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
# Set to the "warn" state, in case this isn't the first test run
converter._WARN = True
with tm.assert_produces_warning(None) as w:
s.plot()
assert len(w) == 0
def test_matplotlib_formatters(self):
units = pytest.importorskip("matplotlib.units")
assert Timestamp in units.registry
ctx = cf.option_context("plotting.matplotlib.register_converters",
False)
with ctx:
assert Timestamp not in units.registry
assert Timestamp in units.registry
def test_option_no_warning(self):
pytest.importorskip("matplotlib.pyplot")
ctx = cf.option_context("plotting.matplotlib.register_converters",
False)
plt = pytest.importorskip("matplotlib.pyplot")
s = Series(range(12), index=date_range('2017', periods=12))
_, ax = plt.subplots()
converter._WARN = True
# Test without registering first, no warning
with ctx:
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
assert len(w) == 0
# Now test with registering
converter._WARN = True
register_matplotlib_converters()
with ctx:
with tm.assert_produces_warning(None) as w:
ax.plot(s.index, s.values)
assert len(w) == 0
def test_registry_resets(self):
units = pytest.importorskip("matplotlib.units")
dates = pytest.importorskip("matplotlib.dates")
# make a copy, to reset to
original = dict(units.registry)
try:
# get to a known state
units.registry.clear()
date_converter = dates.DateConverter()
units.registry[datetime] = date_converter
units.registry[date] = date_converter
register_matplotlib_converters()
assert units.registry[date] is not date_converter
deregister_matplotlib_converters()
assert units.registry[date] is date_converter
finally:
            # restore original state
units.registry.clear()
for k, v in original.items():
units.registry[k] = v
def test_old_import_warns(self):
with tm.assert_produces_warning(FutureWarning) as w:
from pandas.tseries import converter
converter.register()
assert len(w)
assert ('pandas.plotting.register_matplotlib_converters' in
str(w[0].message))
class TestDateTimeConverter(object):
def setup_method(self, method):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert(u("12:22"), None, None)
assert (r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(date(2012, 1, 1), None, None)
assert rs == xp
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
assert rs == xp
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np_datetime64_compat('2012-01-01'), None, None)
assert rs == xp
rs = self.dtc.convert(np_datetime64_compat(
'2012-01-01 00:00:00+0000'), None, None)
assert rs == xp
rs = self.dtc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]), None, None)
assert rs[0] == xp
# we have a tz-aware date (constructed to that when we turn to utc it
# is the same as our sample)
ts = (Timestamp('2012-01-01')
.tz_localize('UTC')
.tz_convert('US/Eastern')
)
rs = self.dtc.convert(ts, None, None)
assert rs == xp
rs = self.dtc.convert(ts.to_pydatetime(), None, None)
assert rs == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]), None, None)
assert rs[1] == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(),
None, None)
assert rs[1] == xp
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(
Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(
Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
tm.assert_almost_equal(rs, xp, decimals)
def test_conversion_outofbounds_datetime(self):
# 2579
values = [date(1677, 1, 1), date(1677, 1, 2)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
@pytest.mark.parametrize('time,format_expected', [
(0, '00:00'), # time2num(datetime.time.min)
(86399.999999, '23:59:59.999999'), # time2num(datetime.time.max)
(90000, '01:00'),
(3723, '01:02:03'),
(39723.2, '11:02:03.200')
])
def test_time_formatter(self, time, format_expected):
# issue 18478
result = self.tc(time)
assert result == format_expected
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1,
val2))
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
def test_convert_nested(self):
inner = [Timestamp('2017-01-01'), Timestamp('2017-01-02')]
data = [inner, inner]
result = self.dtc.convert(data, None, None)
expected = [self.dtc.convert(x, None, None) for x in data]
assert (np.array(result) == expected).all()
class TestPeriodConverter(object):
def setup_method(self, method):
self.pc = converter.PeriodConverter()
class Axis(object):
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
assert r1 == r2
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
assert rs == xp
rs = self.pc.convert('2012-1-1', None, self.axis)
assert rs == xp
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
assert rs == xp
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
assert rs == xp
rs = self.pc.convert(
np_datetime64_compat('2012-01-01'), None, self.axis)
assert rs == xp
rs = self.pc.convert(
np_datetime64_compat('2012-01-01 00:00:00+0000'), None, self.axis)
assert rs == xp
rs = self.pc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]),
None, self.axis)
assert rs[0] == xp
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
assert rs == xp
def test_convert_nested(self):
data = ['2012-1-1', '2012-1-2']
r1 = self.pc.convert([data, data], None, self.axis)
r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)]
assert r1 == r2
|
py | 7dfc23830afe16b75b76e2a38f1caf3292de5ba8 | #!/usr/bin/env python
"""Counts the vowels in a user input string."""
s = input ('Enter any string: ')
vcount = 0
for c in s:
if c in 'aeiouAIEOU':
vcount += 1
print('Vowel count:', vcount)
|
py | 7dfc245f2e49d012a3795521c05cb16e9b690d63 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length=500)
img = models.CharField(null=True, blank=True, max_length=250)
content = models.TextField(null=True, blank=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
createtime = models.DateField()
def __str__(self):
return self.title
class UserProfile(models.Model):
    # user's name
user = models.ForeignKey(User, related_name="profile")
    # user's avatar
photo = models.ImageField(
        upload_to='profile_image', blank=True, null=True) # user's gender
GENDER_CHOICES = (
(u'M', u'Male'),
(u'F', u'Female'),
)
# gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
def __str__(self):
return self.user
class Comment(models.Model):
name = models.CharField(max_length=500)
avatar = models.CharField(
max_length=250, default="static/images/default.png")
comment = models.TextField(null=True, blank=True)
createtime = models.DateField(auto_now=True)
belong_to = models.ForeignKey(
to=Article, related_name="under_comments", null=True, blank=True)
def __str__(self):
return self.name
class Ticket(models.Model):
voter = models.ForeignKey(to=User, related_name="user_tickets")
article = models.ForeignKey(to=Article, related_name="article_tickets")
ARTICLE_CHOICES = {
("like", "like"),
("dislike", "dislike"),
("normal", "normal")
}
choice = models.CharField(choices=ARTICLE_CHOICES, max_length=10)
def __str__(self):
return str(self.id)
|
py | 7dfc2471ef1631cc03de8acf51aaf7b9f7470e04 | # Generated by Django 3.1.5 on 2021-02-04 05:30
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('username', models.CharField(help_text='The username of the user.', max_length=150, unique=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'ordering': ('id',),
},
),
]
|
py | 7dfc24f0bc1797696f705cc6dceb889877530d7d | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Input']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:streamanalytics:Input'.""", DeprecationWarning)
class Input(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:streamanalytics:Input'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
input_name: Optional[pulumi.Input[str]] = None,
job_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An input object, containing all information associated with the named input. All inputs are contained under a streaming job.
Latest API Version: 2016-03-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] input_name: The name of the input.
:param pulumi.Input[str] job_name: The name of the streaming job.
:param pulumi.Input[str] name: Resource name
:param pulumi.Input[Union[pulumi.InputType['ReferenceInputPropertiesArgs'], pulumi.InputType['StreamInputPropertiesArgs']]] properties: The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
"""
pulumi.log.warn("Input is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:streamanalytics:Input'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['input_name'] = input_name
if job_name is None and not opts.urn:
raise TypeError("Missing required property 'job_name'")
__props__['job_name'] = job_name
__props__['name'] = name
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:streamanalytics:Input"), pulumi.Alias(type_="azure-nextgen:streamanalytics/v20160301:Input"), pulumi.Alias(type_="azure-nextgen:streamanalytics/v20170401preview:Input")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Input, __self__).__init__(
'azure-nextgen:streamanalytics/latest:Input',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Input':
"""
Get an existing Input resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Input(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
The properties that are associated with an input. Required on PUT (CreateOrReplace) requests.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 7dfc259b09fb5df1d92083622cf079b9e28f7a47 | class TrackableObject:
def __init__(self, objectID, centroid):
# store the object ID, then initialize a list of centroids
# using the current centroid
self.objectID = objectID
self.centroids = [centroid]
# initialize a boolean used to indicate if the object has
# already been counted or not
self.counted = False
self.stepA = False
self.stepB = False |
py | 7dfc25a59f0c67fcd2e0b815ff19784565f5d8dc | from django.db import models
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, reverse
from django.core.validators import FileExtensionValidator
# # Data model for a request
# class Request(models.Model):
# email = models.EmailField(max_length=64, default='')
# input_file = models.FileField(
# upload_to='./',
# validators=[FileExtensionValidator(allowed_extensions=('txt', 'zip'))]
# )
# conf_file = models.FileField(upload_to='./')
# request_time = models.CharField(max_length=30, default='')
# status = models.CharField(max_length=30, default='new')
# def __unicode__(self):
# return self
# Data model for a request
class Request(models.Model):
input_file = models.FileField(
validators=[FileExtensionValidator(allowed_extensions=(['txt', 'zip']))]
)
email = models.EmailField(
max_length=64,
)
max_context_lines = models.IntegerField(default=0)
min_context_lines = models.IntegerField(default=0)
ungram_kid_lines = models.IntegerField(default=0)
input_ungram_distractors = models.IntegerField(default=0)
output_ungram_distractor = models.IntegerField(default=0)
ungram_num = models.IntegerField(default=0)
nonsen_kid_words = models.IntegerField(default=0)
input_nonsensical_distractors = models.IntegerField(default=0)
nonsen_num = models.IntegerField(default=0)
plau_kid_words = models.IntegerField(default=0)
input_plau_distractors = models.IntegerField(default=0)
output_plau_distractor = models.IntegerField(default=0)
plau_num = models.IntegerField(default=0)
request_time = models.CharField(max_length=30, default='')
status = models.CharField(max_length=30, default='new')
def __unicode__(self):
return self
|
py | 7dfc2761ec2dcfa8d3ea7d74f2fa83b231ca1056 |
"""
Increasing Triangle Pattern
"""
print("")
n = 5
# Method 1
print("Method 1")
for x in range(n):
for y in range(x + 1):
print(" * ", end="")
print("")
print("\n*~*~*~*~*~*~*~*~*~*~*~*\n")
# Method 2
print("Method 2")
for x in range(n):
print(" * " * (x + 1))
print("")
"""
Author: Jovan De Guia
Github Username: jxmked
""" |
py | 7dfc27da1b45e9ee6afc84a29505066c62281947 | # --------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# path- variable storing file path
#Code starts here
#Load dataset from 'path'(given) using "pd.read_csv()" in variable 'df' .
df=pd.read_csv(path)
#Display the first five columns of dataframe df.
print(df.head())
#Store all the features(independent values) in a variable called 'X'
X=df.drop(columns='Price')
print(X.head())
#Store the target variable (dependent value) in a variable called 'y'
y=df['Price']
print(y.head())
#Split the dataframe into X_train,X_test,y_train,y_test using the train_test_split() function.
#Use test_size = 0.3 and random_state = 6
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=6)
print(X_train.head(),X_test.head(),y_train.head(),y_test.head())
#Find the correlation between the features that are stored in 'X_train' and store the result in a variable called 'corr'.
corr=X_train.corr()
#Print corr.
print(corr)
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# Code Starts here
#Instantiate a linear regression model with LinearRegression() and save it to a variable called 'regressor'.
regressor=LinearRegression()
#Fit the model on the training data 'X_train' and 'y_train'.
regressor.fit(X_train, y_train)
#Make predictions on the X_test features and save the results in a variable called 'y_pred'.
y_pred=regressor.predict(X_test)
#Find the r^2 score and store the result in a variable called 'r2'
r2=r2_score(y_test,y_pred)
print(r2)
# --------------
from sklearn.linear_model import Lasso
# Code starts here
#Instantiate a lasso model with Lasso() and save it to a variable called 'lasso'.
lasso=Lasso()
#Fit the model on the training data X_train and y_train.
lasso.fit(X_train, y_train)
#Make predictions on the X_test features and save the results in a variable called 'lasso_pred'.
lasso_pred=lasso.predict(X_test)
#Find the r^2 score and store the result in a variable called 'r2_lasso'
r2_lasso=r2_score(y_test,lasso_pred)
print(r2_lasso)
# --------------
from sklearn.linear_model import Ridge
# Code starts here
#Instantiate a lasso model with Ridge() and save it to a variable called 'ridge'.
ridge=Ridge()
#Fit the model on the training data, X_train and y_train.
ridge.fit(X_train, y_train)
#Make predictions on the X_test features and save the results in a variable called 'ridge_pred'.
ridge_pred=ridge.predict(X_test)
#Find the r^2 score and store the result in a variable called 'r2_ridge'.
r2_ridge=r2_score(y_test,ridge_pred)
print(r2_ridge)
# Code ends here
# --------------
from sklearn.model_selection import cross_val_score
#Code starts here
#Initiate a LinearRegression() object and store it in a variable called 'regressor'.
regressor=LinearRegression()
#Calculate the cross_val_score on X_train,y_train having model = regressor and cv = 10,
#and store the result in a variable called 'score'.
score=cross_val_score(regressor,X_train,y_train, cv = 10)
#Calculate the mean of 'score' and store it in variable 'mean_score'.
mean_score=np.mean(score)
#Print mean_score.
print(score,mean_score)
# --------------
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
#Code starts here
#Initialize a pipeline with make_pipeline(PolynomialFeatures(2), LinearRegression())
#and store it in the 'model' variable.
model=make_pipeline(PolynomialFeatures(2), LinearRegression())
#Fit the model on the training data, X_train and y_train.
model.fit(X_train, y_train)
#Make predictions on the X_test features and save the results in a variable called 'y_pred'.
y_pred=model.predict(X_test)
#Find the r^2 score and store the result in a variable called 'r2_poly'
r2_poly=r2_score(y_test,y_pred)
print(r2_poly)
|
py | 7dfc29d0dc20303aec95690ad5c2d7a030e1c33d | #!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import os
import unittest
from pxr import Gf
from pxr import Usd
from pxr import UsdShade
from maya import cmds
from maya import standalone
class testUsdExportShadingModePxrRis(unittest.TestCase):
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
mayaFile = os.path.abspath('MarbleCube.ma')
cmds.file(mayaFile, open=True, force=True)
# Export to USD.
usdFilePath = os.path.abspath('MarbleCube.usda')
cmds.loadPlugin('pxrUsd')
cmds.usdExport(mergeTransformAndShape=True, file=usdFilePath,
shadingMode='pxrRis', materialsScopeName='Materials')
cls._stage = Usd.Stage.Open(usdFilePath)
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def testStageOpens(self):
"""
Tests that the USD stage was opened successfully.
"""
self.assertTrue(self._stage)
def testExportPxrRisShading(self):
"""
Tests that exporting a Maya mesh with a simple Maya shading setup
results in the correct shading on the USD mesh.
"""
cubePrim = self._stage.GetPrimAtPath('/MarbleCube/Geom/Cube')
self.assertTrue(cubePrim)
# Validate the Material prim bound to the Mesh prim.
material = UsdShade.Material.GetBoundMaterial(cubePrim)
self.assertTrue(material)
materialPath = material.GetPath().pathString
self.assertEqual(materialPath, '/MarbleCube/Materials/MarbleCubeSG')
# Validate the surface shader that is connected to the material.
materialOutputs = material.GetOutputs()
self.assertEqual(len(materialOutputs), 4)
print self._stage.ExportToString()
materialOutput = material.GetOutput('ri:surface')
(connectableAPI, outputName, outputType) = materialOutput.GetConnectedSource()
self.assertEqual(outputName, 'out')
shader = UsdShade.Shader(connectableAPI)
self.assertTrue(shader)
shaderId = shader.GetIdAttr().Get()
self.assertEqual(shaderId, 'PxrMayaMarble')
# Validate the connected input on the surface shader.
shaderInput = shader.GetInput('placementMatrix')
self.assertTrue(shaderInput)
(connectableAPI, outputName, outputType) = shaderInput.GetConnectedSource()
self.assertEqual(outputName, 'worldInverseMatrix')
shader = UsdShade.Shader(connectableAPI)
self.assertTrue(shader)
shaderId = shader.GetIdAttr().Get()
self.assertEqual(shaderId, 'PxrMayaPlacement3d')
def testShaderAttrsAuthoredSparsely(self):
"""
Tests that only the attributes authored in Maya are exported to USD.
"""
shaderPrimPath = '/MarbleCube/Materials/MarbleCubeSG/MarbleShader'
shaderPrim = self._stage.GetPrimAtPath(shaderPrimPath)
self.assertTrue(shaderPrim)
shader = UsdShade.Shader(shaderPrim)
self.assertTrue(shader)
shaderId = shader.GetIdAttr().Get()
self.assertEqual(shaderId, 'PxrMayaMarble')
shaderInputs = shader.GetInputs()
self.assertEqual(len(shaderInputs), 1)
inputPlacementMatrix = shader.GetInput('placementMatrix')
(connectableAPI, outputName, outputType) = inputPlacementMatrix.GetConnectedSource()
self.assertEqual(connectableAPI.GetPath().pathString,
'/MarbleCube/Materials/MarbleCubeSG/MarbleCubePlace3dTexture')
shaderOutputs = shader.GetOutputs()
self.assertEqual(len(shaderOutputs), 1)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | 7dfc29e192f00d58a620682288c029565c57e6cb | """
sphinx.cmd.quickstart
~~~~~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import locale
import os
import sys
import time
from collections import OrderedDict
from os import path
from typing import Any, Callable, Dict, List, Union
# try to import readline, unix specific enhancement
try:
import readline
if readline.__doc__ and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
USE_LIBEDIT = True
else:
readline.parse_and_bind("tab: complete")
USE_LIBEDIT = False
except ImportError:
readline = None
USE_LIBEDIT = False
from docutils.utils import column_width
import sphinx.locale
from sphinx import __display_version__, package_dir
from sphinx.locale import __
from sphinx.util.console import bold, color_terminal, colorize, nocolor, red # type: ignore
from sphinx.util.osutil import ensuredir
from sphinx.util.template import SphinxRenderer
EXTENSIONS = OrderedDict([
('autodoc', __('automatically insert docstrings from modules')),
('doctest', __('automatically test code snippets in doctest blocks')),
('intersphinx', __('link between Sphinx documentation of different projects')),
('todo', __('write "todo" entries that can be shown or hidden on build')),
('coverage', __('checks for documentation coverage')),
('imgmath', __('include math, rendered as PNG or SVG images')),
('mathjax', __('include math, rendered in the browser by MathJax')),
('ifconfig', __('conditional inclusion of content based on config values')),
('viewcode', __('include links to the source code of documented Python objects')),
('githubpages', __('create .nojekyll file to publish the document on GitHub pages')),
])
DEFAULTS = {
'path': '.',
'sep': False,
'dot': '_',
'language': None,
'suffix': '.rst',
'master': 'index',
'makefile': True,
'batchfile': True,
}
PROMPT_PREFIX = '> '
if sys.platform == 'win32':
# On Windows, show questions as bold because of color scheme of PowerShell (refs: #5294).
COLOR_QUESTION = 'bold'
else:
COLOR_QUESTION = 'purple'
# function to get input from terminal -- overridden by the test suite
def term_input(prompt: str) -> str:
if sys.platform == 'win32':
# Important: On windows, readline is not enabled by default. In these
# environment, escape sequences have been broken. To avoid the
# problem, quickstart uses ``print()`` to show prompt.
print(prompt, end='')
return input('')
else:
return input(prompt)
class ValidationError(Exception):
"""Raised for validation errors."""
def is_path(x: str) -> str:
x = path.expanduser(x)
if not path.isdir(x):
raise ValidationError(__("Please enter a valid path name."))
return x
def is_path_or_empty(x: str) -> str:
if x == '':
return x
return is_path(x)
def allow_empty(x: str) -> str:
return x
def nonempty(x: str) -> str:
if not x:
raise ValidationError(__("Please enter some text."))
return x
def choice(*l: str) -> Callable[[str], str]:
def val(x: str) -> str:
if x not in l:
raise ValidationError(__('Please enter one of %s.') % ', '.join(l))
return x
return val
def boolean(x: str) -> bool:
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError(__("Please enter either 'y' or 'n'."))
return x.upper() in ('Y', 'YES')
def suffix(x: str) -> str:
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError(__("Please enter a file suffix, e.g. '.rst' or '.txt'."))
return x
def ok(x: str) -> str:
return x
def do_prompt(text: str, default: str = None, validator: Callable[[str], Any] = nonempty) -> Union[str, bool]: # NOQA
while True:
if default is not None:
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
if USE_LIBEDIT:
# Note: libedit has a problem for combination of ``input()`` and escape
# sequence (see #5335). To avoid the problem, all prompts are not colored
# on libedit.
pass
elif readline:
# pass input_mode=True if readline available
prompt = colorize(COLOR_QUESTION, prompt, input_mode=True)
else:
prompt = colorize(COLOR_QUESTION, prompt, input_mode=False)
x = term_input(prompt).strip()
if default and not x:
x = default
try:
x = validator(x)
except ValidationError as err:
print(red('* ' + str(err)))
continue
break
return x
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir: str) -> None:
self.templatedir = templatedir or ''
super().__init__()
def _has_custom_template(self, template_name: str) -> bool:
"""Check if custom template file exists.
Note: Please don't use this function from extensions.
It will be removed in the future without deprecation period.
"""
template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(template):
return True
else:
return False
def render(self, template_name: str, context: Dict) -> str:
if self._has_custom_template(template_name):
custom_template = path.join(self.templatedir, path.basename(template_name))
return self.render_from_file(custom_template, context)
else:
return super().render(template_name, context)
def ask_user(d: Dict) -> None:
"""Ask the user for quickstart values missing from *d*.
Values are:
* path: root path
* sep: separate source and build dirs (bool)
* dot: replacement for dot in _templates etc.
* project: project name
* author: author names
* version: version of project
* release: release of project
* language: document language
* suffix: source file suffix
* master: master document name
* extensions: extensions to use (list)
* makefile: make Makefile
* batchfile: make command file
"""
print(bold(__('Welcome to the Sphinx %s quickstart utility.')) % __display_version__)
print()
print(__('Please enter values for the following settings (just press Enter to\n'
'accept a default value, if one is given in brackets).'))
if 'path' in d:
print()
print(bold(__('Selected root path: %s')) % d['path'])
else:
print()
print(__('Enter the root path for documentation.'))
d['path'] = do_prompt(__('Root path for the documentation'), '.', is_path)
while path.isfile(path.join(d['path'], 'conf.py')) or \
path.isfile(path.join(d['path'], 'source', 'conf.py')):
print()
print(bold(__('Error: an existing conf.py has been found in the '
'selected root path.')))
print(__('sphinx-quickstart will not overwrite existing Sphinx projects.'))
print()
d['path'] = do_prompt(__('Please enter a new root path (or just Enter to exit)'),
'', is_path_or_empty)
if not d['path']:
sys.exit(1)
if 'sep' not in d:
print()
print(__('You have two options for placing the build directory for Sphinx output.\n'
'Either, you use a directory "_build" within the root path, or you separate\n'
'"source" and "build" directories within the root path.'))
d['sep'] = do_prompt(__('Separate source and build directories (y/n)'), 'n', boolean)
if 'dot' not in d:
print()
print(__('Inside the root directory, two more directories will be created; "_templates"\n' # NOQA
'for custom HTML templates and "_static" for custom stylesheets and other static\n' # NOQA
'files. You can enter another prefix (such as ".") to replace the underscore.')) # NOQA
d['dot'] = do_prompt(__('Name prefix for templates and static dir'), '_', ok)
if 'project' not in d:
print()
print(__('The project name will occur in several places in the built documentation.'))
d['project'] = do_prompt(__('Project name'))
if 'author' not in d:
d['author'] = do_prompt(__('Author name(s)'))
if 'version' not in d:
print()
print(__('Sphinx has the notion of a "version" and a "release" for the\n'
'software. Each version can have multiple releases. For example, for\n'
'Python the version is something like 2.5 or 3.0, while the release is\n'
'something like 2.5.1 or 3.0a1. If you don\'t need this dual structure,\n'
'just set both to the same value.'))
d['version'] = do_prompt(__('Project version'), '', allow_empty)
if 'release' not in d:
d['release'] = do_prompt(__('Project release'), d['version'], allow_empty)
if 'language' not in d:
print()
print(__('If the documents are to be written in a language other than English,\n'
'you can select a language here by its language code. Sphinx will then\n'
'translate text that it generates into that language.\n'
'\n'
'For a list of supported codes, see\n'
'https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-language.')) # NOQA
d['language'] = do_prompt(__('Project language'), 'en')
if d['language'] == 'en':
d['language'] = None
if 'suffix' not in d:
print()
print(__('The file name suffix for source files. Commonly, this is either ".txt"\n'
'or ".rst". Only files with this suffix are considered documents.'))
d['suffix'] = do_prompt(__('Source file suffix'), '.rst', suffix)
if 'master' not in d:
print()
print(__('One document is special in that it is considered the top node of the\n'
'"contents tree", that is, it is the root of the hierarchical structure\n'
'of the documents. Normally, this is "index", but if your "index"\n'
'document is a custom template, you can also set this to another filename.'))
d['master'] = do_prompt(__('Name of your master document (without suffix)'), 'index')
while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
print()
print(bold(__('Error: the master file %s has already been found in the '
'selected root path.') % (d['master'] + d['suffix'])))
print(__('sphinx-quickstart will not overwrite the existing file.'))
print()
d['master'] = do_prompt(__('Please enter a new file name, or rename the '
'existing file and press Enter'), d['master'])
if 'extensions' not in d:
print(__('Indicate which of the following Sphinx extensions should be enabled:'))
d['extensions'] = []
for name, description in EXTENSIONS.items():
if do_prompt('%s: %s (y/n)' % (name, description), 'n', boolean):
d['extensions'].append('sphinx.ext.%s' % name)
# Handle conflicting options
if {'sphinx.ext.imgmath', 'sphinx.ext.mathjax'}.issubset(d['extensions']):
print(__('Note: imgmath and mathjax cannot be enabled at the same time. '
'imgmath has been deselected.'))
d['extensions'].remove('sphinx.ext.imgmath')
if 'makefile' not in d:
print()
print(__('A Makefile and a Windows command file can be generated for you so that you\n'
'only have to run e.g. `make html\' instead of invoking sphinx-build\n'
'directly.'))
d['makefile'] = do_prompt(__('Create Makefile? (y/n)'), 'y', boolean)
if 'batchfile' not in d:
d['batchfile'] = do_prompt(__('Create Windows command file? (y/n)'), 'y', boolean)
print()
def generate(d: Dict, overwrite: bool = True, silent: bool = False, templatedir: str = None
) -> None:
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
if 'mastertoctree' not in d:
d['mastertoctree'] = ''
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
d['root_doc'] = d['master']
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
d.setdefault('extensions', [])
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
d["path"] = os.path.abspath(d['path'])
ensuredir(d['path'])
srcdir = path.join(d['path'], 'source') if d['sep'] else d['path']
ensuredir(srcdir)
if d['sep']:
builddir = path.join(d['path'], 'build')
d['exclude_patterns'] = ''
else:
builddir = path.join(srcdir, d['dot'] + 'build')
exclude_patterns = map(repr, [
d['dot'] + 'build',
'Thumbs.db', '.DS_Store',
])
d['exclude_patterns'] = ', '.join(exclude_patterns)
ensuredir(builddir)
ensuredir(path.join(srcdir, d['dot'] + 'templates'))
ensuredir(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath: str, content: str, newline: str = None) -> None:
if overwrite or not path.isfile(fpath):
if 'quiet' not in d:
print(__('Creating file %s.') % fpath)
with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
f.write(content)
else:
if 'quiet' not in d:
print(__('File %s already exists, skipping.') % fpath)
conf_path = os.path.join(templatedir, 'conf.py_t') if templatedir else None
if not conf_path or not path.isfile(conf_path):
conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
with open(conf_path) as f:
conf_text = f.read()
write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
masterfile = path.join(srcdir, d['master'] + d['suffix'])
if template._has_custom_template('quickstart/master_doc.rst_t'):
msg = ('A custom template `master_doc.rst_t` found. It has been renamed to '
'`root_doc.rst_t`. Please rename it on your project too.')
print(colorize('red', msg)) # RemovedInSphinx60Warning
write_file(masterfile, template.render('quickstart/master_doc.rst_t', d))
else:
write_file(masterfile, template.render('quickstart/root_doc.rst_t', d))
if d.get('make_mode') is True:
makefile_template = 'quickstart/Makefile.new_t'
batchfile_template = 'quickstart/make.bat.new_t'
else:
makefile_template = 'quickstart/Makefile_t'
batchfile_template = 'quickstart/make.bat_t'
if d['makefile'] is True:
d['rsrcdir'] = 'source' if d['sep'] else '.'
d['rbuilddir'] = 'build' if d['sep'] else d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
write_file(path.join(d['path'], 'Makefile'),
template.render(makefile_template, d), '\n')
if d['batchfile'] is True:
d['rsrcdir'] = 'source' if d['sep'] else '.'
d['rbuilddir'] = 'build' if d['sep'] else d['dot'] + 'build'
write_file(path.join(d['path'], 'make.bat'),
template.render(batchfile_template, d), '\r\n')
if silent:
return
print()
print(bold(__('Finished: An initial directory structure has been created.')))
print()
print(__('You should now populate your master file %s and create other documentation\n'
'source files. ') % masterfile, end='')
if d['makefile'] or d['batchfile']:
print(__('Use the Makefile to build the docs, like so:\n'
' make builder'))
else:
print(__('Use the sphinx-build command to build the docs, like so:\n'
' sphinx-build -b builder %s %s') % (srcdir, builddir))
print(__('where "builder" is one of the supported builders, '
'e.g. html, latex or linkcheck.'))
print()
def valid_dir(d: Dict) -> bool:
dir = d['path']
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
if {'Makefile', 'make.bat'} & set(os.listdir(dir)):
return False
if d['sep']:
dir = os.path.join('source', dir)
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
reserved_names = [
'conf.py',
d['dot'] + 'static',
d['dot'] + 'templates',
d['master'] + d['suffix'],
]
if set(reserved_names) & set(os.listdir(dir)):
return False
return True
def get_parser() -> argparse.ArgumentParser:
description = __(
"\n"
"Generate required files for a Sphinx project.\n"
"\n"
"sphinx-quickstart is an interactive tool that asks some questions about your\n"
"project and then generates a complete documentation directory and sample\n"
"Makefile to be used with sphinx-build.\n"
)
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS] <PROJECT_DIR>',
epilog=__("For more information, visit <http://sphinx-doc.org/>."),
description=description)
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet',
default=None,
help=__('quiet mode'))
parser.add_argument('--version', action='version', dest='show_version',
version='%%(prog)s %s' % __display_version__)
parser.add_argument('path', metavar='PROJECT_DIR', default='.', nargs='?',
help=__('project root'))
group = parser.add_argument_group(__('Structure options'))
group.add_argument('--sep', action='store_true', dest='sep', default=None,
help=__('if specified, separate source and build dirs'))
group.add_argument('--no-sep', action='store_false', dest='sep',
help=__('if specified, create build dir under source dir'))
group.add_argument('--dot', metavar='DOT', default='_',
help=__('replacement for dot in _templates etc.'))
group = parser.add_argument_group(__('Project basic options'))
group.add_argument('-p', '--project', metavar='PROJECT', dest='project',
help=__('project name'))
group.add_argument('-a', '--author', metavar='AUTHOR', dest='author',
help=__('author names'))
group.add_argument('-v', metavar='VERSION', dest='version', default='',
help=__('version of project'))
group.add_argument('-r', '--release', metavar='RELEASE', dest='release',
help=__('release of project'))
group.add_argument('-l', '--language', metavar='LANGUAGE', dest='language',
help=__('document language'))
group.add_argument('--suffix', metavar='SUFFIX', default='.rst',
help=__('source file suffix'))
group.add_argument('--master', metavar='MASTER', default='index',
help=__('master document name'))
group.add_argument('--epub', action='store_true', default=False,
help=__('use epub'))
group = parser.add_argument_group(__('Extension options'))
for ext in EXTENSIONS:
group.add_argument('--ext-%s' % ext, action='append_const',
const='sphinx.ext.%s' % ext, dest='extensions',
help=__('enable %s extension') % ext)
group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
action='append', help=__('enable arbitrary extensions'))
group = parser.add_argument_group(__('Makefile and Batchfile creation'))
group.add_argument('--makefile', action='store_true', dest='makefile', default=True,
help=__('create makefile'))
group.add_argument('--no-makefile', action='store_false', dest='makefile',
help=__('do not create makefile'))
group.add_argument('--batchfile', action='store_true', dest='batchfile', default=True,
help=__('create batchfile'))
group.add_argument('--no-batchfile', action='store_false',
dest='batchfile',
help=__('do not create batchfile'))
group.add_argument('-m', '--use-make-mode', action='store_true',
dest='make_mode', default=True,
help=__('use make-mode for Makefile/make.bat'))
group.add_argument('-M', '--no-use-make-mode', action='store_false',
dest='make_mode',
help=__('do not use make-mode for Makefile/make.bat'))
group = parser.add_argument_group(__('Project templating'))
group.add_argument('-t', '--templatedir', metavar='TEMPLATEDIR',
dest='templatedir',
help=__('template directory for template files'))
group.add_argument('-d', metavar='NAME=VALUE', action='append',
dest='variables',
help=__('define a template variable'))
return parser
def main(argv: List[str] = sys.argv[1:]) -> int:
sphinx.locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')
if not color_terminal():
nocolor()
# parse options
parser = get_parser()
try:
args = parser.parse_args(argv)
except SystemExit as err:
return err.code
d = vars(args)
# delete None or False value
d = {k: v for k, v in d.items() if v is not None}
# handle use of CSV-style extension values
d.setdefault('extensions', [])
for ext in d['extensions'][:]:
if ',' in ext:
d['extensions'].remove(ext)
d['extensions'].extend(ext.split(','))
try:
if 'quiet' in d:
if not {'project', 'author'}.issubset(d):
print(__('"quiet" is specified, but any of "project" or '
'"author" is not specified.'))
return 1
if {'quiet', 'project', 'author'}.issubset(d):
# quiet mode with all required params satisfied, use default
d.setdefault('version', '')
d.setdefault('release', d['version'])
d2 = DEFAULTS.copy()
d2.update(d)
d = d2
if not valid_dir(d):
print()
print(bold(__('Error: specified path is not a directory, or sphinx'
' files already exist.')))
print(__('sphinx-quickstart only generates into an empty directory.'
' Please specify a new root path.'))
return 1
else:
ask_user(d)
except (KeyboardInterrupt, EOFError):
print()
print('[Interrupted.]')
return 130 # 128 + SIGINT
for variable in d.get('variables', []):
try:
name, value = variable.split('=')
d[name] = value
except ValueError:
print(__('Invalid template variable: %s') % variable)
generate(d, overwrite=False, templatedir=args.templatedir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
py | 7dfc2af669713314841b0201d046cfdfee4c2061 | # -*- coding: utf-8 -*-
import luigi
import subprocess
import json
import gzip
from pathos.multiprocessing import ProcessingPool as Pool
class utils():
@staticmethod
def fname(fn):
tf = fn.split('/')
if len(tf) > 1:
fname = tf[-1]
else:
fname = fn
return fname
@staticmethod
def rem_suffix(fname):
return ('.').join(fname.split('.')[:-1])
@staticmethod
def executor(cmd):
subprocess.call(cmd)
#print cmd
@staticmethod
def executor_pipe(cmd):
sout = subprocess.Popen(cmd, stdout=subprocess.PIPE)
cnt = sout.stdout.read()
print('saving to: '+cmd[-2]+'-stat.txt')
with open(cmd[-2]+'-stat.txt', 'wb') as w:
w.write(cnt)
class merge_files(luigi.Task):
config = luigi.DictParameter()
@staticmethod
def merge(s):
fbody =''
for r in s['in']:
# if are zipped unzip them first
print(r)
if utils.fname(r).split('.')[-1] == 'gz':
with gzip.open(r, 'r') as f:
fbody += f.read()
else:
with open(r, 'r') as f:
fbody += f.read()
with open(s['out'], 'w') as f:
f.write(fbody)
def requires(self):
return []
# merged file will be saved in output dir
def output(self):
lst = []
for s in self.config['samples']:
lst.append(luigi.LocalTarget(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.fastq'))
return lst
def run(self):
fl = []
for s in self.config['samples']:
fl.append({'out':self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.fastq', 'in':s['files']})
with Pool(int(self.config['threads'])) as p:
p.map(merge_files.merge, fl)
########################
class clear_cut(luigi.Task):
config = luigi.DictParameter()
def requires(self):
return [merge_files(self.config)]
def run(self):
lst = []
for s in self.config['samples']:
#for r in s['files']:
lst.append(["cutadapt", "-a", self.config['adapter'], '-q', self.config['clear-quality'], '--quality-base='+self.config['qb'], '--minimum-length',
self.config['clear-len'], '-O', self.config['clear-overlap'], '-o', self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_clear.fastq',
self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.fastq'
])
p = Pool(int(self.config['threads']))
p.map(utils.executor_pipe, lst)
def output(self):
lst = []
for s in self.config['samples']:
lst.append(luigi.LocalTarget(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_clear.fastq'))
return lst
class bwa_index(luigi.Task):
# -a algorithm [is|bwtsw]
# -p file prefix?
# input file db.fq
#bwa index
config = luigi.DictParameter()
def requires(self):
return []
def output(self):
return [luigi.LocalTarget(utils.rem_suffix(self.config['clear-ref'])+'.amb'),
luigi.LocalTarget(utils.rem_suffix(self.config['clear-ref'])+'.ann'),
luigi.LocalTarget(utils.rem_suffix(self.config['clear-ref'])+'.bwt'),
luigi.LocalTarget(utils.rem_suffix(self.config['clear-ref'])+'.pac'),
luigi.LocalTarget(utils.rem_suffix(self.config['clear-ref'])+'.sa')]
def run(self):
cmd = ['bwa', 'index']
if self.config['algorithm']:
cmd = cmd+['-a', self.config['algorithm']]
cmd = cmd + ['-p', utils.rem_suffix(self.config['clear-ref']), self.config['clear-ref']]
subprocess.call(cmd)
class bwa_aln(luigi.Task):
# -l seed len
# threads
# ref
# infile - result of merge_file
# sai output ? maybe not param?
#bwa aln
config = luigi.DictParameter()
def requires(self):
return [bwa_index(self.config),clear_cut(self.config)]
def output(self):
lst = []
for s in self.config['samples']:
lst.append(luigi.LocalTarget(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.sai'))
return lst # [luigi.LocalTarget(self.config['odir'].rstrip('/')+'/'+self.config['quality']+'/'+self.config['target_file_name']+'.sai')]
def run(self):
cmd_base = ['bwa', 'aln']
if self.config['seedlen'] > 0:
cmd_base = cmd_base + ['-l', self.config['seedlen'] ]
if self.config['threads'] > 0:
cmd_base = cmd_base + ['-t', self.config['threads']]
for s in self.config['samples']:
cmd = cmd_base + [utils.rem_suffix(self.config['clear-ref']), self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_clear.fastq'] #suffix dependant from imput
with open(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.sai', 'w') as f:
subprocess.call(cmd, stdout=f)
cmd = []
class bwa_samse(luigi.Task):
# db.fasta
# sai from baw_aln task
# output file
#bwa samse
config = luigi.DictParameter()
@staticmethod
def exec_samse(files):
cmd = ['bwa', 'samse', files['ref'], files['sai'], files['fq']]
with open(files['sam'], 'w') as f:
subprocess.call(cmd, stdout = f)
def requires(self):
return [bwa_aln(self.config)]
def output(self):
lst = []
for s in self.config['samples']:
lst.append(luigi.LocalTarget(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.sam'))
return lst
def run(self):
fl = []
cmd_base = ['bwa', 'samse', utils.rem_suffix(self.config['clear-ref'])]
for s in self.config['samples']:
l ={}
l['ref'] = utils.rem_suffix(self.config['clear-ref'])
l['sai'] = self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.sai'
l['fq'] = self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.fastq'
l['sam'] = self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.sam'
fl.append(l)
with Pool(int(self.config['threads'])) as p:
p.map(self.exec_samse, fl)
########################
class unmaped(luigi.Task):
config = luigi.DictParameter()
@staticmethod
def exec_unmaped(files):
cmd = ['samtools', 'view', '-f', '4', '-Sb', files['sam']]
with open(files['bam'], 'w') as f:
subprocess.call(cmd, stdout = f)
def requires(self):
return [bwa_samse(self.config)]
def run(self):
fl = []
for s in self.config['samples']:
fl.append({'sam':self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'.sam', 'bam':self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_cleared.bam'})
with Pool(int(self.config['threads'])) as p:
p.map(self.exec_unmaped, fl)
def output(self):
lst = []
for s in self.config['samples']:
lst.append(luigi.LocalTarget(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_cleared.bam'))
return lst
class sam2fq(luigi.Task):
config = luigi.DictParameter()
@staticmethod
def sam2fq_exec(files):
cmd = ['samtools', 'bam2fq', files['in']]
with open(files['out'], 'w') as f:
subprocess.call(cmd, stdout = f)
def requires(self):
return [unmaped(self.config)]
def run(self):
fl = []
for s in self.config['samples']:
fl.append({'out':self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_cleared.fastq', 'in':self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_cleared.bam'})
with Pool(int(self.config['threads'])) as p:
p.map(self.sam2fq_exec, fl)
def output(self):
lst = []
for s in self.config['samples']:
lst.append(luigi.LocalTarget(self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_cleared.fastq'))
return lst
class cutadapt(luigi.Task):
# params:
# Adapter (string)
# qualit (int)
# quality_base (int) (pherd)
# len to skip (int)
# file list (strings)
# output dir name string - it can be temporarry? :)
# fired for each file
config = luigi.DictParameter()
def requires(self):
return [sam2fq(self.config)]
def output(self):
lst = []
for s in self.config['samples']:
for q in self.config['minlen']:
lst.append(luigi.LocalTarget(self.config['odir'].rstrip('/')+'/'+q+'/'+s['name']+'_cleared.fastq'))
return lst
def run(self):
#preparing command to run, not in single line due to file list
#if output dependant from quality and quality as list separate it in function!
lst = []
for s in self.config['samples']:
for q in self.config['minlen']:
lst.append(["cutadapt", '--minimum-length', q, '-O', self.config['overlap'], "-a", self.config['adapter'],
'-o', self.config['odir'].rstrip('/')+'/'+q+'/'+s['name']+'.fastq',
self.config['temp_dir'].rstrip('/')+'/clearing/'+s['name']+'/'+s['name']+'_cleared.fastq'
])
#subprocess.call(cmd)
with Pool(int(self.config['threads'])) as p:
p.map(utils.executor_pipe, lst)
|
py | 7dfc2b0a75e11e37817b7e0aca4d825d4c8b84c8 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for model_search.ensembler."""
from absl import logging
from absl.testing import parameterized
from model_search import ensembler
from model_search.architecture import architecture_utils
from model_search.proto import ensembling_spec_pb2
from model_search.proto import phoenix_spec_pb2
import tensorflow.compat.v2 as tf
_NONADAPTIVE_GRAPH_NODES_PRIORS = [
'Phoenix/Ensembler/AddN',
'Phoenix/Ensembler/truediv',
'Phoenix/Ensembler/truediv/y',
'Phoenix/Ensembler/StopGradient',
'Phoenix/Ensembler/StopGradient_1',
'zeros',
'zeros_1',
]
_NONADAPTIVE_GRAPH_NODES_PRIORS_WEIGHTED = [
'Phoenix/Ensembler/StopGradient',
'Phoenix/Ensembler/StopGradient_1',
'Phoenix/Ensembler/concat',
'Phoenix/Ensembler/concat/axis',
'Phoenix/Ensembler/dense/BiasAdd',
'Phoenix/Ensembler/dense/BiasAdd/ReadVariableOp',
'Phoenix/Ensembler/dense/MatMul',
'Phoenix/Ensembler/dense/MatMul/ReadVariableOp',
'Phoenix/Ensembler/dense/bias',
'Phoenix/Ensembler/dense/bias/Assign',
'Phoenix/Ensembler/dense/bias/Initializer/zeros',
'Phoenix/Ensembler/dense/bias/IsInitialized/VarIsInitializedOp',
'Phoenix/Ensembler/dense/bias/Read/ReadVariableOp',
'Phoenix/Ensembler/dense/kernel',
'Phoenix/Ensembler/dense/kernel/Assign',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform/RandomUniform',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform/max',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform/min',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform/mul',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform/shape',
'Phoenix/Ensembler/dense/kernel/Initializer/random_uniform/sub',
'Phoenix/Ensembler/dense/kernel/IsInitialized/VarIsInitializedOp',
'Phoenix/Ensembler/dense/kernel/Read/ReadVariableOp',
'zeros',
'zeros_1',
]
_SEARCH_GRAPH_NODES = [
'dense/BiasAdd',
'dense/BiasAdd/ReadVariableOp',
'dense/MatMul',
'dense/MatMul/ReadVariableOp',
'dense/bias',
'dense/bias/Assign',
'dense/bias/Initializer/zeros',
'dense/bias/IsInitialized/VarIsInitializedOp',
'dense/bias/Read/ReadVariableOp',
'dense/kernel',
'dense/kernel/Assign',
'dense/kernel/Initializer/random_uniform',
'dense/kernel/Initializer/random_uniform/RandomUniform',
'dense/kernel/Initializer/random_uniform/max',
'dense/kernel/Initializer/random_uniform/min',
'dense/kernel/Initializer/random_uniform/mul',
'dense/kernel/Initializer/random_uniform/shape',
'dense/kernel/Initializer/random_uniform/sub',
'dense/kernel/IsInitialized/VarIsInitializedOp',
'dense/kernel/Read/ReadVariableOp',
'zeros',
]
_ADAPTIVE_AVERAGE_NODE_PRIORS = [
'Phoenix/Ensembler/AddN',
'Phoenix/Ensembler/truediv',
'Phoenix/Ensembler/truediv/y',
'Phoenix/Ensembler/StopGradient',
'dense/BiasAdd',
'dense/BiasAdd/ReadVariableOp',
'dense/MatMul',
'dense/MatMul/ReadVariableOp',
'dense/bias',
'dense/bias/Assign',
'dense/bias/Initializer/zeros',
'dense/bias/IsInitialized/VarIsInitializedOp',
'dense/bias/Read/ReadVariableOp',
'dense/kernel',
'dense/kernel/Assign',
'dense/kernel/Initializer/random_uniform',
'dense/kernel/Initializer/random_uniform/RandomUniform',
'dense/kernel/Initializer/random_uniform/max',
'dense/kernel/Initializer/random_uniform/min',
'dense/kernel/Initializer/random_uniform/mul',
'dense/kernel/Initializer/random_uniform/shape',
'dense/kernel/Initializer/random_uniform/sub',
'dense/kernel/IsInitialized/VarIsInitializedOp',
'dense/kernel/Read/ReadVariableOp',
'zeros',
'zeros_1',
]
_RESIDUAL_AVERAGE_PRIOR = [
'Phoenix/Ensembler/AddN',
'Phoenix/Ensembler/truediv',
'Phoenix/Ensembler/truediv/y',
'Phoenix/Ensembler/StopGradient',
'dense/BiasAdd',
'dense/BiasAdd/ReadVariableOp',
'dense/MatMul',
'dense/MatMul/ReadVariableOp',
'dense/bias',
'dense/bias/Assign',
'dense/bias/Initializer/zeros',
'dense/bias/IsInitialized/VarIsInitializedOp',
'dense/bias/Read/ReadVariableOp',
'dense/kernel',
'dense/kernel/Assign',
'dense/kernel/Initializer/random_uniform',
'dense/kernel/Initializer/random_uniform/RandomUniform',
'dense/kernel/Initializer/random_uniform/max',
'dense/kernel/Initializer/random_uniform/min',
'dense/kernel/Initializer/random_uniform/mul',
'dense/kernel/Initializer/random_uniform/shape',
'dense/kernel/Initializer/random_uniform/sub',
'dense/kernel/IsInitialized/VarIsInitializedOp',
'dense/kernel/Read/ReadVariableOp',
'zeros',
'zeros_1',
]
_ADAPTIVE_WEIGHTED_PRIORS = [
'Phoenix/Ensembler/StopGradient',
'Phoenix/Ensembler/concat',
'Phoenix/Ensembler/concat/axis',
'Phoenix/Ensembler/dense_1/BiasAdd',
'Phoenix/Ensembler/dense_1/BiasAdd/ReadVariableOp',
'Phoenix/Ensembler/dense_1/MatMul',
'Phoenix/Ensembler/dense_1/MatMul/ReadVariableOp',
'Phoenix/Ensembler/dense_1/bias',
'Phoenix/Ensembler/dense_1/bias/Assign',
'Phoenix/Ensembler/dense_1/bias/Initializer/zeros',
'Phoenix/Ensembler/dense_1/bias/IsInitialized/VarIsInitializedOp',
'Phoenix/Ensembler/dense_1/bias/Read/ReadVariableOp',
'Phoenix/Ensembler/dense_1/kernel',
'Phoenix/Ensembler/dense_1/kernel/Assign',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/RandomUniform',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/max',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/min',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/mul',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/shape',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/sub',
'Phoenix/Ensembler/dense_1/kernel/IsInitialized/VarIsInitializedOp',
'Phoenix/Ensembler/dense_1/kernel/Read/ReadVariableOp',
'dense/BiasAdd',
'dense/BiasAdd/ReadVariableOp',
'dense/MatMul',
'dense/MatMul/ReadVariableOp',
'dense/bias',
'dense/bias/Assign',
'dense/bias/Initializer/zeros',
'dense/bias/IsInitialized/VarIsInitializedOp',
'dense/bias/Read/ReadVariableOp',
'dense/kernel',
'dense/kernel/Assign',
'dense/kernel/Initializer/random_uniform',
'dense/kernel/Initializer/random_uniform/RandomUniform',
'dense/kernel/Initializer/random_uniform/max',
'dense/kernel/Initializer/random_uniform/min',
'dense/kernel/Initializer/random_uniform/mul',
'dense/kernel/Initializer/random_uniform/shape',
'dense/kernel/Initializer/random_uniform/sub',
'dense/kernel/IsInitialized/VarIsInitializedOp',
'dense/kernel/Read/ReadVariableOp',
'zeros',
'zeros_1',
]
_RESIDUAL_WEIGHTED_PRIOR = [
'Phoenix/Ensembler/StopGradient',
'Phoenix/Ensembler/concat',
'Phoenix/Ensembler/concat/axis',
'Phoenix/Ensembler/dense_1/BiasAdd',
'Phoenix/Ensembler/dense_1/BiasAdd/ReadVariableOp',
'Phoenix/Ensembler/dense_1/MatMul',
'Phoenix/Ensembler/dense_1/MatMul/ReadVariableOp',
'Phoenix/Ensembler/dense_1/bias',
'Phoenix/Ensembler/dense_1/bias/Assign',
'Phoenix/Ensembler/dense_1/bias/Initializer/zeros',
'Phoenix/Ensembler/dense_1/bias/IsInitialized/VarIsInitializedOp',
'Phoenix/Ensembler/dense_1/bias/Read/ReadVariableOp',
'Phoenix/Ensembler/dense_1/kernel',
'Phoenix/Ensembler/dense_1/kernel/Assign',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/RandomUniform',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/max',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/min',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/mul',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/shape',
'Phoenix/Ensembler/dense_1/kernel/Initializer/random_uniform/sub',
'Phoenix/Ensembler/dense_1/kernel/IsInitialized/VarIsInitializedOp',
'Phoenix/Ensembler/dense_1/kernel/Read/ReadVariableOp',
'dense/BiasAdd',
'dense/BiasAdd/ReadVariableOp',
'dense/MatMul',
'dense/MatMul/ReadVariableOp',
'dense/bias',
'dense/bias/Assign',
'dense/bias/Initializer/zeros',
'dense/bias/IsInitialized/VarIsInitializedOp',
'dense/bias/Read/ReadVariableOp',
'dense/kernel',
'dense/kernel/Assign',
'dense/kernel/Initializer/random_uniform',
'dense/kernel/Initializer/random_uniform/RandomUniform',
'dense/kernel/Initializer/random_uniform/max',
'dense/kernel/Initializer/random_uniform/min',
'dense/kernel/Initializer/random_uniform/mul',
'dense/kernel/Initializer/random_uniform/shape',
'dense/kernel/Initializer/random_uniform/sub',
'dense/kernel/IsInitialized/VarIsInitializedOp',
'dense/kernel/Read/ReadVariableOp',
'zeros',
'zeros_1',
]
class EnsemblerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
{
'testcase_name':
'nonadaptive_average_priors',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.NONADAPTIVE_ENSEMBLE_SEARCH,
'priors':
2,
'logits':
0,
'output_graph':
_NONADAPTIVE_GRAPH_NODES_PRIORS,
}, {
'testcase_name':
'intermix_average_priors',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec
.INTERMIXED_NONADAPTIVE_ENSEMBLE_SEARCH,
'priors':
2,
'logits':
0,
'output_graph':
_NONADAPTIVE_GRAPH_NODES_PRIORS,
}, {
'testcase_name':
'nonadaptive_average_search',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.NONADAPTIVE_ENSEMBLE_SEARCH,
'priors':
0,
'logits':
1,
'output_graph':
_SEARCH_GRAPH_NODES,
}, {
'testcase_name':
'intermix_average_search',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec
.INTERMIXED_NONADAPTIVE_ENSEMBLE_SEARCH,
'priors':
0,
'logits':
1,
'output_graph':
_SEARCH_GRAPH_NODES,
}, {
'testcase_name':
'adaptive_average_search',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.ADAPTIVE_ENSEMBLE_SEARCH,
'priors':
0,
'logits':
1,
'output_graph':
_SEARCH_GRAPH_NODES,
}, {
'testcase_name':
'residual_average_search',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.RESIDUAL_ENSEMBLE_SEARCH,
'priors':
0,
'logits':
1,
'output_graph':
_SEARCH_GRAPH_NODES,
}, {
'testcase_name':
'nonadaptive_weighted',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.WEIGHTED_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.NONADAPTIVE_ENSEMBLE_SEARCH,
'priors':
2,
'logits':
0,
'output_graph':
_NONADAPTIVE_GRAPH_NODES_PRIORS_WEIGHTED,
}, {
'testcase_name':
'intermix_weighted',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.WEIGHTED_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec
.INTERMIXED_NONADAPTIVE_ENSEMBLE_SEARCH,
'priors':
2,
'logits':
0,
'output_graph':
_NONADAPTIVE_GRAPH_NODES_PRIORS_WEIGHTED,
}, {
'testcase_name':
'adaptive_average_prior',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.ADAPTIVE_ENSEMBLE_SEARCH,
'priors':
1,
'logits':
1,
'output_graph':
_ADAPTIVE_AVERAGE_NODE_PRIORS,
}, {
'testcase_name':
'residual_average_prior',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.AVERAGE_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.RESIDUAL_ENSEMBLE_SEARCH,
'priors':
1,
'logits':
1,
'output_graph':
_RESIDUAL_AVERAGE_PRIOR,
}, {
'testcase_name':
'adaptive_weighted_prior',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.WEIGHTED_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.ADAPTIVE_ENSEMBLE_SEARCH,
'priors':
1,
'logits':
1,
'output_graph':
_ADAPTIVE_WEIGHTED_PRIORS,
}, {
'testcase_name':
'residual_weighted_prior',
'combining_type':
ensembling_spec_pb2.EnsemblingSpec.WEIGHTED_ENSEMBLE,
'search_type':
ensembling_spec_pb2.EnsemblingSpec.RESIDUAL_ENSEMBLE_SEARCH,
'priors':
1,
'logits':
1,
'output_graph':
_RESIDUAL_WEIGHTED_PRIOR,
})
def test_ensembler(self, combining_type, search_type, priors, logits,
output_graph):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec()
spec.ensemble_spec.combining_type = combining_type
spec.ensemble_spec.ensemble_search_type = search_type
ensembler_instance = ensembler.Ensembler(spec)
priors_logits_specs = []
search_logits_specs = []
if priors:
for _ in range(priors):
spec = architecture_utils.LogitsSpec(logits=tf.zeros([20, 10]))
priors_logits_specs.append(spec)
if logits:
spec = architecture_utils.LogitsSpec(
logits=tf.keras.layers.Dense(10)(tf.zeros([20, 10])))
search_logits_specs.append(spec)
_ = ensembler_instance.bundle_logits(
priors_logits_specs=priors_logits_specs,
search_logits_specs=search_logits_specs,
logits_dimension=10)
nodes = tf.compat.v1.get_default_graph().as_graph_def().node
logging.info([node.name for node in nodes])
self.assertCountEqual([n.name for n in nodes], output_graph)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
|
py | 7dfc2c19c5e91a3229c82941a14028c742d4a764 | import logging
from datetime import datetime
import threading
import time
from .database import Database
# ========================================
class DataLogger:
# ========================================
def __init__(self, loggingFrequency=50):
# dd/mm/YY H:M:S
#self.tablename = datetime.now().strftime("%d%m%Y%H%M%S")
self.tablename = "mytable"
self.columnsdict = {
"id": "INTEGER PRIMARY KEY AUTOINCREMENT",
"time": "TEXT",
"throttle": "INT",
"pitch": "INT",
"yaw": "INT",
"roll": "INT",
"armed": "INT",
"engine1": "INT",
"engine2": "INT",
"engine3": "INT",
"engine4": "INT",
"angx": "FLOAT",
"angy": "FLOAT",
"head": "FLOAT",
}
#Database.delete_table(self.tablename)
print(f"Creating table {self.tablename}")
Database.create_table(self.tablename, self.columnsdict)
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.stopThread = False
self.cmds = [1000, 1000, 1000, 1000]
self.motors = [1000, 1000, 1000, 1000]
self.attitude = [0.0, 0.0, 0.0]
self.armed = 0
self.dt = (1.0/loggingFrequency)
# ========================================
def _run(self):
while not self.stopThread:
t = time.time()
Database.insert(
self.tablename, [{
"time": str(t), "throttle": self.cmds[3], "pitch": self.cmds[1],
"roll": self.cmds[0], "yaw": self.cmds[2], "armed": self.armed,
"engine1": self.motors[0],"engine2": self.motors[1], "engine3": self.motors[2],
"engine4": self.motors[3], "angx": self.attitude[0],
"angy": self.attitude[1], "head": self.attitude[2]
}])
time.sleep(self.dt)
# ========================================
def start(self):
self.thread.start()
# ========================================
def stop(self):
self.stopThread = True
|
py | 7dfc2c427e4a06ada834d2cf76be8d5928a0c732 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Int, Typed
from enaml.widgets.stack import ProxyStack
from .QtCore import QTimer, QEvent, Signal
from .QtGui import QStackedWidget, QPixmap
from .q_pixmap_painter import QPixmapPainter
from .q_pixmap_transition import (
QDirectedTransition, QSlideTransition, QWipeTransition, QIrisTransition,
QFadeTransition, QCrossFadeTransition
)
from .qt_constraints_widget import QtConstraintsWidget
from .qt_stack_item import QtStackItem
TRANSITION_TYPE = {
'slide': QSlideTransition,
'wipe': QWipeTransition,
'iris': QIrisTransition,
'fade': QFadeTransition,
'crossfade': QCrossFadeTransition,
}
TRANSITION_DIRECTION = {
'left_to_right': QDirectedTransition.LeftToRight,
'right_to_left': QDirectedTransition.RightToLeft,
'top_to_bottom': QDirectedTransition.TopToBottom,
'bottom_to_top': QDirectedTransition.BottomToTop,
}
def make_transition(transition):
""" Make a QPixmapTransition from an Enaml Transition.
Parameters
----------
transition : Transition
The Enaml Transition object.
Returns
-------
result : QPixmapTransition
A QPixmapTransition to use as the transition.
"""
qtransition = TRANSITION_TYPE[transition.type]()
qtransition.setDuration(transition.duration)
if isinstance(qtransition, QDirectedTransition):
qtransition.setDirection(TRANSITION_DIRECTION[transition.direction])
return qtransition
class QStack(QStackedWidget):
""" A QStackedWidget subclass which adds support for transitions.
"""
#: A signal emitted when a LayoutRequest event is posted to the
#: stack widget. This will typically occur when the size hint of
#: the stack is no longer valid.
layoutRequested = Signal()
def __init__(self, *args, **kwargs):
""" Initialize a QStack.
Parameters
----------
*args, **kwargs
The positional and keyword arguments needed to initialize
a QStackedWidget.
"""
super(QStack, self).__init__(*args, **kwargs)
self._painter = None
self._transition = None
self._transition_index = 0
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _onTransitionFinished(self):
""" A signal handler for the `finished` signal of the transition.
This method resets the internal painter and triggers the normal
index change for the stacked widget.
"""
painter = self._painter
if painter is not None:
painter.setTargetWidget(None)
self._painter = None
self.setCurrentIndex(self._transition_index)
# This final show() makes sure the underlying widget is visible.
# If transitions are being fired rapidly, it's possible that the
# current index and the transition index will be the same when
# the call above is invoked. In such cases, Qt short circuits the
# evaluation and the current widget is not shown.
self.currentWidget().show()
def _runTransition(self):
""" A private method which runs the transition effect.
The `_transition_index` attribute should be set before calling
this method. If no transition object exists for this widget,
then it is equivalent to calling `setCurrentIndex`. If the new
index is not different from the current index the transition
will not be performed.
"""
from_index = self.currentIndex()
to_index = self._transition_index
# If the index hasn't changed, there is nothing to update.
if from_index == to_index:
return
# If there is no transition applied, just change the index.
transition = self._transition
if transition is None:
self.setCurrentIndex(to_index)
return
# Otherwise, grab the pixmaps for the start and ending states
# and set them on the transition. The widgets are resized to the
# current size so that the pixmaps are grabbed in a good state.
src_widget = self.widget(from_index)
dst_widget = self.widget(to_index)
size = self.size()
src_widget.resize(size)
dst_widget.resize(size)
src_pixmap = QPixmap.grabWidget(src_widget)
dst_pixmap = QPixmap.grabWidget(dst_widget)
out_pixmap = QPixmap(size)
transition.setPixmaps(src_pixmap, dst_pixmap, out_pixmap)
# Hide both of the constituent widgets so that the painter has
# a clean widget on which to draw.
src_widget.setVisible(False)
dst_widget.setVisible(False)
# Hookup the pixmap painter and start the transition.
painter = self._painter = QPixmapPainter()
painter.setTargetWidget(self)
transition.pixmapUpdated.connect(painter.drawPixmap)
transition.start()
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def event(self, event):
""" A custom event handler which handles LayoutRequest events.
When a LayoutRequest event is posted to this widget, it will
emit the `layoutRequested` signal. This allows an external
consumer of this widget to update their external layout.
"""
res = super(QStack, self).event(event)
if event.type() == QEvent.LayoutRequest:
self.layoutRequested.emit()
return res
def transition(self):
""" Get the transition installed on this widget.
Returns
-------
result : QPixmapTransition or None
The pixmap transition installed on this widget, or None if
no transition is being used.
"""
return self._transition
def setTransition(self, transition):
""" Set the transition to be used by this widget.
Parameters
----------
transition : QPixmapTransition or None
The transition to use when changing between widgets on this
stack or None if no transition should be used.
"""
old = self._transition
if old is not None:
old.finished.disconnect(self._onTransitionFinished)
self._transition = transition
if transition is not None:
transition.finished.connect(self._onTransitionFinished)
def transitionTo(self, index):
""" Transition the stack widget to the given index.
If no transition object is installed on the widget,
this is equivalent to calling `setCurrentIndex`. Otherwise,
the change will be animated using the installed transition.
Parameters
----------
index : int
The index of the target transition widget.
"""
if index < 0 or index >= self.count():
return
self._transition_index = index
if self.transition() is not None:
QTimer.singleShot(0, self._runTransition)
else:
self.setCurrentIndex(index)
#: Cyclic notification guard
INDEX_FLAG = 0x1
class QtStack(QtConstraintsWidget, ProxyStack):
""" A Qt implementation of an Enaml Stack.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QStack)
#: Cyclic notification guards
_guard = Int(0)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying QStack widget.
"""
self.widget = QStack(self.parent_widget())
def init_widget(self):
""" Initialize the underlying control.
"""
super(QtStack, self).init_widget()
self.set_transition(self.declaration.transition)
def init_layout(self):
""" Initialize the layout of the underlying control.
"""
super(QtStack, self).init_layout()
widget = self.widget
for item in self.stack_items():
widget.addWidget(item)
# Bypass the transition effect during initialization.
widget.setCurrentIndex(self.declaration.index)
widget.layoutRequested.connect(self.on_layout_requested)
widget.currentChanged.connect(self.on_current_changed)
#--------------------------------------------------------------------------
# Utility Methods
#--------------------------------------------------------------------------
def stack_items(self):
""" Get the stack items defined on the control.
"""
for d in self.declaration.stack_items():
w = d.proxy.widget
if w is not None:
yield w
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def child_added(self, child):
""" Handle the child added event for a QtStack.
"""
super(QtStack, self).child_added(child)
if isinstance(child, QtStackItem):
for index, dchild in enumerate(self.children()):
if child is dchild:
self.widget.insertWidget(index, child.widget)
def child_removed(self, child):
""" Handle the child removed event for a QtStack.
"""
super(QtStack, self).child_removed(child)
if isinstance(child, QtStackItem):
self.widget.removeWidget(child.widget)
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def on_layout_requested(self):
""" Handle the `layoutRequested` signal from the QStack.
"""
self.size_hint_updated()
def on_current_changed(self):
""" Handle the `currentChanged` signal from the QStack.
"""
if not self._guard & INDEX_FLAG:
self._guard |= INDEX_FLAG
try:
self.declaration.index = self.widget.currentIndex()
finally:
self._guard &= ~INDEX_FLAG
#--------------------------------------------------------------------------
# Widget Update Methods
#--------------------------------------------------------------------------
def set_index(self, index):
""" Set the current index of the underlying widget.
"""
if not self._guard & INDEX_FLAG:
self._guard |= INDEX_FLAG
try:
self.widget.transitionTo(index)
finally:
self._guard &= ~INDEX_FLAG
def set_transition(self, transition):
""" Set the transition on the underlying widget.
"""
if transition:
self.widget.setTransition(make_transition(transition))
else:
self.widget.setTransition(None)
|
py | 7dfc2d76b8e80222fb0de2ce648cce292497c529 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Client class for connecting to Sonoff Device."""
'''
Explanation of how Autoslide works:
The Autoslide door has a new WiFi interface, which is produced by iTEAD, using their eWelink technology and IoTgo platform.
This is the same technology as used in their sonoff devices, so we can learn a lot from the common sonoff device usage.
Assuming you can get the Autoslide WiFi module to connect to your WiFi network (I had trouble, and had to use the
eWelink app in "legacy" mode to get it to connect), you should be able to register the device using the Autoslide app
(or the eWelink app - both work, for registration).
Once you have control using the Autoslide app, you can now use this client for connecting and controlling your
Autoslide doors via MQTT, or command line.
NOTE: You often get a strange situation where you can send commands to the Autoslide (and they work), but you receive
nothing back but a Timeout message. The client message you get is shown below:
Received data: {
"apikey": "530303a6-cf2c-4246-894c-50855b00e6d8",
"deviceid": "100050a4f3",
"error": 504,
"reason": "Request Timeout",
"sequence": "3"
}
This seems to be something to do with iTEAD's servers. You can get connection back by power cycling the Autoslide,
and reconnecting the app/client.
Power cycling the Autoslide reconnects the Autoslide's websocket client to iTEAD's servers, so you can pick up again (if you are still connected).
Do not hit the servers with too many commands too quickly, max is about 1 per second. You will get Timeouts if you send commands too quickly.
command line options:
client.get_config()
client.set_mode(mode, device=0)
client.trigger_door(trigger, device=0)
client.set_option(option, value, device=0)
where mode is a number (0=auto, 1=stacker, 2=lock, 3=pet)
trigger is a number (stack=4, pet=3, outdoor=1, indoor=2, none=0)
option is a letter (or descriptor) 'a':'mode', #0=auto, 1=stacker, 2=lock, 3=pet
'd':'unknown',
'e':'75%_power', #(0=ON)
'f':'slam_shut', #(0=ON)
'g':'unknown',
'h':'heavy_door', #(0=ON)
'i':'stacker_mode', #(0=ON)
'j':'door_delay', #(in seconds)
'k':'unknown',
                                   'l':'notifications' #(0=ON)
value is a number 0=ON, 1=OFF, or for delay a number in seconds.
device is either an index number (device 0,1,2 etc), a deviceid (as given by get_config), or the device description you assigned in the Autoslide app (eg "Patio Door")
default device is '0', so if you only have one ewelink device, you can leave device out.
eWelink devices include sonoff switches and other iTEAD devices, so if you have an Autoslide and other iTEAD devices, you have more than one device, so you need to
specify the Autoslide device
If you do supply an asyncio loop, you will need to start the loop, with the client.login() - like this:
loop.run_until_complete(client.login())
You can then control the door by publishing to the mqtt topics.
Examples:
mosquitto_pub -t "/ewelink_command/10005d73ab/door_trigger" -m "3"
mosquitto_pub -t "/ewelink_command/10005d73ab/set_mode" -m "3"
The actual configuration of all this is gleaned from web sources on the sonoff ewelink protocol, and reverse engineering the data.
I have no engineering documents to go on, so some of the parameters are a best guess as to what they do.
please update me if you have better information, or find out what the 'unknown' parameters are/do. One of them is probably for right hand door vs left hand door,
but I don't know which, and I don't want to mess with my working door to find out.
Nick Waterton P.Eng.
'''
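# Minimal command-line sketch (illustrative only; mirrors the options listed above,
# the device name "Patio Door" is just an example description from the text):
#   client.get_config()
#   client.set_mode(3, device="Patio Door")          # 3 = pet mode
#   client.trigger_door(3, device="Patio Door")      # 3 = pet trigger
#   client.set_option('j', 10, device="Patio Door")  # door delay of 10 seconds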
import asyncio
import time
import json
import datetime
from jinja2 import Environment, BaseLoader
from MsgTemplate import MsgTemplate
import logging
logger = logging.getLogger('Main.'+__name__)
class Default():
""" An eweclient class for connecting to unknown devices
Also used as the base class for a device, just override the sections that are not default for your new device.
To use this you can publish to /ewelink_command/deviceid/set_<param> <value> (or just <param>) via mqtt, or use the
    command line option client.send_command(deviceid, command='set_<param>', value=<value>) or
just client.send_command(deviceid, command=<param>, value=<value>)
    You can also send json using send_json (mqtt or command line).
deviceid can be the actual device id, or the device name (eg. "Patio Door", "Switch 1" etc.)
subscribing to /ewelink_status/# will show all the parameters published
There are some special commands:
set_switch <on or off>
set_led <on or off>
        send_json <json string>
        clear_timers
get_config
that work on most devices (possibly not all).
Example:
mosquitto_pub -t "/ewelink_command/10003a430d/set_switch" -m "off"
mosquitto_pub -t "/ewelink_command/Switch 1 POW/set_switch" -m "on"
mosquitto_pub -t "/ewelink_command/Switch 1 POW/get_config" -m ""
"""
productModel = ["Default"] #model list used for identifying the correct class
device_type = 'switch' #device type for template rendering
triggers =[ ] #things used to trigger device eg 'switch'
settings ={ } #(read/write options) #if you don't fill this in, it will be autopopulated, but won't show params that don't exist yet
other_params ={ "fwVersion": "fwVersion", #(read only stuff)
"rssi": "rssi",
"staMac": "staMac"
}
numerical_params=[ ] #all basic parameters are assumed to be strings unless you include the parameter name here (in which case it's converted to an int)
timers_supported=[ 'delay', 'repeat', 'once', 'duration']
__version__ = '1.0.1'
def __init__(self, parent, deviceid, device, productModel, initial_parameters={}):
self.logger = logging.getLogger('Main.'+__class__.__name__)
#self.logger.debug('Started Class: %s, version: %s' % (__class__.__name__, __version__))
self._parent = parent
self._deviceid = deviceid
self._config = device
self.loop = asyncio.get_event_loop()
self._update_settings(self._config)
self.load_template()
self._productModel = productModel #we are created as this kind of productModel if there is more than one kind of model(one of self.productModel list)
for param, value in initial_parameters.items():
pass
self.q = asyncio.Queue()
self.loop.create_task(self._process_queue())
self.logger.debug('Created %s Device V:%s, model: %s' % (self.__class__.__name__,self.__version__,self._productModel))
async def _process_queue(self):
while True:
try:
command, message = await self.q.get()
if command is None:
#self.logger.debug('deviceid: %s, got EXIT command' % self.deviceid)
self.q.task_done()
raise RuntimeError('task completed')
self.logger.debug('deviceid: %s, got command from queue: %s, %s' % (self.deviceid, command, message))
func = self._on_message(command, message)
if func:
asyncio.run_coroutine_threadsafe(func,self.loop)
self.q.task_done()
except Exception as e:
self.logger.debug('deviceid: %s, process queue exited: %s' % (self.deviceid,e))
break
def _update_settings(self,data):
for param in data['params']:
if param not in self.settings.keys() and param not in self.other_params.keys():
self.logger.debug('adding %s to settings' % param)
self.settings[param]=param
def pprint(self,obj):
"""Pretty JSON dump of an object."""
return self._parent.pprint(obj)
@property
def config(self):
return self._config
@property
def deviceid(self):
return self._deviceid
@property
def name(self):
return self._config['name']
def is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def convert_json(self, message, throw_error=False):
'''
converts message to dictionary if it's a valid json string.
Does not convert single numbers, returns them unchanged.
If throw_error is set, raises JSONDecodeError on invalid json
'''
if isinstance(message, str):
try:
if not self.is_number(message):
message = json.loads(message.replace("'",'"'))
except json.JSONDecodeError:
if throw_error:
raise
return message
def _on_message(self, command, message):
self.logger.info("%s CLIENT: Received Command: %s, device: %s, Setting: %s" % (__class__.__name__,command, self.deviceid, message))
func = None
json_message = message
message = self.convert_json(message.lower()) #make case insensitive as "ON" does not work ("on" does)
for param, description in self.settings.items():
if command == param or command == description:
self.logger.debug('Setting parameter for device: %s: %s, %s' % (self.deviceid, param, message))
func = self._setparameter(param, message)
break
else:
if 'set_switch' in command:
self.logger.debug('set switch mode: %s, %s' % (self.deviceid,message))
func = self._setparameter('switch', message)
elif 'set_led' in command:
self.logger.debug('set led mode: %s, %s' % (self.deviceid,message)) #example: mosquitto_pub -t "/ewelink_command/100050xxxx/set_led" -m "on" or mosquitto_pub -t "/ewelink_command/Switch 1/set_led" -m "off"
func = self._setparameter('sledOnline', message)
elif 'send_json' in command:
'''
Sets "parms" to whatever you send as a json string (not dict)
You can use this to send custom json parameters to the device (if you know the format)
'''
self.logger.debug('send_json: for device %s' % self.deviceid)
try:
func = self._sendjson(self.convert_json(json_message, True))
except json.JSONDecodeError as e:
                    self.logger.error('Your json is invalid: {}, Error: {}'.format(json_message, e))
elif 'get_config' in command:
self.logger.debug('get_config: for device %s' % self.deviceid)
func = self._getparameter()
elif 'add_timer' in command:
self.logger.debug('add_timer: for device %s' % self.deviceid)
func = self._addtimer(message)
elif 'list_timer' in command:
self.logger.debug('list_timers: for device %s' % self.deviceid)
func = self._list_timers()
elif 'del_timer' in command:
self.logger.debug('delete_timer: for device %s' % self.deviceid)
func = self._del_timer(message)
elif 'clear_timers' in command:
self.logger.debug('clear_timers: for device %s' % self.deviceid)
func = self._sendjson({'timers': []})
else:
func = self._on_message_default(command, message)
return func
def _on_message_default(self, command, message):
'''
Default which can be overridden by a class for processing special functions for the device while retaining the basic
commands
'''
self.logger.warn('Command: %s not found' % command)
return None
async def _list_timers(self):
await self._getparameter(waitResponse=True)
timers = self._config['params'].get('timers', None)
if timers:
for num, timer in enumerate(timers):
self.logger.info('deviceid: %s, timer %d: type:%s at:%s' % (self.deviceid, num, timer.get('coolkit_timer_type',timer['type']), timer['at']))
else:
self.logger.info('deviceid: %s, no timers configured' % self.deviceid)
async def _del_timer(self, message):
await self._list_timers()
deleted = 0
timers = self._config['params'].get('timers', None)
if timers:
nums_string = message.replace(',',' ').split()
nums = sorted([int(num) for num in nums_string if num.isdigit()], reverse=True)
for num in nums:
try:
assert num < len(timers)
del_timer = timers.pop(num)
deleted +=1
self.logger.info('deviceid: %s, timer %d: type:%s at:%s DELETED' % (self.deviceid, num, del_timer.get('coolkit_timer_type',del_timer['type']), del_timer['at']))
except AssertionError as e:
self.logger.error('deviceid: %s, problem deleting timer %d, error %s' % (self.deviceid, num, e))
else:
self.logger.warn('deviceid: %s, can\'t delete timers: %s no timers found' % (self.deviceid,message))
if deleted > 0:
self.logger.debug('deviceid: %s,deleted %d timers' % (self.deviceid,deleted))
func = await self._sendjson({'timers':timers})
else:
func = None
return func
async def _addtimer(self, message):
'''
NOTE Not all devices support all types of timers...
You can set up to 8 timers, but only 1 loop timer
see comments for message format
'''
await self._list_timers()
org_message = message
message = message.split(' ')
timer_type = message.pop(0)
if timer_type not in self.timers_supported:
            self.logger.error('timer setting type is incorrect, must be one of %s, you sent: %s' % (self.timers_supported, timer_type))
return None
timers = {}
timers['timers'] = self._config['params'].get('timers', [])
if len(timers['timers'])+1 > 8:
self.logger.error('deviceid: %s,Cannot set more than 8 timers' % self.deviceid)
return None
self.logger.debug('adding timer: %s, %s' % (len(timers['timers'])+1, org_message))
if timer_type == 'delay':
#"countdown" Timer format is 'delay period (channel) switch (manual)' where 'manual' is for TH16/10 to disable auto control and can be left off normally
            auto = True
            channel = None
            try:
                assert len(message) >= 2
                period = message.pop(0)
                if 'CH' in self._productModel: #ie there is more than one channel
                    channel = message.pop(0)
                    assert channel.isdigit()
                    channel = int(channel)
                switch = message.pop(0)
                if len(message) > 0: #an extra 'manual' token disables auto control
                    auto = False
assert period.isdigit()
assert int(period) > 0
assert switch in ['on','off']
except (AssertionError, IndexError) as e:
self.logger.error('delay timer format is "delay period (channel) switch (manual)" - channel and manual are optional, the rest are mandatory, you sent: %s, error: %s' % (org_message,e))
return None
timer = self._create_timer('delay', switch, period, channel=channel, auto=auto)
timers['timers'].append(timer)
elif timer_type == 'repeat':
#"Scheduled" Timer format is "repeat at_time(cron format) (channel) switch (manual)' - channel and manual are optional
            #Example cron time (5:00pm EST, every Monday) "0 22 * * 1" (12:05pm EST every week day) "5 17 * * 1,2,3,4,5"
auto = True
at_time = ''
channel = None
try:
assert len(message) >= 2
switch = message.pop()
while switch not in ['on','off'] and len(message) > 0:
switch = message.pop()
auto = False
if 'CH' in self._productModel: #ie there is more than one channel
channel = message.pop()
assert channel.isdigit()
channel = int(channel)
if len(message) > 0:
at_time = ' '.join(message)
assert self._parent._validate_cron(at_time)
assert switch in ['on','off']
except (AssertionError, IndexError) as e:
self.logger.error('repeat timer format is "repeat at_time(cron format) (channel) switch (manual)" - manual is optional, you sent: %s error: %s' % (org_message,e))
return None
timer = self._create_timer('repeat', switch, at_time, channel=channel, auto=auto)
timers['timers'].append(timer)
elif timer_type == 'once':
            #"Scheduled" Timer format is "once at_time(ISO format) (channel) switch (manual)' - channel and manual are optional
auto = True
at_time = ''
channel = None
try:
assert len(message) >= 2
at_time = message.pop(0)
switch = message.pop()
while switch not in ['on','off'] and len(message) > 0:
switch = message.pop()
auto = False
if 'CH' in self._productModel: #ie there is more than one channel
channel = message.pop()
assert channel.isdigit()
channel = int(channel)
assert self._parent._validate_iso8601(at_time)
assert switch in ['on','off']
except (AssertionError, IndexError) as e:
self.logger.error('once timer format is "once at_time(ISO format) (channel) switch (manual)" - channel and manual are optional, you sent: %s error: %s' % (org_message,e))
return None
timer = self._create_timer('once', switch, at_time, channel=channel, auto=auto)
timers['timers'].append(timer)
elif timer_type == 'duration':
#"loop" Timer format is 'duration at_time(ISO UTC) on_time off_time switch_on (switch_off) (manual)' switch_off is optional, manual is optional
at_time = message.pop(0).upper()
off_switch = None
auto = True
try:
assert len(message) >= 3
if len(message) >= 5:
auto = False
if len(message) >= 3:
on_duration = message.pop(0)
off_duration = message.pop(0)
on_switch = message.pop(0)
if len(message) >= 1:
off_switch = message.pop(0)
assert self._parent._validate_iso8601(at_time)
assert on_duration.isdigit()
assert off_duration.isdigit()
assert on_switch in ['on','off']
assert off_switch in ['on','off', None]
except (AssertionError, IndexError) as e:
self.logger.error('duration timer format is "duration ISO_time on_duration off_duration on_switch (off_switch) (manual)" ISO format eg is 2019-01-18T13:52:58.030Z - off_switch and manual are optional, the rest are mandatory, you sent: %s error:%s' % (org_message,e))
return None
timer = self._create_timer('duration', on_switch, at_time, on_duration, off_duration, off_switch, auto=auto)
timers['timers'].append(timer)
func = await self._sendjson(timers)
return func
def _create_timer(self, type='delay', on_switch='on', at_time='', on_duration='0', off_duration='0', off_switch=None, channel=None, auto=True):
'''
create timer dictionary
type is delay, repeat or duration
'''
timer = {}
if type == 'duration':
timer['at']=' '.join([at_time, on_duration, off_duration])
elif type == 'delay':
timer['at'] = (datetime.datetime.utcnow() + datetime.timedelta(minutes=int(at_time))).isoformat()[:23]+"Z"
timer['period'] = at_time
else:
timer['at'] = at_time
timer['coolkit_timer_type']=type
if off_switch:
if auto and 'mainSwitch' in self.settings:
timer['startDo']={'switch':on_switch, 'mainSwitch':on_switch}
timer['endDo']={'switch':off_switch, 'mainSwitch':off_switch}
else:
timer['startDo']={'switch':on_switch}
timer['endDo']={'switch':off_switch}
else:
if auto and 'mainSwitch' in self.settings:
timer['do']={'switch':on_switch, 'mainSwitch':on_switch}
else:
if channel is None:
timer['do']={'switch':on_switch}
else:
timer['do']={'outlet': channel,'switch':on_switch}
timer['enabled']=1
timer['mId']='87d1dfdf-e9cb-d9ee-af2a-42362079e6a4'
timer['type']= type if any(s in type for s in('duration', 'repeat')) else 'once'
return timer
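    # Illustrative only: _create_timer('delay', 'on', '5') returns roughly
    # {'at': '<utcnow+5min ISO>Z', 'period': '5', 'coolkit_timer_type': 'delay',
    #  'do': {'switch': 'on'}, 'enabled': 1, 'mId': '87d1...', 'type': 'once'}
    # ('mainSwitch' is added to 'do' when the device settings include one).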
def load_template(self):
# Output state change reporting template.
self.pub_topic = MsgTemplate(
topic='/ewelink_status/{{deviceid}}/{{param}}',
payload='{{value}}',
)
# Input on/off command template.
self.msg_on_off = MsgTemplate(
topic='/ewelink/{{deviceid}}/{{param}}/set',
payload='{ "cmd" : "{{value.lower()}}" }',
)
# Update the MQTT topics and payloads from the config file.
if self._parent._configuration.get('mqtt', None) is not None:
self.pub_topic.load_config(self._parent._configuration['mqtt'][self.device_type], 'state_topic', 'state_payload', qos=None)
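    # Example render (illustrative): for deviceid '100050a4f3', param 'mode' and value '3'
    # the default pub_topic becomes '/ewelink_status/100050a4f3/mode' with payload '3'.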
def _publish(self, param, value):
topic = self.pub_topic.render_topic({'deviceid':self.deviceid, 'name':self.name, 'param':param, 'value':value})
#message=self.pub_topic.to_json(value)
message = self.pub_topic.render_payload({'deviceid':self.deviceid,'value':value, 'param':param, 'name':self.name})
self._parent._publish(self.deviceid, topic, message)
async def _sendjson(self, message):
''' send a dictionary of parameters as a json string '''
if isinstance(message, str):
await self._parent._sendjson(self.deviceid, message)
else:
await self._parent._sendjson(self.deviceid, json.dumps(message))
async def _getparameter(self, params=[], waitResponse=False):
await self._parent._getparameter(self.deviceid, params, waitResponse)
async def _setparameter(self, param, targetState, update_config=True, waitResponse=False):
if param not in self.settings.keys():
for p,v in self.settings.items():
if param == v:
param = p
break
else:
                self.logger.warn('deviceid: %s, parameter: %s not found in device settings, sending anyway' % (self.deviceid, param))
if param in self.numerical_params:
targetState = int(targetState)
await self._parent._setparameter(self.deviceid, param, targetState, update_config, waitResponse)
def _handle_notification(self, data):
'''
receive status in action key 'update' or 'sysmsg'
'''
self.logger.debug("Received data %s" % data)
if data.get('error', 0) != 0:
self.logger.warn('Not Processing error')
return
self._parent.update(self._config, data)
self._config['update']=time.time()
try:
update = data['params']
if data.get('action', None):
if 'update' in data['action']:
self._update_settings(data)
self.logger.debug("Action Update: Publishing: %s" % (update))
self._publish_config(update)
self._publish('status', "OK")
elif 'sysmsg' in data['action']:
for param, value in update.items():
self.logger.debug("Sysmsg Update: Publishing: %s:%s" % (param, value))
self._publish(param, value)
if 'online' in param:
if value == True:
self._parent._update_config = True
self._publish('status', "OK")
elif data.get('params', None):
self._update_settings(data)
self.logger.debug("Params Update: Publishing: %s" % (update))
self._publish_config(update)
else:
self.logger.debug("No Action to Publish")
except KeyError:
pass
def _publish_config(self, data):
'''
_publish dictionary passed in data
'''
settings = self.settings.copy()
settings.update(self.other_params) # make dictionary of settings and other_params
for param, value in data.items():
if param in settings.keys():
self._publish(settings[param], value)
else:
#_publish all other parameters (online, fw version etc)
self._publish(param, value)
self._publish('last_update', time.ctime())
def send_command(self, command, message):
'''
Only used for command line (API) options, not strictly necessary if only mqtt is used, but uses the same format as mqtt
'''
asyncio.run_coroutine_threadsafe(self.q.put((command, message)),self.loop)
def set_parameter(self, param, value=None):
asyncio.run_coroutine_threadsafe(self._setparameter(param, value),self.loop)
def set_switch(self, mode):
asyncio.run_coroutine_threadsafe(self._setparameter('switch', mode),self.loop)
def set_led(self, value):
asyncio.run_coroutine_threadsafe(self._setparameter('sledOnline', value),self.loop)
def delete_timers(self):
asyncio.run_coroutine_threadsafe(self._setparameter('timers', []),self.loop)
class Autoslide(Default):
"""An eweclient class for connecting to Autoslide automatic doors."""
productModel = ["WFA-1"] #model list used for identifying the correct class
triggers =[ 'b']
settings ={ 'a':'mode', #0=auto, 1=stacker, 2=lock, 3=pet
'b':'command', #app trigger none=0, 1=inside, 2=outside, 3=pet, 4=stacker
'd':'unknown_d',
'e':'75_percent', #(0=ON)
'f':'slam_shut', #(0=ON)
'g':'unknown_g',
'h':'heavy_door', #(0=ON)
'i':'stacker_mode', #(0=ON)
'j':'delay', #door delay (in seconds)
'k':'unknown_k',
'l':'notifications', #(0=ON)
'sledOnline':'sledOnline', #turn LED indicator on or off
}
other_params ={ 'c':'locked',
'm':'open_closed_locked',
'n':'trigger', #door trigger source none=0, 1=inside, 2=outside, 3=pet, 4=stacker
"fwVersion": "fwVersion",
"rssi": "rssi",
"staMac": "staMac"
}
timers_supported=[ 'delay', 'repeat']
__version__ = '1.0'
def __init__(self, parent, deviceid, device, productModel, initial_parameters={}):
self.logger = logging.getLogger('Main.'+__class__.__name__)
#self.logger.debug('Started Class: %s, version: %s' % (__class__.__name__, __version__))
self._parent = parent
self._deviceid = deviceid
self._config = device
self._productModel = productModel #we are created as this kind of productModel if there is more than one kind of model(one of self.productModel list)
self._org_delay = None
self._delay_person = None
self._locked = None
self._mode = None
self._hold_open_running = False
self._restore_delay_task = None
self.load_template()
for param, value in initial_parameters.items():
if param == 'delay_person':
self._delay_person = value
self.q = asyncio.Queue()
self.loop = asyncio.get_event_loop()
self.loop.create_task(self._process_queue())
self.logger.debug('Created %s Device V:%s, model: %s' % (self.__class__.__name__ ,self.__version__,self._productModel))
def delay_person(self, delay_person=None):
if delay_person is not None:
self._delay_person = delay_person
return self._delay_person
def _on_message_default(self, command, message):
self.logger.info("%s CLIENT: Received Command: %s, device: %s, Setting: %s" % (__class__.__name__,command, self.deviceid, message))
func = None
if 'door_trigger_delay' in command:
#3=pet, 2=outdoor, 1=indoor, 4=stacker
trigger, delay = message.split(' ')
self.logger.debug('Triggering Door Delay: %s, %s, %s' % (trigger, self.deviceid, delay))
func = self._hold_open(trigger, delay)
elif 'set_delay_person' in command:
#set delay for person different from pet
self.logger.debug('setting delay_person to: %s' % message)
if message == 'None':
self._delay_person = None
else:
self._delay_person = message
func = self._getparameter()
elif 'door_trigger' in command:
#3=pet, 2=outdoor, 1=indoor, 4=stacker
self.logger.debug('Triggering Door: %s, %s' % (self.deviceid,message))
func = self._setparameter('b', message)
elif 'set_mode' in command:
#a=mode, 0=auto, 1=stacker, 2=lock, 3=pet
self.logger.debug('set_mode: Door %s, %s' % (self.deviceid,message)) #example: mosquitto_pub -t "/ewelink_command/100050xxxx/set_mode" -m "3" or mosquitto_pub -t "/ewelink_command/Patio Door/set_mode" -m "3"
func = self._setparameter('a', message)
elif 'set_option' in command:
#options are (0=ON)
'''
a=mode, 0=auto, 1=stacker, 2=lock, 3=pet
d=unknown
e=75% power
f=Slam Shut
g=unknown
h=Heavy door
i=Stacker Mode
j=Door Delay (in seconds)
k=unknown
l=Notifications
'''
option, setting = message.split()
self.logger.debug('set_option: Door %s, %s to %s' % (self.deviceid, option, setting))
func = self._setparameter(option, setting)
else:
func = super()._on_message_default(command, message)
return func
async def _setparameter(self, param, targetState, update_config=True, waitResponse=False):
'''
a= mode, 0=auto, 1=stacker, 2=lock, 3=pet
b= app command stack=4, pet=3, outdoor=1, indoor=2, none=0
c= locked (1) or unlocked (0)
settings:
d=
e= 75% power (0=On)
f= Slam Shut (0=On)
g=
h= Heavy Door (0=On)
i= Stacker mode (0=On)
j= Door open period in seconds
k=
l= Notifications (0=On)
m= closed+locked (2), closed(1), open (0)
n= local command (received by controller) pet = 3, outdoor=1, indoor=2, none=0
'''
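        # Example (illustrative): self._setparameter('a', '3') selects pet mode and
        # self._setparameter('j', '5') sets a 5 second door-open period (padded to '05').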
if param not in self.settings.keys():
for p,v in self.settings.items():
if param == v:
param = p
break
else:
self.logger.error('deviceid: %s, incorrect parameter: %s, parameters must be one of %s' % (self.deviceid, param, self.settings))
return
if param == 'j' and len(targetState) == 1:
targetState = '0'+targetState
if param == 'b':
self._config['params']['b']=targetState
self._config['b_update']=time.time() #time app was last triggered
await super()._setparameter(param, targetState, update_config, waitResponse)
def _handle_notification(self, data):
'''
receive door status in action key 'update' or 'sysmsg'
'''
self.logger.debug("Received data %s" % data)
if data.get('error', 0) != 0:
self.logger.warn('Not Processing error')
return
self._parent.update(self._config, data)
self._config['update']=time.time()
try:
update = data['params']
if data.get('action', None):
if 'update' in data['action']:
self.logger.debug("Action Update: Publishing: %s" % (update))
self._publish_config(update)
#self._publish('status', "OK")
#handle circumstance where door delay for person trigger is different from default (ie Pet) trigger
if self._delay_person:
c = update.get('c',None)
m = update.get('m',None)
n = update.get('n',None)
                        b = self._config['params'].get('b','3') #b is last app trigger (stored under params by _setparameter)
b_update = self._config.get('b_update',0) #this is when it was last triggered
self.logger.debug('ShowNotification: Got b_update: %s' % b_update)
if c == '0' and m == '1' and n == '0': #if not triggered locally
if time.time()-b_update < 2: #if app was triggered within the last 2 seconds
n = b
else: #not triggered by app, and n='0', so manual pull
n = '1'
self.config['b_update'] = time.time()
if n in ['1','2']: #non-app person trigger
self.logger.debug('ShowNotification: adding delay to door trigger: %s, %s, %s' % (update['n'], self.deviceid, self._delay_person))
self.loop.create_task(self._hold_open(update['n'], self._delay_person))
elif 'sysmsg' in data['action']:
for param, value in update.items():
self.logger.debug("Sysmsg Update: Publishing: %s:%s" % (param, value))
self._publish(param, value)
if 'online' in param:
if value == True:
self._parent._update_config = True
elif data.get('params', None):
self.logger.debug("Params Update: Publishing: %s" % (update))
self._publish_config(update)
else:
self.logger.debug("No Action to Publish")
except KeyError:
pass
def _publish_config(self, data):
'''
publish dictionary passed in data
'''
settings = self.settings.copy()
settings.update(self.other_params) # make dictionary of settings and other_params
for param, value in sorted(data.items()):
if param in settings.keys():
self._publish(settings[param], value)
if param == 'a':
self._mode = value
elif param == 'c':
self._locked = value
elif param == 'j':
if self._delay_person is None:
self._publish('delay_person', value) #_publish person delay value same as default
else:
self._publish('delay_person', self._delay_person)
elif param == 'm': # closed (2), opening(1), closing (0)
if value == '0':
if self._locked == '1' or self._mode == '1':
self._publish('moving', '3') #open
else:
self._publish('moving', '2') #closing
self._publish('closed', '0')
elif value == '1':
self._publish('closed', '0')
if self._mode == '1':
self._publish('moving', '3') #open
else:
self._publish('moving', '1') #opening
elif value == '2':
self._publish('closed', '1')
self._publish('moving', '0') #closed
else:
self._publish('closed', value) #should never get here
self._publish('moving', '1')
else:
#_publish all other parameters (online, fw version etc)
self._publish(param, value)
self._publish('last_update', time.ctime())
async def _hold_open(self, trigger='0', delay=5):
if self._hold_open_running:
self.logger.debug('hold_open: already running - ignoring trigger')
return
delay = str(delay)
#self.logger.debug('hold_open: self._config: %s' % self.pprint(self._config))
if trigger == '0':
trigger = self._config['params'].get('n','0')
if trigger == '0':
trigger = self._config['params'].get('b','0')
if trigger == '0':
trigger = '1'
self._hold_open_running = True
self.logger.debug('hold_open: triggering door')
await self._setparameter('b', trigger)
#await asyncio.sleep(2)
org_delay = self._config['params']['j']
self.logger.debug('hold_open: orig delay: %s' % org_delay)
if int(delay) != int(org_delay):
if self._restore_delay_task:
self._restore_delay_task.cancel()
self.logger.debug('hold_open: cancelled _restore_delay_task')
else:
if len(delay) < 2:
delay = '0'+delay
if not self._org_delay:
self._org_delay = org_delay
self.logger.debug('hold_open: saved org_delay')
await asyncio.sleep(2) #dont send commands too quickly, but before door is fully open
self.logger.debug('hold_open: updating delay')
await self._setparameter('j', delay, update_config=False)
self._restore_delay_task = self.loop.create_task(self._restore_delay(delay))
self._hold_open_running = False
async def _restore_delay(self, delay=2):
self.logger.debug('restore_delay: scheduled, waiting')
try:
await asyncio.sleep(int(delay)) #change delay back when closing, so wait for m == 2 (closed)
while True:
m = self._config['params'].get('m','0')
await asyncio.sleep(1)
if m == '2':
break
self.logger.debug('restore_delay: got org_delay: %s' % self._org_delay)
await self._setparameter('j', self._org_delay, update_config=False)
self._org_delay = None
except asyncio.CancelledError:
self.logger.debug('restore_delay: cancelled')
pass
self._restore_delay_task = None
def set_mode(self, mode):
asyncio.run_coroutine_threadsafe(self._setparameter('a', mode),self.loop)
def trigger_door(self, trigger):
asyncio.run_coroutine_threadsafe(self._setparameter('b', trigger),self.loop)
def trigger_door_delay(self, trigger, delay=1):
        asyncio.run_coroutine_threadsafe(self._hold_open(trigger, delay),self.loop)
def set_option(self, option, value):
asyncio.run_coroutine_threadsafe(self._setparameter(option, value),self.loop)
class BasicSwitch(Default):
"""An eweclient class for connecting to Sonoff Basic Switch"""
productModel = ["Basic","Basic2"] #model list used for identifying the correct class
#this is available in self._productModel to allow different responses depending on the model created
triggers =[ "switch"]
settings ={ "pulse" : "pulse", #'on', 'off' #param reported:topic to _publish to (sometimes parameters are just letters)
"pulseWidth": "pulseWidth", #int in ms
"sledOnline": "sledOnline", #'on', 'off'
"startup" : "startup", #'on', 'off'
"switch" : "switch" #'on', 'off'
}
other_params ={ "init": "init", #int 1 (not sure what this is)
"fwVersion": "fwVersion",
"rssi": "rssi",
"staMac": "staMac"
}
numerical_params=["pulseWidth"] #all basic parameters are assumed to be strings unless you include the parameter name here (in which case it's converted to an int)
timers_supported=[ 'delay', 'repeat', 'duration']
__version__ = '1.0'
class PowSwitch(Default):
"""An eweclient class for connecting to Sonoff Switch with Power Monitoring"""
productModel = ["Pow","Pow2","S31"] #model list used for identifying the correct class
#this is available in self._productModel to allow different responses depending on the model created
triggers =[ "switch"]
settings ={ "sledOnline": "sledOnline", #'on', 'off'
"startup" : "startup", #'on', 'off'
"switch" : "switch" , #'on', 'off'
"alarmPValue": "alarmPValue", #Pow2 and S31 set Power Limit [min, max] -1 = off (this is a floating point value, min 0.1)
"alarmVValue": "alarmVValue", #Pow2 and S31 set Voltage Limit [min, max] -1 = off
"alarmCValue": "alarmCValue", #Pow2 and S31 set Current Limit [min, max] -1 = off
"alarmType" : "alarmType", #Pow2 and S31 report alarms set "p|v|c" (when tripped limit is reported as above) no alarm set is "pvc" oddly enough
"endTime" : "endTime", #ISO Zulu (UTC) Time
"hundredDaysKwh": "hundredDaysKwh", #'get', 'start', 'stop'
"oneKwh" : "oneKwh", #'get', 'start', 'stop'
"startTime" : "startTime", #ISO Zulu (UTC) Time
"timeZone" : "timeZone" #current timezone offset from UTC (EST= -5)
}
other_params ={ "init": "init", #int 1 (not sure what this is)
"power" : "power", #reported power consumption (W)
"voltage" : "voltage", #Pow2 and S31 reported Voltage (V)
"current" : "current", #Pow2 and S31 reported Current (A)
"fwVersion": "fwVersion",
"rssi": "rssi",
"staMac": "staMac"
}
timers_supported=[ 'delay', 'repeat', 'duration']
__version__ = '1.0'
class TH16Switch(Default):
"""An eweclient class for connecting to Sonoff Switch with Environment Monitoring"""
productModel = ["TH16", "TH10"] #model list used for identifying the correct class
#this is available in self._productModel to allow different responses depending on the model created
triggers =[ "switch", "mainSwitch"]
settings ={ "sledOnline": "sledOnline", #'on', 'off'
"startup" : "startup", #'on', 'off'
"switch" : "switch" , #'on', 'off' switching the switch manually turns off temp/humidity switching
"mainSwitch": "mainSwitch", #'on', 'off' Seems to enable/disable control by temp or humidity
"deviceType": "deviceType", # switching mode "normal" (manual switch), "temperature" or ,"humidity" use trigger values for on or off, set to "normal" to disable temp or humid modes
}
    other_params ={ "init": "init",                      #int 1 (not sure what this is)
                    "currentHumidity" : "currentHumidity",          #reported humidity (%)
                    "currentTemperature" : "currentTemperature",    #reported temperature (deg C)
"sensorType": "sensorType", # Type of sensor (example "AM2301")
"fwVersion": "fwVersion",
"rssi": "rssi",
"staMac": "staMac"
}
timers_supported=[ 'delay', 'repeat', 'duration']
__version__ = '1.0'
def _on_message_default(self, command, message):
'''
Default which can be overridden by a class for processing special functions for the device while retaining the basic
commands
Here process commands for setting temperature and humidity triggers.
        low setting must be lower than or the same as the hi setting
The high Switch is always the opposite of the low switch (so no need to send high switch)
format:
topic: set_temperature
message: low on|off high so for example "20 on 26" is turn on at 20 deg C and off at 26 deg C.
topic: set_humidity
as above, but with humidity values
topic: set_manual (set deviceType="normal", mainSwitch="off" for manual mode)
'''
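        # Illustrative payload: "set_temperature" with message "20 on 26" builds
        # {"mainSwitch": "on", "deviceType": "temperature",
        #  "targets": [{"reaction": {"switch": "off"}, "targetHigh": "26"},
        #              {"reaction": {"switch": "on"}, "targetLow": "20"}]}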
if 'set_temperature' in command or 'set_humidity' in command:
message = message.lower().split(" ")
if len(message) == 2:
low = hi = message[0]
low_switch = 'on' if message[1] == 'on' else 'off'
hi_switch = 'on' if low_switch == 'off' else 'off'
if len(message) >= 3:
low = message[0]
low_switch = 'on' if message[1] == 'on' else 'off'
hi_switch = 'on' if low_switch == 'off' else 'off'
hi = message[2]
else:
self.logger.error('format of message is lo_value, switch, hi_value, or low/hi_value switch, you sent: %s' % message)
return None
if not low.isdigit() or not hi.isdigit() or int(low) > int(hi):
self.logger.error('low and high values must be numbers, and low must be lower or equal to high, with low first, you sent: %s' % message)
return None
temp = {}
temp["mainSwitch"]="on"
temp["targets"]=[]
temp["targets"].append({"reaction":{"switch": hi_switch if hi_switch == 'on' else 'off'}, "targetHigh": hi}) # high goes first in the list
temp["targets"].append({"reaction":{"switch": low_switch if low_switch == 'on' else 'off'}, "targetLow": low})
if 'temperature' in command:
self.logger.debug('set_temperature switching: for device %s to (low) %s degC:%s (hi) %s degC:%s' % (self.deviceid, low, low_switch, hi, hi_switch))
temp["deviceType"]="temperature"
if 'humidity' in command:
                self.logger.debug('set_humidity switching: for device %s to (low) %s %%:%s (hi) %s %%:%s' % (self.deviceid, low, low_switch, hi, hi_switch))
temp["deviceType"]="humidity"
self.logger.info('sending: %s' % self.pprint(temp))
func = self._sendjson(temp)
elif 'set_manual' in command:
self.logger.debug('set_manual switching: for device %s' % self.deviceid)
temp = { "deviceType": "normal","mainSwitch": "off"}
func = self._sendjson(temp)
else:
func = super()._on_message_default(command, message)
return func
class LEDBulb(Default):
"""An eweclient class for connecting to Sonoff Led Bulb B1"""
productModel = ["B1"] #model list used for identifying the correct class
#this is available in self._productModel to allow different responses depending on the model created
triggers =[ ]
settings ={ "channel0": "white_cold", #"0-255", in colour mode (2) these are set to 0
"channel1": "white_warm", #"0-255", in colour mode (2) these are set to 0
"channel2": "red", #"0-255", regular RGB, in white mode (1), these are set to 0
"channel3": "green", #"0-255",
"channel4": "blue", #"0-255",
#These just seem to be indicators for the app, changing them makes no difference to the bulb
"state": "state", #"on","off", - this does turn the bulb on and off if changed, but does not report the state correctly
"type": "type", #"middle", "warm", "cold", setting of slider switch on app (does not change bulb)
"zyx_mode": "mode", #colour mode 2=colour, 1=white, mode setting in app (does not change bulb) this is a numerical value
}
other_params ={ "fwVersion": "fwVersion",
"rssi": "rssi",
"staMac": "staMac"
}
numerical_params=["zyx_mode"] #all basic parameters are assumed to be strings unless you include the parameter name here (in which case it's converted to an int)
timers_supported=[ 'delay', 'repeat', 'once']
__version__ = '1.0'
class FourChannelSwitch(Default):
"""An eweclient class for connecting to Sonoff 4 Channel Switch"""
productModel = ["4CH Pro"] #model list used for identifying the correct class
#this is available in self._productModel to allow different responses depending on the model created
triggers =[ ]
settings ={ "pulse" : "pulse", #'on', 'off' #param reported:topic to _publish to (sometimes parameters are just letters)
"pulseWidth": "pulseWidth", #int in ms
"sledOnline": "sledOnline", #'on', 'off'
"configure" : "configure", #list of switches (4 of them)
"switches" : "switches" #list of switches (4 of them)
}
other_params ={ "init": "init", #int 1 (not sure what this is)
"fwVersion": "fwVersion",
"rssi": "rssi",
"staMac": "staMac"
}
numerical_params=["pulseWidth"] #all basic parameters are assumed to be strings unless you include the parameter name here (in which case it's converted to an int)
timers_supported=[ 'delay', 'repeat', 'once']
__version__ = '1.0'
async def _setparameter(self, param, targetState, update_config=True, waitResponse=False):
'''
4 channel switch has two special parameters "configure" and "switches" which are lists of channel:switch dicts. like this:
"configure":[{"outlet": 0,"startup": "off"},...]
"switches":[{"outlet": 0,"switch": "off"},...]
so need to handle these parameters differently
'''
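        # Illustrative: param='switches' with targetState "0 on 1 off" becomes
        # [{'outlet': 0, 'switch': 'on'}, {'outlet': 1, 'switch': 'off'}]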
try:
if param == 'configure' or param == 'switches':
values = targetState.split()
if len(values) % 2 == 0: #even number of setting/value pairs
if param == 'configure':
target = 'startup'
else:
target = 'switch'
                    setting = [{'outlet':int(values[x]) if values[x].isdigit() and values[x] in ['0','1','2','3'] else None,
target:values[x+1] if values[x+1] in ['on','off'] else None} for x in range(0, len(values), 2)]
for check in setting:
if check['outlet'] is None or check.get('startup', 'OK') is None or check.get('switch','OK') is None:
                            raise ValueError('not a valid channel or setting')
else:
raise ValueError('not an even number of pairs')
targetState = setting
except ValueError as e:
self.logger.error('deviceid: %s, must be an even number of channel (number), setting (on|off) pairs for param: %s, you sent: %s : error: %s' % (self.deviceid,param,targetState,e))
return
await super()._setparameter(param, targetState, update_config, waitResponse)
|
py | 7dfc2e6dae89febeb695876cad24a5f820288787 | import os
import time
import datetime
import socket
import pickle as pkl
import argparse
import yaml
from utils import split_data
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPool2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
import tensorflow as tf
def process_images(image, label):
# Normalize images to have a mean of 0 and standard deviation of 1
image = tf.image.per_image_standardization(image)
# Resize images to 224x224
image = tf.image.resize(image, (224,224))
return image, label
def format_data(X_train, X_test, X_valid, y_train, y_test, y_valid):
X_train = np.array(X_train)
X_test = np.array(X_test)
X_valid = np.array(X_valid)
y_train = np.array(y_train)
y_test = np.array(y_test)
y_valid = np.array(y_valid)
#One hot encoding classes array
y_train = tf.one_hot(y_train, 10)
y_test = tf.one_hot(y_test, 10)
y_valid = tf.one_hot(y_valid, 10)
#Making the images 3 dimensions
X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]
X_valid = X_valid[..., np.newaxis]
#Create Tensorflow dataset representation
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test))
valid_ds = tf.data.Dataset.from_tensor_slices((X_valid, y_valid))
#Dataset partition sizes
train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
valid_ds_size = tf.data.experimental.cardinality(valid_ds).numpy()
#Data processing pipeline
train_ds = (train_ds
.map(process_images)
.shuffle(buffer_size=train_ds_size)
.batch(batch_size=32, drop_remainder=True))
test_ds = (test_ds
.map(process_images)
                .shuffle(buffer_size=test_ds_size)
.batch(batch_size=32, drop_remainder=True))
valid_ds = (valid_ds
.map(process_images)
                .shuffle(buffer_size=valid_ds_size)
.batch(batch_size=32, drop_remainder=True))
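    # After this pipeline each dataset yields batches of shape (32, 224, 224, 1)
    # standardized images and (32, 10) one-hot labels (assuming single-channel inputs).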
return train_ds, test_ds, valid_ds
def train_alexNet_model(processed_data_path, model_save_path, test_size, validation_size, input_size, n_classes, epochs):
X_train, X_test, X_valid, y_train, y_test, y_valid = split_data(processed_data_path, test_size, 42, validation_size, True)
train_ds, test_ds, valid_ds = format_data(X_train, X_test, X_valid, y_train, y_test, y_valid)
#Model architecture
model = tf.keras.models.Sequential([
# 1st Convolutional Layer
Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', input_shape=input_size, padding="valid"),
MaxPool2D(pool_size=(2,2), strides=(2,2),padding="valid"),
BatchNormalization(),
# 2nd Convolutional Layer
Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), activation='relu', padding="valid"),
MaxPool2D(pool_size=(2,2), strides=(2,2), padding="valid"),
BatchNormalization(),
# 3rd Convolutional Layer
Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="valid"),
BatchNormalization(),
# 4th Convolutional Layer
Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="valid"),
BatchNormalization(),
# 5th Convolutional Layer
Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="valid"),
BatchNormalization(),
MaxPool2D(pool_size=(2,2), strides=(2,2), padding="valid"),
Flatten(),
#1st Dense Layer
Dense(4096, activation='relu', input_shape=input_size),
Dropout(0.4),
#2nd Dense Layer
Dense(4096, activation='relu'),
Dropout(0.4),
#3rd Dense Layer
Dense(1000, activation='relu'),
Dropout(0.4),
Dense(n_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
t1 = time.time()
model.fit(train_ds,
epochs=epochs,
validation_data=valid_ds)
t2 = time.time()
model.save(model_save_path)
return t2-t1
if __name__ == "__main__":
np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config_file", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
training_cfg = yaml.safe_load(open(args.config_file))["features_based_training"]
if not os.path.exists(training_cfg['save_model_path']):
os.makedirs(training_cfg['save_model_path'])
algorithm_args = training_cfg['algorithm']['args']
input_size_tuple = tuple(algorithm_args['input_size'])
feature_type = training_cfg['path_to_data'].split("/")
model_path_name = "{}{}_{}.h5".format(training_cfg['save_model_path'],training_cfg['algorithm']['name'],feature_type[-1])
#train AlexNet
print("Training AlexNet for "+str(feature_type[-1]))
runTime = train_alexNet_model(training_cfg['path_to_data'], model_path_name, test_size=algorithm_args['test_size'], validation_size=algorithm_args['validation_size'], input_size=input_size_tuple, n_classes=algorithm_args['n_classes'], epochs=algorithm_args['epochs'])
with open("logs/logs.csv", "a") as myfile:
myfile.write("{:%Y-%m-%d %H:%M:%S},{},{},{},{:.2f}\n".format(datetime.datetime.now(),"Training AlexNet for "+str(feature_type[-1]),socket.gethostname(),os.cpu_count(),runTime))
|
py | 7dfc2f00afee0d26282348903a786a8dfe9f37c2 | import numpy as np
from scipy.optimize import minimize_scalar
from pytriqs.gf import inverse
from pytriqs.utility.bound_and_bisect import bound_and_bisect
from pytriqs.utility.dichotomy import dichotomy
from ..greensfunctions import MatsubaraGreensFunction
class GLocalCommon(MatsubaraGreensFunction):
"""
parent class for GLocal for different schemes, needs __init__(...) and
calculate(self, selfenergy, mu, w1, w2, filling = None, dmu_max = None)
where mu is a blockmatrix of structure gf_struct
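    Typical self-consistency step (sketch, assuming concrete subclasses of the
    GLocal/WeissField/SelfEnergy classes in this module):
        mu = glocal.set(selfenergy, mu)                        # honours 'filling' if set
        weissfield.calc_selfconsistency(glocal, selfenergy, mu)
        selfenergy.calc_dyson(weissfield, glocal)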
"""
def __init__(self, *args, **kwargs):
MatsubaraGreensFunction.__init__(self, *args, **kwargs)
self.filling_with_old_mu = None
self.last_found_mu_number = None
self.last_found_density = None
self.mu_maxiter = 10000
self.mu_dx = 1
self.filling = None
self.dmu_max = 10
if 'parameters' in kwargs.keys():
            for key, val in kwargs['parameters'].items():
if key == 'filling':
self.filling = val
if key == 'dmu_max':
self.dmu_max = val
def calc_dyson(self, weissfield, selfenergy):
self << inverse(inverse(weissfield) - selfenergy)
def set(self, selfenergy, mu):
"""
sets GLocal using calculate(self, mu, selfenergy, w1, w2, n_mom), uses either filling or mu
mu can be either of blockmatrix-type or scalar
"""
if self.filling is None:
assert type(mu) in [float, int,
complex], "Unexpected type or class of mu."
self.calculate(selfenergy, self.make_matrix(mu))
else:
mu = self.find_and_set_mu(
self.filling, selfenergy, mu, self.dmu_max)
return mu
def find_and_set_mu(self, filling, selfenergy, mu0, dmu_max):
"""
Assumes a diagonal-mu basis
"""
# TODO place mu in center of gap
if not filling is None:
self.filling_with_old_mu = self.total_density().real
def f(mu): return self._set_mu_get_filling(selfenergy, mu)
f = FunctionWithMemory(f)
self.last_found_mu_number, self.last_found_density = bound_and_bisect(
f, mu0, filling, dx=self.mu_dx, x_name="mu", y_name="filling", maxiter=self.mu_maxiter, verbosity=self.verbosity, xtol=1e-4)
new_mu, limit_applied = self.limit(
self.last_found_mu_number, mu0, dmu_max)
if limit_applied:
self.calculate(selfenergy, self.make_matrix(new_mu))
return new_mu
def _set_mu_get_filling(self, selfenergy, mu):
"""
needed for find_and_set_mu
"""
self.calculate(selfenergy, self.make_matrix(mu))
d = self.total_density().real
return d
def limit(self, x, x0, dxlim):
"""
returns the element in [x0-dxlim, x0+dxlim] that is closest to x and whether it is unequal
to x
"""
if abs(x - x0) > dxlim:
return x0 + dxlim * np.sign(x - x0), True
return x, False
def make_number(self, matrix):
"""
converts matrix to number using the first entry
"""
for key, val in matrix.items():
number = val[0, 0]
break
return number
def make_matrix(self, number):
"""
converts number to blockmatrix in GLocal basis multiplying by 1
"""
mat = dict()
for bname, bsize in zip(self.blocknames, self.blocksizes):
mat[bname] = np.identity(bsize) * number
return mat
class WeissFieldCommon(MatsubaraGreensFunction):
def calc_dyson(self, glocal, selfenergy):
self << inverse(inverse(glocal.get_as_BlockGf()) +
selfenergy.get_as_BlockGf())
def calc_selfconsistency(self, glocal, selfenergy, mu):
self.calc_dyson(glocal, selfenergy)
class SelfEnergyCommon(MatsubaraGreensFunction):
def calc_dyson(self, weissfield, glocal):
self << inverse(weissfield) - inverse(glocal)
class FunctionWithMemory:
"""
a lambda with memory; memory needed due to bound_and_bisect bound finding algorithm of triqs
some values are evaluated multiple times
"""
def __init__(self, function):
self.f = function
self.x = []
self.y = []
def __call__(self, x):
is_evaluated = False
for i, x_i in enumerate(self.x):
if x_i == x:
is_evaluated = True
break
if is_evaluated:
y = self.y[i]
else:
y = self.f(x)
self.x.append(x)
self.y.append(y)
return y
|
py | 7dfc2f655be986373ed7a34702bd51e3292e72ed | __author__ = 'licheng'
"""
This interface provides access to four datasets:
1) refclef
2) refcoco
3) refcoco+
4) refcocog
split by unc and google
The following API functions are defined:
REFER - REFER api class
getRefIds - get ref ids that satisfy given filter conditions.
getAnnIds - get ann ids that satisfy given filter conditions.
getImgIds - get image ids that satisfy given filter conditions.
getCatIds - get category ids that satisfy given filter conditions.
loadRefs - load refs with the specified ref ids.
loadAnns - load anns with the specified ann ids.
loadImgs - load images with the specified image ids.
loadCats - load category names with the specified category ids.
getRefBox - get ref's bounding box [x, y, w, h] given the ref_id
showRef - show image, segmentation or box of the referred object with the ref
getMask - get mask and area of the referred object given ref
showMask - show mask of the referred object given ref
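Minimal usage sketch (illustrative; mirrors the __main__ demo at the bottom of this file):
    refer = REFER(data_root='data', dataset='refcoco', splitBy='unc')
    ref_ids = refer.getRefIds(split='train')
    ref = refer.loadRefs(ref_ids[0])[0]
    m = refer.getMask(ref)   # {'mask': HxW uint8 array, 'area': ...}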
"""
import sys
import os.path as osp
import json
import pickle
import time
import itertools
import skimage.io as io
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon, Rectangle
from pprint import pprint
import numpy as np
from external import mask
# import cv2
# from skimage.measure import label, regionprops
class REFER():
def __init__(self, data_root, dataset='refcoco', splitBy='unc'):
# provide data_root folder which contains refclef, refcoco, refcoco+
# and refcocog
# also provide dataset name and splitBy information
# e.g., dataset = 'refcoco', splitBy = 'unc'
print('loading dataset %s into memory...' % dataset)
self.ROOT_DIR = osp.abspath(osp.dirname(__file__))
self.DATA_DIR = osp.join(data_root, dataset)
if dataset in ['refcoco', 'refcoco+', 'refcocog']:
self.IMAGE_DIR = osp.join(data_root,
'images/mscoco/images/train2014')
elif dataset == 'refclef':
self.IMAGE_DIR = osp.join(data_root,
'images/saiapr_tc-12')
else:
print('No refer dataset is called [%s]' % dataset)
sys.exit()
# load refs from data/dataset/refs(dataset).json
tic = time.time()
ref_file = osp.join(self.DATA_DIR, 'refs('+splitBy+').p')
self.data = {}
self.data['dataset'] = dataset
self.data['refs'] = pickle.load(open(ref_file, 'rb'))
# load annotations from data/dataset/instances.json
instances_file = osp.join(self.DATA_DIR, 'instances.json')
instances = json.load(open(instances_file, 'r'))
self.data['images'] = instances['images']
self.data['annotations'] = instances['annotations']
self.data['categories'] = instances['categories']
# create index
self.createIndex()
print('DONE (t=%.2fs)' % (time.time()-tic))
def createIndex(self):
# create sets of mapping
# 1) Refs: {ref_id: ref}
# 2) Anns: {ann_id: ann}
# 3) Imgs: {image_id: image}
# 4) Cats: {category_id: category_name}
# 5) Sents: {sent_id: sent}
# 6) imgToRefs: {image_id: refs}
# 7) imgToAnns: {image_id: anns}
# 8) refToAnn: {ref_id: ann}
# 9) annToRef: {ann_id: ref}
# 10) catToRefs: {category_id: refs}
# 11) sentToRef: {sent_id: ref}
# 12) sentToTokens: {sent_id: tokens}
print('creating index...')
# fetch info from instances
Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}
for ann in self.data['annotations']:
Anns[ann['id']] = ann
imgToAnns[ann['image_id']] = \
imgToAnns.get(ann['image_id'], []) + [ann]
for img in self.data['images']:
Imgs[img['id']] = img
for cat in self.data['categories']:
Cats[cat['id']] = cat['name']
# fetch info from refs
Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}
Sents, sentToRef, sentToTokens = {}, {}, {}
for ref in self.data['refs']:
# ids
ref_id = ref['ref_id']
ann_id = ref['ann_id']
category_id = ref['category_id']
image_id = ref['image_id']
# add mapping related to ref
Refs[ref_id] = ref
imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]
catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]
refToAnn[ref_id] = Anns[ann_id]
annToRef[ann_id] = ref
# add mapping of sent
for sent in ref['sentences']:
Sents[sent['sent_id']] = sent
sentToRef[sent['sent_id']] = ref
sentToTokens[sent['sent_id']] = sent['tokens']
# create class members
self.Refs = Refs
self.Anns = Anns
self.Imgs = Imgs
self.Cats = Cats
self.Sents = Sents
self.imgToRefs = imgToRefs
self.imgToAnns = imgToAnns
self.refToAnn = refToAnn
self.annToRef = annToRef
self.catToRefs = catToRefs
self.sentToRef = sentToRef
self.sentToTokens = sentToTokens
print('index created.')
def getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=''):
image_ids = image_ids if type(image_ids) == list else [image_ids]
cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]
ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
if len(image_ids)==len(cat_ids)==len(ref_ids)==len(split)==0:
refs = self.data['refs']
else:
if not len(image_ids) == 0:
                lists = [self.imgToRefs[image_id] for image_id in image_ids
                         if image_id in self.imgToRefs]  # list of [refs]
                refs = list(itertools.chain.from_iterable(lists))
else:
refs = self.data['refs']
if not len(cat_ids) == 0:
refs = [ref for ref in refs if ref['category_id'] in cat_ids]
if not len(ref_ids) == 0:
refs = [ref for ref in refs if ref['ref_id'] in ref_ids]
if not len(split) == 0:
if split in ['testA', 'testB', 'testC']:
# we also consider testAB, testBC, ...
refs = [ref for ref in refs if split[-1] in ref['split']]
elif split in ['testAB', 'testBC', 'testAC']:
# rarely used I guess...
refs = [ref for ref in refs if ref['split'] == split]
elif split == 'test':
refs = [ref for ref in refs if 'test' in ref['split']]
elif split == 'train' or split == 'val':
refs = [ref for ref in refs if ref['split'] == split]
else:
print('No such split [%s]' % split)
sys.exit()
ref_ids = [ref['ref_id'] for ref in refs]
return ref_ids
def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):
image_ids = image_ids if type(image_ids) == list else [image_ids]
cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]
ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:
ann_ids = [ann['id'] for ann in self.data['annotations']]
else:
if not len(image_ids) == 0:
lists = [self.imgToAnns[image_id] for image_id in image_ids
if image_id in self.imgToAnns] # list of [anns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.data['annotations']
if not len(cat_ids) == 0:
anns = [ann for ann in anns if ann['category_id'] in cat_ids]
ann_ids = [ann['id'] for ann in anns]
if not len(ref_ids) == 0:
                ids = set(ann_ids).intersection(
                    set([self.Refs[ref_id]['ann_id'] for ref_id in ref_ids])
                )
                ann_ids = list(ids)
return ann_ids
def getImgIds(self, ref_ids=[]):
ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
if not len(ref_ids) == 0:
image_ids = list(set([self.Refs[ref_id]['image_id']
for ref_id in ref_ids]))
else:
image_ids = list(self.Imgs.keys())
return image_ids
def getCatIds(self):
return list(self.Cats.keys())
def loadRefs(self, ref_ids=[]):
if type(ref_ids) == list:
return [self.Refs[ref_id] for ref_id in ref_ids]
elif type(ref_ids) == int:
return [self.Refs[ref_ids]]
def loadAnns(self, ann_ids=[]):
if type(ann_ids) == list:
return [self.Anns[ann_id] for ann_id in ann_ids]
elif type(ann_ids) == int or type(ann_ids) == str:
return [self.Anns[ann_ids]]
def loadImgs(self, image_ids=[]):
if type(image_ids) == list:
return [self.Imgs[image_id] for image_id in image_ids]
elif type(image_ids) == int:
return [self.Imgs[image_ids]]
def loadCats(self, cat_ids=[]):
if type(cat_ids) == list:
return [self.Cats[cat_id] for cat_id in cat_ids]
elif type(cat_ids) == int:
return [self.Cats[cat_ids]]
def getRefBox(self, ref_id):
ref = self.Refs[ref_id]
ann = self.refToAnn[ref_id]
return ann['bbox'] # [x, y, w, h]
def showRef(self, ref, seg_box='seg'):
ax = plt.gca()
# show image
image = self.Imgs[ref['image_id']]
I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))
ax.imshow(I)
# show refer expression
for sid, sent in enumerate(ref['sentences']):
print('%s. %s' % (sid+1, sent['sent']))
# show segmentations
if seg_box == 'seg':
ann_id = ref['ann_id']
ann = self.Anns[ann_id]
polygons = []
color = []
c = 'none'
if type(ann['segmentation'][0]) == list:
# polygon used for refcoco*
for seg in ann['segmentation']:
print(seg)
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly, True, alpha=0.4))
color.append(c)
p = PatchCollection(polygons, facecolors=color,
edgecolors=(1,1,0,0), linewidths=3, alpha=1)
ax.add_collection(p) # thick yellow polygon
p = PatchCollection(polygons, facecolors=color,
edgecolors=(1,0,0,0), linewidths=1, alpha=1)
ax.add_collection(p) # thin red polygon
else:
# mask used for refclef
rle = ann['segmentation']
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
color_mask = np.array([2.0,166.0,101.0])/255
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
# show bounding-box
elif seg_box == 'box':
ann_id = ref['ann_id']
ann = self.Anns[ann_id]
bbox = self.getRefBox(ref['ref_id'])
box_plot = Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],
fill=False, edgecolor='green', linewidth=3)
ax.add_patch(box_plot)
def getMask(self, ref):
# return mask, area and mask-center
ann = self.refToAnn[ref['ref_id']]
image = self.Imgs[ref['image_id']]
if type(ann['segmentation'][0]) == list: # polygon
rle = mask.frPyObjects(ann['segmentation'],
image['height'], image['width'])
else:
rle = ann['segmentation']
m = mask.decode(rle)
m = np.sum(m, axis=2) # there could be several binary map as multi segs
m = m.astype(np.uint8) # convert to np.uint8
# compute area
area = sum(mask.area(rle)) # should be close to ann['area']
return {'mask': m, 'area': area}
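    # Illustrative usage sketch (hypothetical values, assuming an already
    # constructed REFER instance `refer`):
    #   ref = refer.loadRefs(refer.getRefIds(split='train')[0])[0]
    #   M = refer.getMask(ref)  # {'mask': HxW uint8 array, 'area': pixel count}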
def showMask(self, ref):
M = self.getMask(ref)
msk = M['mask']
ax = plt.gca()
ax.imshow(msk)
if __name__ == '__main__':
refer = REFER(data_root='data', dataset='refcocog', splitBy='google')
ref_ids = refer.getRefIds()
print((len(ref_ids)))
print(len(refer.Imgs))
print(len(refer.imgToRefs))
ref_ids = refer.getRefIds(split='train')
print('There are %s training referred objects.' % len(ref_ids))
for ref_id in ref_ids[:5]:
ref = refer.loadRefs(ref_id)[0]
if len(ref['sentences']) < 2:
continue
pprint(ref)
print('The label is %s.' % refer.Cats[ref['category_id']])
plt.figure()
refer.showRef(ref, seg_box='box')
plt.show()
# plt.figure()
# refer.showMask(ref)
# plt.show()
|
py | 7dfc2fe42f8551b1922fc43c20756606df150e21 | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name="index"),
path('index/', views.main_view, name="main_page"),
] |
py | 7dfc31ced872c1e4e6f6e82112faf90c417eb3be | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import os.path
from setting.Config import Config
#import setting.Setting
import dataset
import glob
#import sqlite3
# Abstract class
class DbInitializer(metaclass=ABCMeta):
def __init__(self):
self._path_dir_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
#self.__setting = setting.Setting.Setting()
self._path_dir_this = os.path.abspath(os.path.dirname(__file__))
self.__db = None
def Initialize(self):
self._CreateDb()
self._ConnectDb()
self.Db.query('PRAGMA foreign_keys = false')
self._CreateTable()
self._InsertInitData()
self.Db.query('PRAGMA foreign_keys = true')
#@abstractmethod
def _CreateDb(self):
#print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
#print(self.DbFilePath)
if not os.path.isfile(self.DbFilePath):
with open(self.DbFilePath, 'w') as f: pass
def _ConnectDb(self):
self.__class__.Db = dataset.connect('sqlite:///' + self.DbFilePath, engine_kwargs={'pool_pre_ping':True})
    # Create tables (CREATE TABLE statements)
#@abstractmethod
def _CreateTable(self):
self.__CreateTableBySql()
self.__CreateTableByPy()
    # Insert initial data (INSERT statements)
#@abstractmethod
def _InsertInitData(self):
self.__InsertBySql()
self.__InsertByTsv()
self.__InsertByPy()
@property
def DbId(self): return self.__class__.__name__.replace(super().__thisclass__.__name__, '')
@property
def DbFileName(self): return 'Github.' + self.DbId + '.sqlite3'
#def DbFileName(self): return 'GitHub.' + self.DbId + '.sqlite3'
@property
def DbFilePath(self):
#print(dir(self.__setting))
#return os.path.join(self.__setting.PathDb, self.DbFileName)
#print(Config()['Path']['Db'])
#print(Config().PathDb)
#print(self.DbFileName)
#return os.path.join(Config().PathDb, self.DbFileName)
return os.path.join(Config()['Path']['Db'], self.DbFileName)
@property
def Db(self): return self.__class__.Db
# sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread.The object was created in thread id 1972434016 and this is thread id 1995735040
#try:
#except sqlite3.ProgrammingError as e:
# self.__ConnectDb()
#return self.__class__.Db
    # Create tables from SQL files
    def __CreateTableBySql(self):
        for path_sql in self.__GetCreateTableSqlFilePaths():
            self.__ExecuteSqlFile(path_sql)
    # Create tables via Python code
def __CreateTableByPy(self):
self.__ActionByPy(action='create')
"""
path_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))))
path_insert_py = os.path.join(path_root, 'database/init/{0}/create/py/'.format(self.DbId))
if os.path.isdir(path_insert_py):
import importlib
namespace_insert_py = path_insert_py.replace('/', '.')
module = importlib.import_module(namespace_insert_py + 'Creater')
creater = module.Creater(self.DbFilePath)
creater.Create()
"""
    # Insert initial data from SQL files
    def __InsertBySql(self):
        for path_sql in self.__GetInsertSqlFilePaths():
            self.__ExecuteSqlFile(path_sql)
    # Insert initial data from TSV files
    def __InsertByTsv(self):
        import database.TsvLoader  # project-local loader referenced below
        for path_tsv in self.__GetInsertTsvFilePaths():
            table_name = os.path.splitext(os.path.basename(path_tsv))[0]
loader = database.TsvLoader.TsvLoader()
loader.ToSqlite3(path_tsv, self.DbFilePath, table_name)
    # Insert initial data via Python code
def __InsertByPy(self):
self.__ActionByPy(action='insert')
"""
#path_insert_py = os.path.join(self._path_dir_root, 'database/init/{0}/insert/py/'.format(self.DbId))
path_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))))
path_insert_py = os.path.join(path_root, 'database/init/{0}/insert/py/'.format(self.DbId))
if os.path.isdir(path_insert_py):
import importlib
namespace_insert_py = path_insert_py.replace('/', '.')
module = importlib.import_module(namespace_insert_py + 'Inserter')
inserter = module.Inserter(self.DbFilePath)
inserter.Insert()
"""
"""
    # Run processing via Python code
def __ActionByPy(self, action='insert'):
        if action not in {'create', 'insert'}: raise Exception('Argument "action" must be either create or insert.: {0}'.format(action))
path_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))))
path_insert_py = os.path.join(path_root, 'database/init/{0}/{1}/py/'.format(self.DbId, action))
if os.path.isdir(path_insert_py):
            # Load the module
import importlib
namespace_insert_py = path_insert_py.replace('/', '.')
module_name = action[0].upper() + action[1:] + 'r' # Create[r], Inserte[r], Delete[r]
module = importlib.import_module(namespace_insert_py + module_name)
            # Instantiate the class
class_name = module_name
cls = module[module_name](self.DbFilePath)
            # Get and invoke the method
method_name = module_name[:-1] # Create, Insert, Delete
method = getattr(cls, method_name)
method()
"""
    # Run processing via Python code
def __ActionByPy(self, action='insert'):
path, namespace, module_name, class_name, method_name = self.__GetIds_ActionByPy(action)
if os.path.isdir(path):
            # Load the module
            import importlib
            module = importlib.import_module(namespace + module_name)
            # Instantiate the class
#cls = module[module_name](self.DbFilePath)
cls = getattr(module, class_name)
##############################################################
            # What should the constructor argument be? Currently: DbPath, dataset.connect(), or client. Refactoring this into business logic should remove the need to pass the client.
#ins = cls(self.DbFilePath)
ins = cls(self.Db)
##############################################################
            # Get and invoke the method
#method = getattr(cls, method_name)
method = getattr(ins, method_name)
method()
def __GetIds_ActionByPy(self, action='insert'):
self.__CheckActionName(action)
path_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))))
path_l_py = 'database/init/{0}/{1}/py/'.format(self.DbId, action)
path_py = os.path.join(path_root, path_l_py)
namespace = path_l_py.replace('/', '.')
module_name = action[0].upper() + action[1:] + 'r' # Create[r], Inserte[r], Delete[r]
class_name = module_name
method_name = module_name[:-1] # Create, Insert, Delete
return path_py, namespace, module_name, class_name, method_name
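    # Illustrative example of the naming convention above (for action='create'):
    #   path   -> database/init/<DbId>/create/py/
    #   module -> 'Creater' (action capitalized + trailing 'r')
    #   class  -> 'Creater', invoked method -> 'Create'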
def __CheckActionName(self, action):
valid_names = {'create', 'insert'}
        if action not in valid_names: raise Exception('Argument "action" must be one of {0}.: {1}'.format(valid_names, action))
    # Get paths (SQL files for creating tables)
    def __GetCreateTableSqlFilePaths(self):
        path = os.path.join(self._path_dir_this, self.DbId, 'create', 'table', 'sql')
        for path_sql in glob.glob(os.path.join(path, '*.sql')): yield path_sql
    # Get paths (TSV files with initial data)
    def __GetInsertTsvFilePaths(self):
        path = os.path.join(self._path_dir_this, self.DbId, 'insert', 'tsv')
        for path_tsv in glob.glob(os.path.join(path, '*.tsv')): yield path_tsv
    # Get paths (SQL files with initial data)
    def __GetInsertSqlFilePaths(self):
        path = os.path.join(self._path_dir_this, self.DbId, 'insert', 'sql')
        for path_sql in glob.glob(os.path.join(path, '*.sql')): yield path_sql
    # Execute a SQL file
def __ExecuteSqlFile(self, sql_path):
with open(sql_path, 'r') as f:
sql = f.read()
self.__class__.Db.query(sql)
"""
def Initialize(self):
db = None
print(self.DbId)
print(self.DbFileName)
# if not os.path.isfile(self.__files[dbname]):
if os.path.isfile(self.DbFilePath):
db = dataset.connect('sqlite:///' + self.DbFilePath)
else:
            # Create an empty file
with open(self.DbFilePath, 'w') as f: pass
            # Connect to the DB
db = dataset.connect('sqlite:///' + self.DbFilePath)
db.query('PRAGMA foreign_keys = false')
        # Create tables (CREATE TABLE statements)
for path_sql in self.__GetCreateTableSqlFilePaths():
self.__ExecuteSqlFile(dbname, path_sql)
        # Insert initial data (INSERT statements)
for path_tsv in self.__GetInsertTsvFilePaths():
table_name = os.path.splitext(table_name)[0]
loader = database.TsvLoader.TsvLoader()
loader.ToSqlite3(path_tsv, self.DbFilePath, table_name)
db.query('PRAGMA foreign_keys = true')
return db
"""
"""
    # Get paths (SQL files for creating tables)
def __GetCreateTableSqlFilePaths(self, dbname):
path = os.path.join(self._path_dir_this, dbname, 'sql', 'create')
for path_sql in glob.glob(os.path.join(path + '*.sql')): yield path_sql
    # Get paths (TSV files with initial data)
def __GetInsertTsvFilePaths(self, dbname):
path = os.path.join(self._path_dir_this, dbname, 'tsv')
for path_tsv in glob.glob(os.path.join(path + '*.tsv')): yield path_tsv
return self.__dbs[dbname]
    # Execute a SQL file
def __ExecuteSqlFile(self, dbname, sql_path):
with open(sql_path, 'r') as f:
sql = f.read()
self.__dbs[dbname].query(sql)
"""
|
py | 7dfc32615282b0051423b1ca224d57647ca11b22 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.text
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .text_column import TextColumn as TextColumn_9b3f0ae0
class XTextColumns(XInterface_8f010a43):
"""
manages columns within the object.
The values used are relative. So it is not necessary to know the width of the object. The sum of the relative width values depends on the object and is defined in \"ReferenceValue.\"
See Also:
`API XTextColumns <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1text_1_1XTextColumns.html>`_
"""
__ooo_ns__: str = 'com.sun.star.text'
__ooo_full_ns__: str = 'com.sun.star.text.XTextColumns'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.text.XTextColumns'
@abstractmethod
def getColumnCount(self) -> int:
"""
"""
@abstractmethod
def getColumns(self) -> 'typing.Tuple[TextColumn_9b3f0ae0, ...]':
"""
returns the column description of the object.
"""
@abstractmethod
def getReferenceValue(self) -> int:
"""
As described above, the width values are relative.
"""
@abstractmethod
def setColumnCount(self, nColumns: int) -> None:
"""
sets the number of columns.
The minimum is 1 column.
"""
@abstractmethod
def setColumns(self, Columns: 'typing.Tuple[TextColumn_9b3f0ae0, ...]') -> None:
"""
sets the descriptors of all columns.
The number of members in the sequence must be the same as the number of columns of the object.
"""
__all__ = ['XTextColumns']
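# Illustrative, non-authoritative usage sketch: Writer objects such as text
# sections expose a 'TextColumns' property whose value implements XTextColumns.
# Assuming `section` is such an object obtained from a running office instance:
#   cols = section.TextColumns
#   cols.setColumnCount(3)       # relative widths are redistributed
#   section.TextColumns = cols   # write the modified descriptor back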
|
py | 7dfc34c6ec19419c072db52c3ed3c082c6c62eac | # Copyright (c) 2015-2019 Chris Withers
# See LICENSE.txt for license details.
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
setup(
name='mortar_mixins',
author='Chris Withers',
version='2.3.4',
author_email='[email protected]',
license='MIT',
description="SQLAlchemy mixins for use with Postgres.",
long_description=open(os.path.join(base_dir, 'README.rst')).read(),
url='https://github.com/Mortar/mortar_mixins',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
install_requires = (
'SQLAlchemy',
'psycopg2',
),
extras_require=dict(
test=['pytest', 'coverage', 'mortar_rdb', 'testfixtures', 'coveralls'],
build=['setuptools-git', 'wheel', 'twine']
),
)
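# Illustrative note: from a local checkout, the optional extras declared above can
# be installed with, for example, `pip install -e .[test]` or `pip install -e .[build]`.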
|
py | 7dfc357aadda140cf3866987b64f88eed13f815d | from cereal import car
from selfdrive.car.hyundai.values import DBC, STEER_THRESHOLD, FEATURES, EV_HYBRID
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from selfdrive.config import Conversions as CV
GearShifter = car.CarState.GearShifter
class CarState(CarStateBase):
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
ret.doorOpen = any([cp.vl["CGW1"]['CF_Gway_DrvDrSw'], cp.vl["CGW1"]['CF_Gway_AstDrSw'],
cp.vl["CGW2"]['CF_Gway_RLDrSw'], cp.vl["CGW2"]['CF_Gway_RRDrSw']])
ret.seatbeltUnlatched = cp.vl["CGW1"]['CF_Gway_DrvSeatBeltSw'] == 0
ret.wheelSpeeds.fl = cp.vl["WHL_SPD11"]['WHL_SPD_FL'] * CV.KPH_TO_MS
ret.wheelSpeeds.fr = cp.vl["WHL_SPD11"]['WHL_SPD_FR'] * CV.KPH_TO_MS
ret.wheelSpeeds.rl = cp.vl["WHL_SPD11"]['WHL_SPD_RL'] * CV.KPH_TO_MS
ret.wheelSpeeds.rr = cp.vl["WHL_SPD11"]['WHL_SPD_RR'] * CV.KPH_TO_MS
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.1
ret.steeringAngle = cp.vl["SAS11"]['SAS_Angle']
ret.steeringRate = cp.vl["SAS11"]['SAS_Speed']
ret.yawRate = cp.vl["ESP12"]['YAW_RATE']
ret.leftBlinker = cp.vl["CGW1"]['CF_Gway_TSigLHSw'] != 0
ret.rightBlinker = cp.vl["CGW1"]['CF_Gway_TSigRHSw'] != 0
ret.steeringTorque = cp.vl["MDPS12"]['CR_Mdps_StrColTq']
ret.steeringTorqueEps = cp.vl["MDPS12"]['CR_Mdps_OutTq']
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
ret.steerWarning = cp.vl["MDPS12"]['CF_Mdps_ToiUnavail'] != 0
# cruise state
ret.cruiseState.available = True
ret.cruiseState.enabled = cp.vl["SCC12"]['ACCMode'] != 0
ret.cruiseState.standstill = cp.vl["SCC11"]['SCCInfoDisplay'] == 4.
if ret.cruiseState.enabled:
is_set_speed_in_mph = int(cp.vl["CLU11"]["CF_Clu_SPEED_UNIT"])
speed_conv = CV.MPH_TO_MS if is_set_speed_in_mph else CV.KPH_TO_MS
ret.cruiseState.speed = cp.vl["SCC11"]['VSetDis'] * speed_conv
else:
ret.cruiseState.speed = 0
# TODO: Find brake pressure
ret.brake = 0
ret.brakePressed = cp.vl["TCS13"]['DriverBraking'] != 0
# TODO: Check this
ret.brakeLights = bool(cp.vl["TCS13"]['BrakeLight'] or ret.brakePressed)
if self.CP.carFingerprint in EV_HYBRID:
ret.gas = cp.vl["E_EMS11"]['Accel_Pedal_Pos'] / 256.
ret.gasPressed = ret.gas > 0
else:
ret.gas = cp.vl["EMS12"]['PV_AV_CAN'] / 100
ret.gasPressed = bool(cp.vl["EMS16"]["CF_Ems_AclAct"])
# TODO: refactor gear parsing in function
# Gear Selection via Cluster - For those Kia/Hyundai which are not fully discovered, we can use the Cluster Indicator for Gear Selection,
# as this seems to be standard over all cars, but is not the preferred method.
if self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
if cp.vl["CLU15"]["CF_Clu_InhibitD"] == 1:
ret.gearShifter = GearShifter.drive
elif cp.vl["CLU15"]["CF_Clu_InhibitN"] == 1:
ret.gearShifter = GearShifter.neutral
elif cp.vl["CLU15"]["CF_Clu_InhibitP"] == 1:
ret.gearShifter = GearShifter.park
elif cp.vl["CLU15"]["CF_Clu_InhibitR"] == 1:
ret.gearShifter = GearShifter.reverse
else:
ret.gearShifter = GearShifter.unknown
    # Gear Selection via TCU12
elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
gear = cp.vl["TCU12"]["CUR_GR"]
if gear == 0:
ret.gearShifter = GearShifter.park
elif gear == 14:
ret.gearShifter = GearShifter.reverse
elif gear > 0 and gear < 9: # unaware of anything over 8 currently
ret.gearShifter = GearShifter.drive
else:
ret.gearShifter = GearShifter.unknown
    # Gear Selection - This is only compatible with Optima Hybrid 2017
elif self.CP.carFingerprint in FEATURES["use_elect_gears"]:
gear = cp.vl["ELECT_GEAR"]["Elect_Gear_Shifter"]
if gear in (5, 8): # 5: D, 8: sport mode
ret.gearShifter = GearShifter.drive
elif gear == 6:
ret.gearShifter = GearShifter.neutral
elif gear == 0:
ret.gearShifter = GearShifter.park
elif gear == 7:
ret.gearShifter = GearShifter.reverse
else:
ret.gearShifter = GearShifter.unknown
    # Gear Selection - This is not compatible with all Kia/Hyundais, but is the best way for those it is compatible with
else:
gear = cp.vl["LVR12"]["CF_Lvr_Gear"]
if gear in (5, 8): # 5: D, 8: sport mode
ret.gearShifter = GearShifter.drive
elif gear == 6:
ret.gearShifter = GearShifter.neutral
elif gear == 0:
ret.gearShifter = GearShifter.park
elif gear == 7:
ret.gearShifter = GearShifter.reverse
else:
ret.gearShifter = GearShifter.unknown
ret.stockAeb = cp.vl["FCA11"]['FCA_CmdAct'] != 0
ret.stockFcw = cp.vl["FCA11"]['CF_VSM_Warn'] == 2
# save the entire LKAS11 and CLU11
self.lkas11 = cp_cam.vl["LKAS11"]
self.clu11 = cp.vl["CLU11"]
self.park_brake = cp.vl["CGW1"]['CF_Gway_ParkBrakeSw']
self.steer_state = cp.vl["MDPS12"]['CF_Mdps_ToiActive'] # 0 NOT ACTIVE, 1 ACTIVE
self.lead_distance = cp.vl["SCC11"]['ACC_ObjDist']
return ret
@staticmethod
def get_can_parser(CP):
signals = [
# sig_name, sig_address, default
("WHL_SPD_FL", "WHL_SPD11", 0),
("WHL_SPD_FR", "WHL_SPD11", 0),
("WHL_SPD_RL", "WHL_SPD11", 0),
("WHL_SPD_RR", "WHL_SPD11", 0),
("YAW_RATE", "ESP12", 0),
("CF_Gway_DrvSeatBeltInd", "CGW4", 1),
("CF_Gway_DrvSeatBeltSw", "CGW1", 0),
("CF_Gway_DrvDrSw", "CGW1", 0), # Driver Door
("CF_Gway_AstDrSw", "CGW1", 0), # Passenger door
("CF_Gway_RLDrSw", "CGW2", 0), # Rear reft door
("CF_Gway_RRDrSw", "CGW2", 0), # Rear right door
("CF_Gway_TSigLHSw", "CGW1", 0),
("CF_Gway_TurnSigLh", "CGW1", 0),
("CF_Gway_TSigRHSw", "CGW1", 0),
("CF_Gway_TurnSigRh", "CGW1", 0),
("CF_Gway_ParkBrakeSw", "CGW1", 0),
("CYL_PRES", "ESP12", 0),
("CF_Clu_CruiseSwState", "CLU11", 0),
("CF_Clu_CruiseSwMain", "CLU11", 0),
("CF_Clu_SldMainSW", "CLU11", 0),
("CF_Clu_ParityBit1", "CLU11", 0),
("CF_Clu_VanzDecimal" , "CLU11", 0),
("CF_Clu_Vanz", "CLU11", 0),
("CF_Clu_SPEED_UNIT", "CLU11", 0),
("CF_Clu_DetentOut", "CLU11", 0),
("CF_Clu_RheostatLevel", "CLU11", 0),
("CF_Clu_CluInfo", "CLU11", 0),
("CF_Clu_AmpInfo", "CLU11", 0),
("CF_Clu_AliveCnt1", "CLU11", 0),
("ACCEnable", "TCS13", 0),
("BrakeLight", "TCS13", 0),
("DriverBraking", "TCS13", 0),
("ESC_Off_Step", "TCS15", 0),
("CF_Lvr_GearInf", "LVR11", 0), # Transmission Gear (0 = N or P, 1-8 = Fwd, 14 = Rev)
("CR_Mdps_StrColTq", "MDPS12", 0),
("CF_Mdps_ToiActive", "MDPS12", 0),
("CF_Mdps_ToiUnavail", "MDPS12", 0),
("CF_Mdps_FailStat", "MDPS12", 0),
("CR_Mdps_OutTq", "MDPS12", 0),
("SAS_Angle", "SAS11", 0),
("SAS_Speed", "SAS11", 0),
("FCA_CmdAct", "FCA11", 0),
("CF_VSM_Warn", "FCA11", 0),
("MainMode_ACC", "SCC11", 0),
("VSetDis", "SCC11", 0),
("SCCInfoDisplay", "SCC11", 0),
("ACC_ObjDist", "SCC11", 0),
("ACCMode", "SCC12", 1),
]
checks = [
# address, frequency
("MDPS12", 50),
("TCS13", 50),
("TCS15", 10),
("CLU11", 50),
("ESP12", 100),
("CGW1", 10),
("CGW4", 5),
("WHL_SPD11", 50),
("SAS11", 100),
("SCC11", 50),
("SCC12", 50),
("FCA11", 50),
]
if CP.carFingerprint in EV_HYBRID:
signals += [
("Accel_Pedal_Pos", "E_EMS11", 0),
]
checks += [
("E_EMS11", 50),
]
else:
signals += [
("PV_AV_CAN", "EMS12", 0),
("CF_Ems_AclAct", "EMS16", 0),
]
checks += [
("EMS12", 100),
("EMS16", 100),
]
if CP.carFingerprint in FEATURES["use_cluster_gears"]:
signals += [
("CF_Clu_InhibitD", "CLU15", 0),
("CF_Clu_InhibitP", "CLU15", 0),
("CF_Clu_InhibitN", "CLU15", 0),
("CF_Clu_InhibitR", "CLU15", 0),
]
checks += [
("CLU15", 5)
]
elif CP.carFingerprint in FEATURES["use_tcu_gears"]:
signals += [
("CUR_GR", "TCU12", 0)
]
checks += [
("TCU12", 100)
]
elif CP.carFingerprint in FEATURES["use_elect_gears"]:
signals += [("Elect_Gear_Shifter", "ELECT_GEAR", 0)]
checks += [("ELECT_GEAR", 20)]
else:
signals += [
("CF_Lvr_Gear", "LVR12", 0)
]
checks += [
("LVR12", 100)
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
signals = [
# sig_name, sig_address, default
("CF_Lkas_Bca_R", "LKAS11", 0),
("CF_Lkas_LdwsSysState", "LKAS11", 0),
("CF_Lkas_SysWarning", "LKAS11", 0),
("CF_Lkas_LdwsLHWarning", "LKAS11", 0),
("CF_Lkas_LdwsRHWarning", "LKAS11", 0),
("CF_Lkas_HbaLamp", "LKAS11", 0),
("CF_Lkas_FcwBasReq", "LKAS11", 0),
("CF_Lkas_HbaSysState", "LKAS11", 0),
("CF_Lkas_FcwOpt", "LKAS11", 0),
("CF_Lkas_HbaOpt", "LKAS11", 0),
("CF_Lkas_FcwSysState", "LKAS11", 0),
("CF_Lkas_FcwCollisionWarning", "LKAS11", 0),
("CF_Lkas_FusionState", "LKAS11", 0),
("CF_Lkas_FcwOpt_USM", "LKAS11", 0),
("CF_Lkas_LdwsOpt_USM", "LKAS11", 0)
]
checks = []
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 2)
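# Illustrative usage sketch (surrounding names assumed, not defined in this file):
# the parsers built above are filled from raw CAN data elsewhere in openpilot and
# then handed to update(), roughly:
#   cp = CarState.get_can_parser(CP)
#   cp_cam = CarState.get_cam_can_parser(CP)
#   ret = CS.update(cp, cp_cam)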
|
py | 7dfc382ec825eaaedfdca1f14ff324b87e1beb1e | import json
import os
import shutil
import tempfile
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
import mock
import pytest
from waffle.testutils import override_switch
from PIL import Image
from olympia import amo
from olympia.addons.models import Addon, AddonUser, Preview
from olympia.amo.templatetags.jinja_helpers import user_media_path
from olympia.amo.tests import (
TestCase, addon_factory, user_factory, version_factory)
from olympia.amo.tests.test_helpers import get_addon_file, get_image_path
from olympia.amo.utils import image_size, utc_millesecs_from_epoch
from olympia.api.models import SYMMETRIC_JWT_TYPE, APIKey
from olympia.applications.models import AppVersion
from olympia.constants.base import VALIDATOR_SKELETON_RESULTS
from olympia.devhub import tasks
from olympia.files.models import File, FileUpload
from olympia.versions.models import Version
pytestmark = pytest.mark.django_db
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 350
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 339
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 339, 350]
final_size = [(32, 12), (339, 128), (339, 128)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
src = tempfile.NamedTemporaryFile(
mode='r+w+b', suffix='.png', delete=False, dir=settings.TMP_PATH)
if not isinstance(final_size, list):
final_size = [final_size]
resize_size = [resize_size]
uploadto = user_media_path('addon_icons')
try:
os.makedirs(uploadto)
except OSError:
pass
for rsize, expected_size in zip(resize_size, final_size):
# resize_icon moves the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
assert src_image.size == original_size
dest_name = os.path.join(uploadto, '1234')
with mock.patch('olympia.amo.utils.pngcrush_image') as pngcrush_mock:
return_value = tasks.resize_icon(src.name, dest_name, [rsize])
dest_image = '%s-%s.png' % (dest_name, rsize)
assert pngcrush_mock.call_count == 1
assert pngcrush_mock.call_args_list[0][0][0] == dest_image
assert image_size(dest_image) == expected_size
# original should have been moved to -original
orig_image = '%s-original.png' % dest_name
assert os.path.exists(orig_image)
# Return value of the task should be a dict with an icon_hash key
# containing the 8 first chars of the md5 hash of the source file,
# which is bb362450b00f0461c6bddc6b97b3c30b.
assert return_value == {'icon_hash': 'bb362450'}
os.remove(dest_image)
assert not os.path.exists(dest_image)
os.remove(orig_image)
assert not os.path.exists(orig_image)
shutil.rmtree(uploadto)
assert not os.path.exists(src.name)
@pytest.mark.django_db
@mock.patch('olympia.amo.utils.pngcrush_image')
def test_recreate_previews(pngcrush_image_mock):
addon = addon_factory()
# Set up the preview so it has files in the right places.
preview_no_original = Preview.objects.create(addon=addon)
with storage.open(preview_no_original.image_path, 'w') as dest:
shutil.copyfileobj(open(get_image_path('preview_landscape.jpg')), dest)
with storage.open(preview_no_original.thumbnail_path, 'w') as dest:
shutil.copyfileobj(open(get_image_path('mozilla.png')), dest)
# And again but this time with an "original" image.
preview_has_original = Preview.objects.create(addon=addon)
with storage.open(preview_has_original.image_path, 'w') as dest:
shutil.copyfileobj(open(get_image_path('preview_landscape.jpg')), dest)
with storage.open(preview_has_original.thumbnail_path, 'w') as dest:
shutil.copyfileobj(open(get_image_path('mozilla.png')), dest)
with storage.open(preview_has_original.original_path, 'w') as dest:
shutil.copyfileobj(open(get_image_path('teamaddons.jpg')), dest)
tasks.recreate_previews([addon.id])
assert preview_no_original.reload().sizes == {
'image': [533, 400], 'thumbnail': [533, 400]}
# Check no resize for full size, but resize happened for thumbnail
assert (storage.size(preview_no_original.image_path) ==
storage.size(get_image_path('preview_landscape.jpg')))
assert (storage.size(preview_no_original.thumbnail_path) !=
storage.size(get_image_path('mozilla.png')))
assert preview_has_original.reload().sizes == {
'image': [2400, 1600], 'thumbnail': [640, 427],
'original': [3000, 2000]}
# Check both full and thumbnail changed, but original didn't.
assert (storage.size(preview_has_original.image_path) !=
storage.size(get_image_path('preview_landscape.jpg')))
assert (storage.size(preview_has_original.thumbnail_path) !=
storage.size(get_image_path('mozilla.png')))
assert (storage.size(preview_has_original.original_path) ==
storage.size(get_image_path('teamaddons.jpg')))
class ValidatorTestCase(TestCase):
def setUp(self):
# Because the validator calls dump_apps() once and then uses the json
# file to find out which appversions are valid, all tests running the
# validator need to create *all* possible appversions all tests using
# this class might need.
# 3.7a1pre is somehow required to exist by
# amo-validator.
# The other ones are app-versions we're using in our
# tests.
self.create_appversion('firefox', '2.0')
self.create_appversion('firefox', '3.6')
self.create_appversion('firefox', '3.7a1pre')
self.create_appversion('firefox', '38.0a1')
# Required for WebExtensions tests.
self.create_appversion('firefox', '*')
self.create_appversion('firefox', '42.0')
self.create_appversion('firefox', '42.*')
self.create_appversion('firefox', '43.0')
# Required for 57-specific tests.
self.create_appversion('android', '38.0a1')
self.create_appversion('android', '*')
self.create_appversion('firefox', '57.0')
# Required for Android tests.
self.create_appversion('android', '42.0')
self.create_appversion('android', '45.0')
def create_appversion(self, name, version):
return AppVersion.objects.create(
application=amo.APPS[name].id, version=version)
class TestValidator(ValidatorTestCase):
mock_sign_addon_warning = json.dumps({
"warnings": 1,
"errors": 0,
"messages": [
{"context": None,
"editors_only": False,
"description": "Add-ons which are already signed will be "
"re-signed when published on AMO. This will "
"replace any existing signatures on the add-on.",
"column": None,
"type": "warning",
"id": ["testcases_content", "signed_xpi"],
"file": "",
"tier": 2,
"message": "Package already signed",
"uid": "87326f8f699f447e90b3d5a66a78513e",
"line": None,
"compatibility_type": None},
]
})
def setUp(self):
super(TestValidator, self).setUp()
self.upload = FileUpload.objects.create(
path=get_addon_file('desktop.xpi'))
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('olympia.devhub.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validate(self.upload, listed=True)
assert self.get_upload().valid
@mock.patch('olympia.devhub.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validate(self.upload, listed=True)
assert not self.get_upload().valid
@mock.patch('olympia.devhub.tasks.run_validator')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
self.upload.update(path=get_addon_file('desktop.xpi'))
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == ['validator',
'unexpected_exception']
assert not self.upload.valid
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_validation_error_webextension(self, _mock):
_mock.side_effect = Exception
self.upload.update(path=get_addon_file('valid_webextension.xpi'))
assert self.upload.validation is None
tasks.validate(self.upload, listed=True)
self.upload.reload()
validation = self.upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == [
'validator', 'unexpected_exception']
assert 'WebExtension' in validation['messages'][0]['message']
assert not self.upload.valid
@mock.patch('olympia.devhub.tasks.run_validator')
def test_validation_signing_warning(self, _mock):
"""If we sign addons, warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
tasks.validate(self.upload, listed=True)
validation = json.loads(self.get_upload().validation)
assert validation['warnings'] == 1
assert len(validation['messages']) == 1
@mock.patch('olympia.devhub.tasks.statsd.incr')
def test_track_validation_stats(self, mock_statsd_incr):
tasks.validate(self.upload, listed=True)
mock_statsd_incr.assert_has_calls((
mock.call('devhub.validator.results.all.success'),
mock.call('devhub.validator.results.listed.success')))
def test_handle_file_validation_result_task_result_is_serializable(self):
addon = addon_factory()
self.file = addon.current_version.all_files[0]
assert not self.file.has_been_validated
file_validation_id = tasks.validate(
self.file, synchronous=True).get()
assert json.dumps(file_validation_id)
# Not `self.file.reload()`. It won't update the `validation` FK.
self.file = File.objects.get(pk=self.file.pk)
assert self.file.has_been_validated
class TestMeasureValidationTime(TestValidator):
def setUp(self):
super(TestMeasureValidationTime, self).setUp()
# Set created time back (just for sanity) otherwise the delta
# would be in the microsecond range.
self.upload.update(created=datetime.now() - timedelta(days=1))
@contextmanager
def statsd_timing_mock(self):
statsd_calls = {}
def capture_timing_call(metric, value):
statsd_calls[metric] = value
with mock.patch('olympia.devhub.tasks.statsd.timing') as mock_timing:
mock_timing.side_effect = capture_timing_call
yield statsd_calls
def approximate_upload_time(self):
upload_start = utc_millesecs_from_epoch(self.upload.created)
now = utc_millesecs_from_epoch()
return now - upload_start
def assert_milleseconds_are_close(self, actual_ms, calculated_ms,
fuzz=None):
if fuzz is None:
fuzz = Decimal(300)
assert (actual_ms >= (calculated_ms - fuzz) and
actual_ms <= (calculated_ms + fuzz))
def handle_upload_validation_result(self,
channel=amo.RELEASE_CHANNEL_LISTED):
validation = amo.VALIDATOR_SKELETON_RESULTS.copy()
tasks.handle_upload_validation_result(validation, self.upload.pk,
channel, False)
def test_track_upload_validation_results_time(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls['devhub.validation_results_processed']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_track_upload_validation_results_with_file_size(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# This test makes sure storage.size() works on a real file.
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
# This value should not be scaled because this package is under 1MB.
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_scale_large_xpi_times_per_megabyte(self):
megabyte = Decimal(1024 * 1024)
file_size_in_mb = Decimal(5)
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = file_size_in_mb * megabyte
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# Validation times for files larger than 1MB should be scaled.
rough_delta = self.approximate_upload_time()
rough_scaled_delta = Decimal(rough_delta) / file_size_in_mb
actual_scaled_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
self.assert_milleseconds_are_close(actual_scaled_delta,
rough_scaled_delta)
def test_measure_small_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 500 # less than 1MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_under_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_measure_large_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = (2014 * 1024) * 5 # 5MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_over_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_do_not_calculate_scaled_time_for_empty_files(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 0
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
def test_ignore_missing_upload_paths_for_now(self):
with mock.patch('olympia.devhub.tasks.storage.exists') as mock_exists:
mock_exists.return_value = False
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed' in statsd_calls
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
assert ('devhub.validation_results_processed_under_1mb' not in
statsd_calls)
class TestTrackValidatorStats(TestCase):
def setUp(self):
super(TestTrackValidatorStats, self).setUp()
patch = mock.patch('olympia.devhub.tasks.statsd.incr')
self.mock_incr = patch.start()
self.addCleanup(patch.stop)
def result(self, **overrides):
result = VALIDATOR_SKELETON_RESULTS.copy()
result.update(overrides)
return json.dumps(result)
def test_count_all_successes(self):
tasks.track_validation_stats(self.result(errors=0))
self.mock_incr.assert_any_call(
'devhub.validator.results.all.success'
)
def test_count_all_errors(self):
tasks.track_validation_stats(self.result(errors=1))
self.mock_incr.assert_any_call(
'devhub.validator.results.all.failure'
)
def test_count_listed_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': True}))
self.mock_incr.assert_any_call(
'devhub.validator.results.listed.success'
)
def test_count_unlisted_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': False}))
self.mock_incr.assert_any_call(
'devhub.validator.results.unlisted.success'
)
class TestRunAddonsLinter(ValidatorTestCase):
def setUp(self):
super(TestRunAddonsLinter, self).setUp()
valid_path = get_addon_file('valid_webextension.xpi')
invalid_path = get_addon_file('invalid_webextension_invalid_id.xpi')
self.valid_upload = FileUpload.objects.create(path=valid_path)
self.invalid_upload = FileUpload.objects.create(path=invalid_path)
def get_upload(self, upload):
return FileUpload.objects.get(pk=upload.pk)
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_calls_run_linter(self, run_linter):
run_linter.return_value = '{"errors": 0}'
assert not self.valid_upload.valid
tasks.validate(self.valid_upload, listed=True)
upload = self.get_upload(self.valid_upload)
assert upload.valid, upload.validation
def test_run_linter_fail(self):
tasks.validate(self.invalid_upload, listed=True)
assert not self.get_upload(self.invalid_upload).valid
def test_run_linter_path_doesnt_exist(self):
with pytest.raises(ValueError) as exc:
tasks.run_addons_linter('doesntexist')
assert str(exc.value) == (
'Path "doesntexist" is not a file or directory or '
'does not exist.')
def test_run_linter_use_temporary_file(self):
TemporaryFile = tempfile.TemporaryFile
with mock.patch('olympia.devhub.tasks.tempfile.TemporaryFile') as tmpf:
tmpf.side_effect = lambda *a, **kw: TemporaryFile(*a, **kw)
# This is a relatively small add-on (1.2M) but we are using
# a temporary file for all our linter output.
result = json.loads(tasks.run_addons_linter(
get_addon_file('typo-gecko.xpi')
))
assert tmpf.call_count == 2
assert result['success']
assert result['warnings'] == 24
assert not result['errors']
class TestValidateFilePath(ValidatorTestCase):
def test_amo_validator_success(self):
result = tasks.validate_file_path(
None, get_addon_file('valid_firefox_addon.xpi'),
hash_=None, listed=True)
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_amo_validator_fail_warning(self):
result = tasks.validate_file_path(
None, get_addon_file('invalid_firefox_addon_warning.xpi'),
hash_=None, listed=True)
assert not result['success']
assert not result['errors']
assert result['warnings']
def test_amo_validator_fail_error(self):
result = tasks.validate_file_path(
None, get_addon_file('invalid_firefox_addon_error.xpi'),
hash_=None, listed=True)
assert not result['success']
assert result['errors']
assert not result['warnings']
def test_amo_validator_addons_linter_success(self):
result = tasks.validate_file_path(
None, get_addon_file('valid_webextension.xpi'),
hash_=None, listed=True, is_webextension=True)
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_amo_validator_addons_linter_error(self):
# This test assumes that `amo-validator` doesn't correctly
        # validate an invalid id in manifest.json
result = tasks.validate_file_path(
None, get_addon_file('invalid_webextension_invalid_id.xpi'),
hash_=None, listed=True, is_webextension=True)
assert not result['success']
assert result['errors']
assert not result['warnings']
class TestWebextensionIncompatibilities(ValidatorTestCase):
fixtures = ['base/addon_3615']
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
# valid_webextension.xpi has version 1.0 so mock the original version
self.addon.update(guid='[email protected]')
self.addon.current_version.update(version='0.9')
self.update_files(
version=self.addon.current_version,
filename='delicious_bookmarks-2.1.106-fx.xpi')
def update_files(self, **kw):
for version in self.addon.versions.all():
for file in version.files.all():
file.update(**kw)
def test_webextension_upgrade_is_annotated(self):
assert all(f.is_webextension is False
for f in self.addon.current_version.all_files)
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['is_upgrade_to_webextension']
expected = ['validation', 'messages', 'webext_upgrade']
assert upload.processed_validation['messages'][0]['id'] == expected
assert upload.processed_validation['warnings'] == 1
assert upload.valid
def test_new_webextension_is_not_annotated(self):
"""https://github.com/mozilla/addons-server/issues/3679"""
previous_file = self.addon.current_version.all_files[-1]
previous_file.is_webextension = True
previous_file.status = amo.STATUS_AWAITING_REVIEW
previous_file.save()
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
assert validation['warnings'] == 0
assert upload.valid
def test_webextension_webext_to_webext_not_annotated(self):
previous_file = self.addon.current_version.all_files[-1]
previous_file.is_webextension = True
previous_file.save()
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
assert validation['warnings'] == 0
assert upload.valid
def test_webextension_no_webext_no_warning(self):
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
assert 'is_upgrade_to_webextension' not in validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
def test_webextension_cannot_be_downgraded(self):
self.update_files(is_webextension=True)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_webextension_downgrade_only_warning_unlisted(self):
self.update_files(is_webextension=True)
self.make_addon_unlisted(self.addon)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=False)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'warning'
assert validation['errors'] == 0
def test_webextension_cannot_be_downgraded_ignore_deleted_version(self):
"""Make sure even deleting the previous version does not prevent
the downgrade error."""
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = FileUpload.objects.create(path=file_, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'webext_downgrade']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_no_upgrade_annotation_no_version(self):
"""Make sure there's no workaround the downgrade error."""
self.addon.update(guid='guid@xpi')
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-no-version.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = FileUpload.objects.create(path=file_, addon=self.addon)
upload.addon.version = None
upload.addon.save()
upload.save(update_fields=('version',))
upload.refresh_from_db()
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = [u'testcases_installrdf', u'_test_rdf', u'missing_addon']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
class TestLegacyAddonRestrictions(ValidatorTestCase):
def setUp(self):
super(TestLegacyAddonRestrictions, self).setUp()
def test_submit_legacy_addon_restricted(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_extension_not_a_new_addon(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_extension_1st_version_in_that_channel(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(
version_kw={'version': '0.1',
'channel': amo.RELEASE_CHANNEL_UNLISTED})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_extension_1st_version_in_that_channel_reverse(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(
version_kw={'version': '0.1',
'channel': amo.RELEASE_CHANNEL_LISTED})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=False)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_restricted']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_extension_targets_older_firefox_stricly(self):
file_ = get_addon_file('valid_firefox_addon_strict_compatibility.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_non_extension(self):
file_ = get_addon_file('searchgeek-20090701.xml')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_restrict_firefox_53_alpha(self):
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': False,
'is_extension': True,
'strict_compatibility': True,
'applications': {
'firefox': {
'max': '53a1'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data, is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_restricted']
def test_restrict_themes(self):
data = {
'messages': [],
'errors': 0,
'detected_type': 'theme',
'metadata': {
'is_extension': False,
'strict_compatibility': False,
'applications': {
'firefox': {
'max': '54.0'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data, is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_restricted']
def test_submit_legacy_upgrade(self):
# Works because it's not targeting >= 57.
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_upgrade_targeting_firefox_57(self):
# Should error since it's a legacy extension targeting 57.
file_ = get_addon_file('valid_firefox_addon_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
assert len(upload.processed_validation['messages']) == 1
assert upload.processed_validation['messages'][0]['type'] == 'error'
assert upload.processed_validation['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
assert not upload.valid
def test_submit_legacy_upgrade_targeting_57_strict_compatibility(self):
# Should error just like if it didn't have strict compatibility, that
# does not matter: it's a legacy extension, it should not target 57.
file_ = get_addon_file(
'valid_firefox_addon_targeting_57_strict_compatibility.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
assert len(upload.processed_validation['messages']) == 1
assert upload.processed_validation['messages'][0]['type'] == 'error'
assert upload.processed_validation['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
assert not upload.valid
def test_submit_legacy_upgrade_targeting_star(self):
# Should not error: extensions with a maxversion of '*' don't get the
# error, the manifest parsing code will rewrite it as '56.*' instead.
file_ = get_addon_file('valid_firefox_addon_targeting_star.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_webextension_upgrade_targeting_firefox_57(self):
# Should not error: it's targeting 57 but it's a webextension.
file_ = get_addon_file('valid_webextension_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'},
file_kw={'is_webextension': True})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
messages = upload.processed_validation['messages']
# 4 messages because the add-on uses some incompatible APIs
# but we don't care about that for this test.
assert len(messages) == 4
assert messages[0]['message'] == ('"strict_max_version" '
'not required.')
assert upload.valid
def test_submit_dictionary_upgrade_targeting_firefox_57(self):
# Should not error: non-extensions types are not affected by the
# restriction, even if they target 57.
file_ = get_addon_file('dictionary_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'},
type=amo.ADDON_DICT)
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_legacy_targeting_multiple_including_firefox_57(self):
# By submitting a legacy extension targeting multiple apps, this add-on
# avoids the restriction for new uploads, but it should still trigger
# the one for legacy extensions targeting 57 or higher.
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': False,
'is_extension': True,
'applications': {
'firefox': {
'max': '57.0'
},
'thunderbird': {
'max': '45.0'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=False)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'legacy_addons_max_version']
def test_allow_upgrade_submission_targeting_firefox_and_thunderbird(self):
# This should work regardless because it also
# targets Firefox (it's a legacy one, but it targets Firefox < 57).
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': False,
'is_extension': True,
'applications': {
'firefox': {
'max': '56.0'
},
'thunderbird': {
'max': '45.0'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=False)
assert results['errors'] == 0
def test_disallow_thunderbird_seamonkey(self):
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': False,
'is_extension': True,
'applications': {
'thunderbird': {
'max': '45.0'
}
}
}
}
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=True)
assert results['errors'] == 1
assert len(results['messages']) > 0
assert results['messages'][0]['id'] == [
'validation', 'messages', 'thunderbird_and_seamonkey_migration']
def test_dont_disallow_webextensions(self):
# Webextensions should not be disallowed.
data = {
'messages': [],
'errors': 0,
'detected_type': 'extension',
'metadata': {
'is_webextension': True,
'is_extension': True,
}
}
results = tasks.annotate_legacy_addon_restrictions(
data.copy(), is_new_upload=True)
assert results['errors'] == 0
@override_switch('disallow-legacy-submissions', active=True)
def test_legacy_submissions_disabled(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
upload = FileUpload.objects.create(path=file_)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
@override_switch('disallow-legacy-submissions', active=True)
def test_legacy_updates_disabled(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = FileUpload.objects.create(path=file_, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
@override_switch('disallow-legacy-submissions', active=True)
def test_submit_webextension_okay_after_legacy_unsupported(self):
self.test_submit_webextension()
@override_switch('disallow-legacy-submissions', active=True)
def test_submit_non_extension_okay_after_legacy_unsupported(self):
self.test_submit_non_extension()
@mock.patch('olympia.devhub.tasks.send_html_mail_jinja')
def test_send_welcome_email(send_html_mail_jinja_mock):
tasks.send_welcome_email(3615, ['[email protected]'], {'omg': 'yes'})
send_html_mail_jinja_mock.assert_called_with(
('Mozilla Add-ons: Your add-on has been submitted to'
' addons.mozilla.org!'),
'devhub/email/submission.html',
'devhub/email/submission.txt',
{'omg': 'yes'},
recipient_list=['[email protected]'],
from_email=settings.ADDONS_EMAIL,
use_deny_list=False,
perm_setting='individual_contact')
class TestSubmitFile(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestSubmitFile, self).setUp()
self.addon = Addon.objects.get(pk=3615)
patcher = mock.patch('olympia.devhub.tasks.create_version_for_upload')
self.create_version_for_upload = patcher.start()
self.addCleanup(patcher.stop)
def create_upload(self, version='1.0'):
return FileUpload.objects.create(
addon=self.addon, version=version, validation='{"errors":0}',
automated_signing=False)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations', True)
def test_file_passed_all_validations(self):
upload = self.create_upload()
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
self.create_version_for_upload.assert_called_with(
self.addon, upload, amo.RELEASE_CHANNEL_LISTED)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations',
False)
def test_file_not_passed_all_validations(self):
upload = self.create_upload()
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
assert not self.create_version_for_upload.called
class TestCreateVersionForUpload(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestCreateVersionForUpload, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.create_version_for_upload = (
tasks.create_version_for_upload.non_atomic)
self.mocks = {}
for key in ['Version.from_upload', 'parse_addon']:
patcher = mock.patch('olympia.devhub.tasks.%s' % key)
self.mocks[key] = patcher.start()
self.addCleanup(patcher.stop)
self.user = user_factory()
def create_upload(self, version='1.0'):
return FileUpload.objects.create(
addon=self.addon, version=version, user=self.user,
validation='{"errors":0}', automated_signing=False)
def test_file_passed_all_validations_not_most_recent(self):
upload = self.create_upload()
newer_upload = self.create_upload()
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# Check that the older file won't turn into a Version.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
# But the newer one will.
self.create_version_for_upload(self.addon, newer_upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['Version.from_upload'].assert_called_with(
newer_upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_version_exists(self):
upload = self.create_upload()
Version.objects.create(addon=upload.addon, version=upload.version)
# Check that the older file won't turn into a Version.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
def test_file_passed_all_validations_most_recent_failed(self):
upload = self.create_upload()
newer_upload = self.create_upload()
newer_upload.update(created=datetime.today() + timedelta(hours=1),
valid=False,
validation=json.dumps({"errors": 5}))
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
def test_file_passed_all_validations_most_recent(self):
upload = self.create_upload(version='1.0')
newer_upload = self.create_upload(version='0.5')
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# The Version is created because the newer upload is for a different
# version_string.
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_beta_string(self):
upload = self.create_upload(version='1.0-beta1')
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_no_version(self):
upload = self.create_upload(version=None)
self.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
class TestAPIKeyInSubmission(TestCase):
def setUp(self):
self.user = user_factory()
s = '656b16a8ab71686fcfcd04d574bc28be9a1d8252141f54cfb5041709262b84f4'
self.key = APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='user:12345:678',
secret=s)
self.addon = addon_factory(users=[self.user],
version_kw={'version': '0.1'},
file_kw={'is_webextension': True})
self.file = get_addon_file('webextension_containing_api_key.xpi')
def test_api_key_in_new_submission_is_found(self):
upload = FileUpload.objects.create(path=self.file, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('Your developer API key was found in the submitted '
'file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert mail.outbox[0].to[0] == self.user.email
def test_api_key_in_submission_is_found(self):
upload = FileUpload.objects.create(path=self.file, addon=self.addon,
user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('Your developer API key was found in the submitted '
'file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert ('never share your credentials' in mail.outbox[0].body)
assert mail.outbox[0].to[0] == self.user.email
def test_coauthor_api_key_in_submission_is_found(self):
coauthor = user_factory()
AddonUser.objects.create(addon=self.addon, user_id=coauthor.id)
upload = FileUpload.objects.create(path=self.file, addon=self.addon,
user=coauthor)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('The developer API key of a coauthor was found in the '
'submitted file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert ('never share your credentials' in mail.outbox[0].body)
# We submit as the coauthor, the leaked key is the one from 'self.user'
assert mail.outbox[0].to[0] == self.user.email
def test_api_key_already_revoked_by_developer(self):
self.key.update(is_active=None)
tasks.revoke_api_key(self.key.id)
# If the key has already been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
def test_api_key_already_regenerated_by_developer(self):
self.key.update(is_active=None)
current_key = APIKey.new_jwt_credentials(user=self.user)
tasks.revoke_api_key(self.key.id)
key_from_db = APIKey.get_jwt_key(user_id=self.user.id)
assert current_key.key == key_from_db.key
assert current_key.secret == key_from_db.secret
def test_revoke_task_is_called(self):
mock_str = 'olympia.devhub.tasks.revoke_api_key'
wrapped = tasks.revoke_api_key
with mock.patch(mock_str, wraps=wrapped) as mock_revoke:
upload = FileUpload.objects.create(path=self.file, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
mock_revoke.apply_async.assert_called_with(
kwargs={'key_id': self.key.id}, countdown=120)
assert not upload.valid
def test_does_not_revoke_for_different_author(self):
different_author = user_factory()
upload = FileUpload.objects.create(path=self.file,
user=different_author)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.valid
def test_does_not_revoke_safe_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_validation_finishes_if_containing_binary_content(self):
file_ = get_addon_file('webextension_containing_binary_files.xpi')
upload = FileUpload.objects.create(path=file_, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_validation_finishes_if_containing_invalid_filename(self):
file_ = get_addon_file('invalid_webextension.xpi')
upload = FileUpload.objects.create(path=file_, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
# https://github.com/mozilla/addons-server/issues/8208
# causes this to be 1 (and invalid) instead of 0 (and valid).
# The invalid filename error is caught and raised outside of this
# validation task.
assert upload.processed_validation['errors'] == 1
assert not upload.valid
|
py | 7dfc3845fbea0e9a0c250656435995050b0d0677 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, ifitwala and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCourseSchedulingTool(unittest.TestCase):
pass
|
py | 7dfc38545f7be17f223d224babf5eabcac882592 | import re
import sys
import numpy
import pysam
from scipy.stats import scoreatpercentile, chisquare
def split_ranges(r):
if not r:
return r
p = re.compile('[:-]')
n = []
for part in r.split(","):
nums = [int(x) for x in p.split(part)]
for i in range(nums[0], nums[-1] + 1):
n.append(i)
return n
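# Example (illustrative): split_ranges("1-3,7") yields [1, 2, 3, 7]; "10:12" is treated
# the same as "10-12" because the pattern splits each part on ':' or '-'.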
def process_groups(groups):
if not groups:
return None
pg = []
for group in groups.split(","):
ids = [int(x) for x in group.split(":")]
if len(ids) == 2:
pg.append(range(ids[0], ids[1] + 1))
else:
pg.append(ids)
return pg
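# Example (illustrative): process_groups("1:3,5") gives [range(1, 4), [5]] (i.e. [[1, 2, 3], [5]]
# under Python 2's range), and an empty argument returns None.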
def split_interval(interval):
chrom, coords = interval.split(":")
start, end = [int(x) for x in coords.replace(",", "").split("-")]
return chrom, start, end
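# Example (illustrative): split_interval("chr1:1,000-2,500") -> ("chr1", 1000, 2500);
# thousands separators are stripped before the coordinates are parsed.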
def bam2numreads(bamfile):
result = pysam.idxstats(bamfile)
return numpy.sum([int(row.strip().split("\t")[2]) for row in result])
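# Note: this sums the third column of `samtools idxstats` output (mapped reads per
# reference), so unmapped reads are not included in the returned total.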
def _treesort(order, nodeorder, nodecounts, tree):
# From the Pycluster library, Michiel de Hoon
# Find the order of the nodes consistent with the hierarchical clustering
# tree, taking into account the preferred order of nodes.
nNodes = len(tree)
nElements = nNodes + 1
neworder = numpy.zeros(nElements)
clusterids = numpy.arange(nElements)
for i in range(nNodes):
i1 = tree[i].left
i2 = tree[i].right
if i1 < 0:
order1 = nodeorder[-i1 - 1]
count1 = nodecounts[-i1 - 1]
else:
order1 = order[i1]
count1 = 1
if i2 < 0:
order2 = nodeorder[-i2 - 1]
count2 = nodecounts[-i2 - 1]
else:
order2 = order[i2]
count2 = 1
# If order1 and order2 are equal, their order is determined
# by the order in which they were clustered
if i1 < i2:
if order1 < order2:
increase = count1
else:
increase = count2
for j in range(nElements):
clusterid = clusterids[j]
if clusterid == i1 and order1 >= order2:
neworder[j] += increase
if clusterid == i2 and order1 < order2:
neworder[j] += increase
if clusterid == i1 or clusterid == i2:
clusterids[j] = -i - 1
else:
if order1 <= order2:
increase = count1
else:
increase = count2
for j in range(nElements):
clusterid = clusterids[j]
if clusterid == i1 and order1 > order2:
neworder[j] += increase
if clusterid == i2 and order1 <= order2:
neworder[j] += increase
if clusterid == i1 or clusterid == i2:
clusterids[j] = -i - 1
return numpy.argsort(neworder)
def sort_tree(tree, order):
# Adapted from the Pycluster library, Michiel de Hoon
nnodes = len(tree)
nodeindex = 0
nodecounts = numpy.zeros(nnodes, int)
nodeorder = numpy.zeros(nnodes)
nodedist = numpy.array([node.distance for node in tree])
for nodeindex in range(nnodes):
min1 = tree[nodeindex].left
min2 = tree[nodeindex].right
if min1 < 0:
index1 = -min1 - 1
order1 = nodeorder[index1]
counts1 = nodecounts[index1]
nodedist[nodeindex] = max(nodedist[nodeindex], nodedist[index1])
else:
order1 = order[min1]
counts1 = 1
if min2 < 0:
index2 = -min2 - 1
order2 = nodeorder[index2]
counts2 = nodecounts[index2]
nodedist[nodeindex] = max(nodedist[nodeindex], nodedist[index2])
else:
order2 = order[min2]
counts2 = 1
counts = counts1 + counts2
nodecounts[nodeindex] = counts
nodeorder[nodeindex] = (counts1 * order1 + counts2 * order2) / counts
# Now set up order based on the tree structure
index = _treesort(order, nodeorder, nodecounts, tree)
return index
def normalize_data(data, percentile=75):
norm_data = {}
for track, ar in data.items():
flat = ar.flatten()
s = scoreatpercentile(flat[~numpy.isnan(flat)], percentile)
if s == 0:
sys.stderr.write(
"Error normalizing track {0} as score at percentile {1} is 0, normalizing to maximum value instead\n".format(
track, percentile))
x = ar / max(flat)
else:
x = ar / s
# x[x <= 0.5] = 0
x[x >= 1.0] = 1
norm_data[track] = x
return norm_data
def get_absolute_scale(scale, data, per_track=False):
try:
scale = float(scale)
return scale
except:
if type(scale) == type("") and scale.endswith("%"):
rel_scale = float(scale[:-1])
if per_track:
s = [scoreatpercentile(d, rel_scale) for d in data]
return s
else:
d = numpy.array(data).flatten()
s = scoreatpercentile(d, rel_scale)
# Set the scale to the minimum non-zero value, otherwise
# the plot will show nothing
if s == 0:
try:
s = min(d[d > 0])
except:
s = 1.0
return s
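# Example (illustrative): get_absolute_scale("75%", data) returns the 75th-percentile value
# of the flattened data (falling back to the smallest non-zero value if that is 0), while a
# plain numeric scale such as 2.0 is returned unchanged.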
def mirror_clusters(data, labels, cutoff=0.01):
"""
Merge mirrored profiles based on a chi2 test of the mean profiles
Only if the profile is mirrored over all data tracks
Returns the labels of the two matched mirrored tracks, if there is at least one match with a p-value
greater than the cutoff.
If not, return (None, None)
"""
n = len(set(labels))
if n == 1:
return (None, None)
mirror = dict([(i, {}) for i in range(n)])
for track in data.keys():
profiles = []
for i in range(n):
profiles.append(numpy.mean(data[track][labels == i], 0) + 1e-10)
for i in range(n - 1):
for j in range(i + 1, n):
p = chisquare(profiles[i], profiles[j][::-1])[1]
mirror[i].setdefault(j, []).append(p)
result = []
for i in mirror.keys():
for j in mirror[i].keys():
result.append([(i, j), mirror[i][j]])
for (i, j), ps in sorted(result, cmp=lambda a, b: cmp(numpy.mean(a[1]), numpy.mean(b[1])))[::-1]:
# print (i,j), ps, numpy.array(ps), cutoff
if (numpy.array(ps) >= cutoff).all():
return (i, j)
return (None, None)
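# Note: profiles[j][::-1] is the reversed mean profile of cluster j, so the chi-square test
# above asks whether cluster i matches the mirror image of cluster j; a pair is only reported
# when that holds for every data track (p >= cutoff everywhere).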
|
py | 7dfc3874f45f281f61c9359d3050926e575eac08 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from tqdm import tqdm
import numpy as np
from scipy.special import softmax
import paddle
from paddle import inference
import paddlenlp as ppnlp
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.datasets import load_dataset
from paddlenlp.utils.log import logger
sys.path.append('.')
from data import convert_example
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, required=True,
help="The directory to static model.")
parser.add_argument("--corpus_file", type=str, required=True,
help="The corpus_file path.")
parser.add_argument("--max_seq_length", default=64, type=int,
help="The maximum total input sequence length after tokenization. Sequences "
"longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=32, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default="gpu",
help="Select which device to train model, defaults to gpu.")
parser.add_argument('--use_tensorrt', default=False, type=eval, choices=[True, False],
help='Enable to use tensorrt to speed up.')
parser.add_argument("--precision", default="fp32", type=str, choices=["fp32", "fp16", "int8"],
help='The tensorrt precision.')
parser.add_argument('--cpu_threads', default=10, type=int,
help='Number of threads to predict when using cpu.')
parser.add_argument('--enable_mkldnn', default=False, type=eval, choices=[True, False],
help='Enable to use mkldnn to speed up when using cpu.')
args = parser.parse_args()
# yapf: enable
class Predictor(object):
def __init__(self,
model_dir,
device="gpu",
max_seq_length=128,
batch_size=32,
use_tensorrt=False,
precision="fp32",
cpu_threads=10,
enable_mkldnn=False):
self.max_seq_length = max_seq_length
self.batch_size = batch_size
model_file = model_dir + "/inference.get_pooled_embedding.pdmodel"
params_file = model_dir + "/inference.get_pooled_embedding.pdiparams"
if not os.path.exists(model_file):
raise ValueError("not find model file path {}".format(model_file))
if not os.path.exists(params_file):
raise ValueError("not find params file path {}".format(params_file))
config = paddle.inference.Config(model_file, params_file)
if device == "gpu":
# set GPU configs accordingly
# such as initializing the GPU memory or enabling TensorRT
config.enable_use_gpu(100, 0)
precision_map = {
"fp16": inference.PrecisionType.Half,
"fp32": inference.PrecisionType.Float32,
"int8": inference.PrecisionType.Int8
}
precision_mode = precision_map[precision]
if args.use_tensorrt:
config.enable_tensorrt_engine(max_batch_size=batch_size,
min_subgraph_size=30,
precision_mode=precision_mode)
elif device == "cpu":
# set CPU configs accordingly,
# such as enable_mkldnn, set_cpu_math_library_num_threads
config.disable_gpu()
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_threads)
elif device == "xpu":
# set XPU configs accordingly
config.enable_xpu(100)
config.switch_use_feed_fetch_ops(False)
self.predictor = paddle.inference.create_predictor(config)
self.input_handles = [
self.predictor.get_input_handle(name)
for name in self.predictor.get_input_names()
]
self.output_handle = self.predictor.get_output_handle(
self.predictor.get_output_names()[0])
def predict(self, data, tokenizer):
"""
Computes pooled embeddings for the given corpus and saves them to 'corpus_embedding.npy'.
Args:
data (obj:`List(str)`): The batch data whose each element is a raw text.
tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer`
which contains most of the methods. Users should refer to the superclass for more information regarding methods.
Returns:
None. The stacked embeddings are written to disk via np.save('corpus_embedding', ...).
"""
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_id), # segment
): fn(samples)
all_embeddings = []
examples = []
for idx, text in enumerate(tqdm(data)):
input_ids, segment_ids = convert_example(
text,
tokenizer,
max_seq_length=self.max_seq_length,
pad_to_max_seq_len=True)
examples.append((input_ids, segment_ids))
if (len(examples) > 100):
input_ids, segment_ids = batchify_fn(examples)
self.input_handles[0].copy_from_cpu(input_ids)
self.input_handles[1].copy_from_cpu(segment_ids)
self.predictor.run()
logits = self.output_handle.copy_to_cpu()
all_embeddings.append(logits)
examples = []
if (len(examples) > 0):
input_ids, segment_ids = batchify_fn(examples)
self.input_handles[0].copy_from_cpu(input_ids)
self.input_handles[1].copy_from_cpu(segment_ids)
self.predictor.run()
logits = self.output_handle.copy_to_cpu()
all_embeddings.append(logits)
all_embeddings = np.concatenate(all_embeddings, axis=0)
np.save('corpus_embedding', all_embeddings)
def read_text(file_path):
file = open(file_path)
id2corpus = {}
for idx, data in enumerate(file.readlines()):
id2corpus[idx] = data.strip()
return id2corpus
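# The corpus file is assumed to hold one passage per line; read_text maps each line index
# to its stripped text, e.g. {0: 'first passage', 1: 'second passage'}.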
if __name__ == "__main__":
predictor = Predictor(args.model_dir, args.device, args.max_seq_length,
args.batch_size, args.use_tensorrt, args.precision,
args.cpu_threads, args.enable_mkldnn)
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained('ernie-1.0')
id2corpus = read_text(args.corpus_file)
corpus_list = [{idx: text} for idx, text in id2corpus.items()]
predictor.predict(corpus_list, tokenizer)
|
py | 7dfc3874fef22f7104c0b0677936af6ffbde73c4 | from strenum import StrEnum
class Config(StrEnum):
"""
Configuration for the application.
"""
ELASTICSEARCH_INDEX = "location_stats"
RAPID_API_REALTOR_HOST = "realty-in-ca1.p.rapidapi.com"
GOOGLE_MAPS_API_URL = "https://maps.googleapis.com/maps/api"
GOOGLE_GEO_FILTERING_COMPONENTS = "country:CA|locality:ON"
|
py | 7dfc38bffc56725efbbb1fc1dcbd9cda4789db7f | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
if not root: return None
if root.right: self.flatten(root.right)
if root.left:
self.flatten(root.left)
l, r = root.left, root.right
root.left, root.right = None, l
while l.right: l = l.right
l.right = r
|
py | 7dfc38d3c70b1f3acd26028fc134fbcf94ad8bbe | import math
import os
import random
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend, callbacks
from tensorflow.python.keras.initializers import Constant
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import layers
from .weighted_loss import WeightedLoss
from ..base_model import BaseModel
from utils import tool
from tensorflow.python import train
from utils.auc import AUC # This can be replaced by tf.keras.AUC when tf version >=1.12
from tensorflow.python.keras.models import Model
from tensorflow.python.ops import nn
class UncertaintyWeight(object):
'''
Train any model in uncertainty_weight manner
'''
def __init__(self, base_model: BaseModel):
'''
:param base_model: any model inherited from the base_model
'''
self.base_model = base_model
self.add_weighted_loss()
def __getattr__(self, item):
'''
Delegate the base model
:param item:
:return:
'''
return getattr(self.base_model, item)
def add_weighted_loss(self):
y_true = layers.Input(shape=(1,), dtype=tf.float32, name='label')
user_id, item_id, domain_idx = self.model.inputs
y_pred = self.model.outputs[0]
y_pred = WeightedLoss(n_domains=self.n_domain)([y_true, y_pred, domain_idx])
model = Model(inputs=[user_id, item_id, domain_idx, y_true], outputs=y_pred)
model.summary()
# Optimization
if self.train_config['optimizer'] == 'adam':
opt = train.AdamOptimizer(learning_rate=self.train_config['learning_rate'])
else:
opt = self.train_config['optimizer']
model.compile(loss=None,
optimizer=opt,
metrics=[AUC(num_thresholds=500, name="AUC")])
model.metrics_names = [] # Must be set to empty to work around a Keras metrics bug
self.model = model
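# Note (assumption): WeightedLoss is expected to implement homoscedastic uncertainty
# weighting, i.e. it learns one weight (log-variance) per domain and scales each domain's
# loss term accordingly; its actual behaviour lives in weighted_loss.py.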
def train(self):
backend.get_session().run(tf.global_variables_initializer())
tensorboard_callback = callbacks.TensorBoard(log_dir=os.path.dirname(self.checkpoint_path),
histogram_freq=self.train_config['histogram_freq'],
write_grads=True)
data_iter = self.build_data_iter()
train_sequence = list(range(self.n_domain))
lock = False
for epoch in range(self.train_config['epoch']):
print("Epoch: {}".format(epoch), "-" * 30)
# Train
random.shuffle(train_sequence)
for idx in train_sequence:
d = self.dataset.train_dataset[idx]
print("Train on: Domain {}".format(idx))
old_time = time.time()
self.model.fit(d['data'], steps_per_epoch=d['n_step'], verbose=0, callbacks=[],
epochs=epoch + 1, initial_epoch=epoch)
print("Training time: ", time.time() - old_time)
# Val
print("Val Result: ")
avg_loss, avg_auc, domain_loss, domain_auc = self.val_and_test("val")
# Early Stopping
if self.early_stop_step(avg_auc):
break
# Test
print("Test Result: ")
test_avg_loss, test_avg_auc, test_domain_loss, test_domain_auc = self.val_and_test("test")
# Lock the graph for better performance
if not lock:
graph = tf.get_default_graph()
graph.finalize()
lock = True
def build_data_iter(self):
data_iter = {}
for idx, d in self.dataset.train_dataset.items():
train_iter = d['data'].make_initializable_iterator()
data_iter[idx] = {"train_iter": train_iter, "train_step": d['n_step']}
return data_iter
|
py | 7dfc38d67b7e8d9722ad9a9fcc8f4f5cf877a58c | """A hello world.
Uses the main libraries to verify the environment installation.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pymia.evaluation.evaluator as pymia_eval
import SimpleITK as sitk
import sklearn.ensemble as sk_ensemble
def main():
print(f'Welcome to MIALab {datetime.now().year}!')
# --- scikit-learn
sk_ensemble.RandomForestClassifier(max_depth=2, random_state=0)
# --- SimpleITK
image = sitk.Image(256, 128, 64, sitk.sitkInt16)
print('Image dimension:', image.GetDimension())
print('Voxel intensity before setting:', image.GetPixel(0, 0, 0))
image.SetPixel(0, 0, 0, 1)
print('Voxel intensity after setting:', image.GetPixel(0, 0, 0))
# --- numpy and matplotlib
array = np.array([1, 23, 2, 4])
plt.plot(array)
plt.ylabel('Some meaningful numbers')
plt.xlabel('The x-axis')
plt.title('Wohoo')
plt.show()
# --- pymia
pymia_eval.SegmentationEvaluator([], {})
print('Everything seems to work fine!')
if __name__ == "__main__":
"""The program's entry point."""
main()
|
py | 7dfc397eca463f482ab4845dab59b960e4b883ad | #from lib.pytube import YouTube
from lib.dewa import cari
from lib.anime import *
from lib.brainly import *
from lib.manga import *
from lib.resize import *
from lib.search import *
from lib.nulis import *
from urllib.parse import *
from flask import *
#from werkzeug.utils import *
from bs4 import BeautifulSoup as bs
from requests import get, post
import os, math, json, random, re, html_text, pytesseract, base64, time, smtplib
ua_ig = 'Mozilla/5.0 (Linux; Android 9; SM-A102U Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 Instagram 155.0.0.37.107 Android (28/9; 320dpi; 720x1468; samsung; SM-A102U; a10e; exynos7885; en_US; 239490550)'
app = Flask(__name__)
apiKey = 'O8mUD3YrHIy9KM1fMRjamw8eg'
apiKey_ocr = '09731daace88957'
app.config['MEDIA'] = 'tts'
app.secret_key = b'BB,^z\x90\x88?\xcf\xbb'
#ALLOWED_EXTENSION = set(['png', 'jpeg', 'jpg'])
#app.config['Layer_Folder'] = 'layer'
def convert_size(size_bytes):
if size_bytes == 0:
return '0B'
size_name = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return '%s %s' % (s, size_name[i])
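# Example (illustrative): convert_size(1536) -> '1.5 KB' and convert_size(0) -> '0B';
# the unit is chosen from floor(log base 1024 of the byte count).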
#def allowed_file(filename):
# return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSION
@app.route('/tts/<path:filename>', methods=['GET','POST'])
def sendTts(filename):
return send_from_directory(app.config['MEDIA'], filename, as_attachment=True)
@app.route('/api/layer', methods=['GET','POST'])
def layer_route(): # renamed so it does not shadow the imported layer() helper called below
if request.args.get('base64image'):
try:
open('piw.jpg','w').write(request.args.get('base64image'))
os.system('base64 -i -d piw.jpg > paw.jpg')
hehe = resizeTo('paw.jpg')
huhu = layer(hehe, 'black')
os.system('base64 result.jpg > pow.jpg')
return {
'status': 200,
'result': '`data:image/jpg;base64,%s`' % open('pow.jpg').read()
}
except Exception as e:
print(e)
#os.remove('piw.jpg')
return {
'status': False,
'error': '[!] Invalid base64 image!'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter base64image'
}
@app.route('/api/spamgmail', methods=['GET','POST'])
def spamgimel():
if request.args.get('target'):
if request.args.get('jum'):
abece = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
target_imel = request.args.get('target')
jumlah = int(request.args.get('jum'))
if jumlah > 10:
return {
'status': False,
'msg': '[!] Max 10 tod!'
}
try:
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('[email protected]', 'Barbar05')
hasil = ''
for i in range(jumlah):
mess = ''.join(random.choice(abece) for _ in range(4))
msg = f'From: {random.randint(1, 100)}<Hacker>\nSubject: Anonymous ~ Hacker\n{mess}'
server.sendmail('[email protected]', target_imel, msg)
hasil += '[!] Sukses\n'
server.quit()
return {
'status': 200,
'logs': hasil
}
except Exception as e:
print(e)
hasil = '[!] Gagal'
return {
'status': False,
'logs': hasil
}
else:
return {
'status': False,
'msg': 'Masukkan parameter jum'
}
else:
return {
'status': False,
'msg': 'Masukkan parameter target'
}
@app.route('/api/spamcall', methods=['GET','POST'])
def spamcall():
if request.args.get('no'):
no = request.args.get('no')
if str(no).startswith('8'):
hasil = ''
kyaa = post('https://id.jagreward.com/member/verify-mobile/%s' % no).json()
print(kyaa['message'])
if 'Anda akan menerima' in kyaa['message']:
hasil += '[!] Berhasil mengirim spam call ke nomor : 62%s' % no
else:
hasil += '[!] Gagal mengirim spam call ke nomor : 62%s' % no
return {
'status': 200,
'logs': hasil
}
else:
return {
'status': False,
'msg': '[!] Tolong masukkan nomor dengan awalan 8'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter no'
}
@app.route('/api/spamsms', methods=['GET','POST'])
def spamming():
if request.args.get('no'):
if request.args.get('jum'):
no = request.args.get('no')
jum = int(request.args.get('jum'))
if jum > 20: return {
'status': 200,
'msg': '[!] Max 20 ganteng'
}
url = 'https://www.lpoint.co.id/app/member/ESYMBRJOTPSEND.do'
head = {'UserAgent': 'Mozilla/5.0 (Linux; Android 8.1.0; CPH1853) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Mobile Safari/537.36'}
data = {'pn': '',
'bird': '',
'webMbrId': '',
'webPwd': '',
'maFemDvC': '',
'cellNo': no,
'otpNo': '',
'seq': '',
'otpChk': 'N',
'count': ''
}
hasil = ''
for i in range(jum):
kyaa = post(url, headers=head, data=data).text
if 'error' in kyaa:
hasil += '[!] Gagal\n'
else:
hasil += '[!] Sukses\n'
return {
'status': 200,
'logs': hasil
}
else:
return {
'status': False,
'msg': '[!] Masukkin parameter jum juga ganteng'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter no'
}
@app.route('/nulis', methods=['GET','POST'])
def noolees():
if request.args.get('text'):
try:
nulis = tulis(unquote(request.args.get('text')))
for i in nulis:
i.save('resolt.jpg')
return {
'status': 200,
'result': imageToBase64('resolt.jpg')
}
except:
return {
'status': False,
'error': 'Failed writing dude:('
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter text'
}
@app.route('/api/wiki', methods=['GET','POST'])
def wikipedia():
if request.args.get('q'):
try:
kya = request.args.get('q')
cih = f'https://id.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={kya}'
heuh = get(cih).json()
heuh_ = heuh['query']['pages']
hueh = re.findall(r'(\d+)', str(heuh_))
result = heuh_[hueh[0]]['extract']
return {
'status': 200,
'result': result
}
except Exception as e:
print(e)
return {
'status': False,
'error': '[❗] Yang anda cari tidak bisa saya temukan di wikipedia!'
}
else:
return {
'status': False,
'msg': '[!] Masukkan param q'
}
@app.route('/api/tts', methods=['GET','POST'])
def tts():
if request.args.get('text'):
try:
teks = request.args.get('text')
print(teks)
if len(teks) - len(teks.split(' ')) >= 250: # roughly 250+ non-space characters is treated as too long
return {
'status': 200,
'msg': '[❗] Maaf teks terlalu panjang!!',
}
else:
url = f'https://rest.farzain.com/api/tts.php?id={teks}&apikey='
if os.path.isfile('./tts/tts.mp3'):
os.remove('./tts/tts.mp3')
Tts = get(f'{url}{apiKey}').content
open('tts/tts.mp3','wb').write(Tts)
return {
'status': 200,
'msg': 'Success convert text to speech!',
'file': 'https://mhankbarbar.herokuapp.com/tts/tts.mp3'
}
else:
Tts = get(f'{url}{apiKey}').content
open('tts/tts.mp3','wb').write(Tts)
return {
'status': 200,
'msg': 'Success convert text to speech!',
'file': 'https://mhankbarbar.herokuapp.com/tts/tts.mp3'
}
except Exception as e:
print(e)
return {
'status': False,
'msg': '[!] Upss, terjadi kesalahan'
}
else:
return {
'status': 200,
'msg': '[!] Masukkan parameter text'
}
@app.route('/api/ytv', methods=['GET','POST'])
def ytv():
if request.args.get('url'):
try:
url = request.args.get('url').replace('[','').replace(']','')
ytv = post('https://www.y2mate.com/mates/en60/analyze/ajax',data={'url':url,'q_auto':'0','ajax':'1'}).json()
yaha = bs(ytv['result'], 'html.parser').findAll('td')
filesize = yaha[len(yaha)-23].text
id = re.findall('var k__id = "(.*?)"', ytv['result'])
thumb = bs(ytv['result'], 'html.parser').find('img')['src']
title = bs(ytv['result'], 'html.parser').find('b').text
dl_link = bs(post('https://www.y2mate.com/mates/en60/convert',data={'type':url.split('/')[2],'_id':id[0],'v_id':url.split('/')[3],'ajax':'1','token':'','ftype':'mp4','fquality':'360p'}).json()['result'],'html.parser').find('a')['href']
return {
'status': 200,
'title': title,
'thumb': thumb,
'result': dl_link,
'resolution': '360p',
'filesize': filesize,
'ext': 'mp4'
}
except Exception as e:
print('Error : %s ' % e)
return {
'status': False,
'error': '[❗] Terjadi kesalahan, mungkin link yang anda kirim tidak valid!'
}
else:
return {
'status': False,
'msg': 'Masukkan parameter url'
}
@app.route('/api/yta', methods=['GET','POST'])
def yta():
if request.args.get('url'):
try:
url = request.args.get('url').replace('[','').replace(']','')
yta = post('https://www.y2mate.com/mates/en60/analyze/ajax',data={'url':url,'q_auto':'0','ajax':'1'}).json()
yaha = bs(yta['result'], 'html.parser').findAll('td')
filesize = yaha[len(yaha)-10].text
id = re.findall('var k__id = "(.*?)"', yta['result'])
thumb = bs(yta['result'], 'html.parser').find('img')['src']
title = bs(yta['result'], 'html.parser').find('b').text
dl_link = bs(post('https://www.y2mate.com/mates/en60/convert',data={'type':url.split('/')[2],'_id':id[0],'v_id':url.split('/')[3],'ajax':'1','token':'','ftype':'mp3','fquality':'128'}).json()['result'],'html.parser').find('a')['href']
return {
'status': 200,
'title': title,
'thumb': thumb,
'filesize': filesize,
'result': dl_link,
'ext': 'mp3'
}
except Exception as e:
print('Error : %s' % e)
return {
'status': False,
'error': '[❗] Terjadi kesalahan mungkin link yang anda kirim tidak valid!'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter url'
}
@app.route('/api/chord', methods=['GET','POST'])
def chord():
if request.args.get('q'):
try:
q = request.args.get('q').replace(' ','+')
id = get('http://app.chordindonesia.com/?json=get_search_results&exclude=date,modified,attachments,comment_count,comment_status,thumbnail,thumbnail_images,author,excerpt,content,categories,tags,comments,custom_fields&search=%s' % q).json()['posts'][0]['id']
chord = get('http://app.chordindonesia.com/?json=get_post&id=%s' % id).json()
result = html_text.parse_html(chord['post']['content']).text_content()
return {
'status': 200,
'result': result
}
except Exception as e:
print(e)
return {
'status': False,
'error': '[❗] Maaf chord yang anda cari tidak dapat saya temukan!'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/dewabatch', methods=['GET','POST'])
def dewabatch():
if request.args.get('q'):
try:
q = request.args.get('q')
he=search_dewabatch(quote(q))
dewabatch=cari(he)
if he != '':
return {
'status': 200,
'sinopsis': dewabatch['result'],
'thumb': dewabatch['cover'],
'result': dewabatch['info']
}
except Exception as e:
print(e)
return {
'status': False,
'error': 'Anime %s Tidak di temukan!' % unquote(q)
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/komiku', methods=['GET','POST'])
def komiku():
if request.args.get('q'):
try:
q = request.args.get('q')
komi = search_komiku(q)
if 'Tidak di temukan' not in komi:
manga = scrap_komiku(komi)
return {
'status': 200,
'info': manga['info'],
'genre': manga['genre'],
'sinopsis': manga['sinopsis'],
'thumb': manga['thumb'],
'link_dl': manga['dl_link']
}
except Exception as e:
print(e)
return {
'status': False,
'error': 'Manga %s Tidak di temukan' % unquote(q)
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/kuso', methods=['GET','POST'])
def kusonime():
if request.args.get('q'):
try:
q = request.args.get('q')
he=search_kusonime(quote(q))
kuso=scrap_kusonime(he)
if he != '':
return {
'status': 200,
'sinopsis': kuso['sinopsis'],
'thumb': kuso['thumb'],
'info': kuso['info'],
'title': kuso['title'],
'link_dl': kuso['link_dl']
}
except Exception as e:
print(e)
return {
'status': False,
'error': 'Anime %s Tidak di temukan' % unquote(q)
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/otakudesu')
def otakudesuu():
if request.args.get('q'):
try:
q = request.args.get('q')
he=search_otakudesu(quote(q))
if he != '':
otaku=scrap_otakudesu(he)
return {
'status': 200,
'sinopsis': otaku['sinopsis'],
'thumb': otaku['thumb'],
'info': otaku['info'],
'title': otaku['title']
}
except Exception as e:
print(e)
return {
'status': False,
'error': 'Anime %s Tidak di temukan' % unquote(q)
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/brainly', methods=['GET','POST'])
def brainly_scraper():
if request.args.get('q'):
try:
query = request.args.get('q')
br=brainly(gsearch('"%s" site:brainly.co.id' % quote(query), lang='id')[0])
return {
'status': 200,
'result': br
}
except Exception as e:
print(e)
return {
'status': False,
'error': '[❗] Pertanyaan %s tidak dapat saya temukan di brainly' % unquote(query)
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/nekonime', methods=['GET','POST'])
def nekonimek():
try:
neko = get('https://waifu.pics/api/sfw/neko').json()
nimek = neko['url']
return {
'status': 200,
'result': nimek
}
except:
neko = get('https://waifu.pics/api/sfw/neko').json()
nimek = neko['url']
return {
'status': 200,
'result': nimek
}
@app.route('/api/randomloli', methods=['GET','POST'])
def randomloli():
try:
hehe = ['kawaii','neko']
loli = get('https://api.lolis.life/%s' % random.choice(hehe)).json()['url']
return {
'status': 200,
'result': loli
}
except:
loli = get('https://api.lolis.life/%s' % random.choice(hehe)).json()['url']
return {
'status': 200,
'result': loli
}
@app.route('/api/ig', methods=['GET','POST'])
def igeh():
if request.args.get('url'):
try:
url = request.args.get('url')
data = {'id': url}
result = get('https://www.villahollanda.com/api.php?url=' + url).json()
if result['descriptionc'] == None:
return {
'status': False,
'result': 'https://c4.wallpaperflare.com/wallpaper/976/117/318/anime-girls-404-not-found-glowing-eyes-girls-frontline-wallpaper-preview.jpg',
}
else:
return {
'status': 200,
'result': result['descriptionc'],
}
except Exception as e:
print(e)
return {
'status': False,
'result': 'https://c4.wallpaperflare.com/wallpaper/976/117/318/anime-girls-404-not-found-glowing-eyes-girls-frontline-wallpaper-preview.jpg',
'error': True
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter url'
}
@app.route('/api/cuaca', methods=['GET','POST'])
def cuaca():
if request.args.get('q'):
try:
q = request.args.get('q')
print(q)
url = f'https://rest.farzain.com/api/cuaca.php?id={q}&apikey='
weather = get(f'{url}{apiKey}').json()
print(weather)
if weather['respon']['deskripsi'] == 'null' or weather['respon']['deskripsi'] == None:
return {
'status': 404,
'error': '[❗] Gagal mengambil informasi cuaca, mungkin tempat tidak terdaftar/salah!'
}
else:
return {
'status': 200,
'result': {
'tempat': weather['respon']['tempat'],
'cuaca': weather['respon']['cuaca'],
'desk': weather['respon']['deskripsi'],
'suhu': weather['respon']['suhu'],
'kelembapan': weather['respon']['kelembapan'],
'udara': weather['respon']['udara'],
'angin': weather['respon']['angin']
},
'creator': 'Mhank BarBar'
}
except Exception as e:
print('Error : %s' % e)
return {
'status': False,
'msg': '[❗] Gagal mengambil informasi cuaca, mungkin tempat tidak terdaftar/salah!'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter q'
}
@app.route('/api/stalk', methods=['GET','POST'])
def stalk():
if request.args.get('username'):
try:
username = request.args.get('username').replace('@','')
igestalk = bs(get('https://www.mystalk.net/profile/%s' % username, headers={'User-Agent':'Mozilla/5.0 (Linux; Android 8.1.0; CPH1909) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.101 Mobile Safari/537.36'}).text, 'html.parser').find('div', class_='user-profile-area')
igestalk_ = igestalk.findAll('span')
thumb = igestalk.find('img')['src']
return {
'status': 200,
'Name': igestalk_[0].text.strip(),
'Username': igestalk_[1].text.strip(),
'Jumlah_Post': igestalk_[2].text.replace('\n',' ').strip(),
'Jumlah_Followers': igestalk_[3].text.replace('\n',' ').strip(),
'Jumlah_Following': igestalk_[4].text.replace('\n',' ').strip(),
'Biodata': igestalk.find('p').text.strip(),
'Profile_pic': thumb
}
except Exception as e:
print(e)
return {
'status': False,
'error': '[❗] Username salah!!'
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter username'
}
@app.route('/daerah', methods=['GET','POST'])
def daerah():
daerah = 'Andreas, Ambon, Amlapura, Alford, Argamakmur, Atambua, Babo, Bagan Siapiapi, Central Kalimantan, Birmingham, Samosir, Balikpapan, Banda Aceh, Bandar Lampung, Bandung, Bangkalan, Cianjur, Bangko, Bangli, Banjar, Banjar Baru, Banjarmasin, Corn, BANTAENG , Banten, Bantul, Banyuwangi, Barabai, Barito, Barru, Batam, Batang, Batu, Baturaja, Batusangkar, Baubau, Bekasi, Bengkalis, Bengkulu, Benteng, Biak, Bima, Binjai, Bireuen, Bitung, Blitar, Blora, Bogor, Bojonegoro , Bondowoso, Bontang, Boyolali, Brebes, Bukit Tinggi, Maluku, Bulukumba, Buntok, Cepu, Ciamis, Cianjur, Cibinong, Cilacap, Cilegon, Cimahi, Cirebon, Curup, Demak, Denpasar, Depok, Dili, Dompu, Donggala, Dumai, Ende, Enggano, Enrekang, Fakfak, Garut, Gianyar, Gombong, Gorontalo, Gresik, Gunung Sitoli, Indramayu, Jakarta Barat, Jakarta Pusat, Jakarta Selatan, Jakarta Timur, Jakarta Utara, Jambi,Jayapura, Jember, Jeneponto, Jepara, Jombang, Kabanjahe, Kalabahi, Kalianda, Kandangan, Karanganyar, Karawang, Kasungan, Kayuagung, Kebumen, Kediri, Kefamenanu, Kendal, Kendari, Kertosono, Ketapang, Kisaran, Klaten, Kolaka, Kota Baru Pulau Laut , Bumi Bumi, Kota Jantho, Kotamobagu, Kuala Kapuas, Kuala Kurun, Kuala Pembuang, Kuala Tungkal, Kudus, Kuningan, Kupang, Kutacane, Kutoarjo, Labuhan, Lahat, Lamongan, Langsa, Larantuka, Lawang, Lhoseumawe, Limboto, Lubuk Basung, Lubuk Linggau, Lubuk Pakam, Lubuk Sikaping, Lumajang, Luwuk, Madiun, Magelang, Magetan, Majalengka, Majene, Makale, Makassar, Malang, Mamuju, Manna, Manokwari, Marabahan, Maros, Martapura Kalsel, Sulsel, Masohi, Mataram, Maumere, Medan, Mempawah, Menado, Mentok, Merauke, Metro, Meulaboh, Mojokerto, Muara Bulian, Muara Bungo, Muara Enim, Muara Teweh, Muaro Sijunjung, Muntilan, Nabire,Negara, Nganjuk, Ngawi, Nunukan, Pacitan, Padang, Padang Panjang, Padang Sidempuan, Pagaralam, Painan, Palangkaraya, Palembang, Palopo, Palu, Pamekasan, Pandeglang, Pangka_, Pangkajene Sidenreng, Pangkalan Bun, Pangkalpinang, Panyabungan, Par_, Parepare, Pariaman, Pasuruan, Pati, Payakumbuh, Pekalongan, Pekan Baru, Pemalang, Pematangsiantar, Pendopo, Pinrang, Pleihari, Polewali, Pondok Gede, Ponorogo, Pontianak, Poso, Prabumulih, Praya, Probolinggo, Purbalingga, Purukcahu, Purwakarta, Purwodadigrobogan, Purwarta Purworejo, Putussibau, Raha, Rangkasbitung, Rantau, Rantauprapat, Rantepao, Rembang, Rengat, Ruteng, Sabang, Salatiga, Samarinda, Kalbar, Sampang, Sampit, Sanggau, Sawahlunto, Sekayu, Selong, Semarang, Sengkang, Serang, Serui, Sibolga, Sidikalang, Sidoarjo, Sigli, Singaparna, Singaraja, Singkawang, Sinjai, Sintang, Situbondo, Slawi,Sleman, Soasiu, Soe, Solo, Solok, Soreang, Sorong, Sragen, Stabat, Subang, Sukabumi, Sukoharjo, Sumbawa Besar, Sumedang, Sumenep, Sungai Liat, Sungai Penuh, Sungguminasa, Surabaya, Surakarta, Tabanan, Tahuna, Takalar, Takengon , Tamiang Layang, Tanah Grogot, Tangerang, Tanjung Balai, Tanjung Enim, Tanjung Pandan, Tanjung Pinang, Tanjung Redep, Tanjung Selor, Tapak Tuan, Tarakan, Tarutung, Tasikmalaya, Tebing Tinggi, Tegal, Temanggung, Tembilahan, Tenggarong, Ternate, Tolitoli , Tondano, Trenggalek, Tual, Tuban, Tulung Agung, Ujung Berung, Ungaran, Waikabubak, Waingapu, Wamena, Watampone, Watansoppeng, Wates, Wonogiri, Wonosari, Wonosobo, YogyakartaTakalar, Takengon, Tamiang Layang, Tanah Grogot, Tangerang, Tanjung Balai, Tanjung Enim, Tanjung Pandan, Tanjung Pinang, Tanjung Redep, Tanjung Selor, Tapak Tuan, Tarakan, Tarutung, Tasikmalaya, Tebing Tinggi, Tegal, Temanggung, Tembilahan, Tenggarong, Ternate, Tolitoli, Tondano, Trenggalek, Tual, Tuban, Tulung 
Agung, Ujung Berung, Ungaran, Waikabubak, Waingapu, Wamena, Watampone, Watansoppeng, Wates, Wonogiri, Wonosari, Wonosobo, YogyakartaTakalar, Takengon, Tamiang Layang, Tanah Grogot, Tangerang, Tanjung Balai, Tanjung Enim, Tanjung Pandan, Tanjung Pinang, Tanjung Redep, Tanjung Selor, Tapak Tuan, Tarakan, Tarutung, Tasikmalaya, Tebing Tinggi, Tegal, Temanggung, Tembilahan, Tenggarong, Ternate, Tolitoli, Tondano, Trenggalek, Tual, Tuban, Tulung Agung, Ujung Berung, Ungaran, Waikabubak, Waingapu, Wamena, Watampone, Watansoppeng, Wates, Wonogiri, Wonosari, Wonosobo, YogyakartaWonogiri, Wonosari, Wonosobo, YogyakartaWonogiri, Wonosari, Wonosobo, Yogyakarta'
no = 1
hasil = ''
for i in daerah.split(','):
hasil += '%s. %s\n' % (no, i)
no += 1
return {
'status': 200,
'result': hasil
}
@app.route('/api/jadwalshalat', methods=['GET','POST'])
def jadwalshalat():
if request.args.get('daerah'):
try:
daer = request.args.get('daerah')
daerah = 'Ambarawa, Ambon, Amlapura, Amuntai, Argamakmur, Atambua, Babo, Bagan Siapiapi, Kalteng, Bajawa, Balige, Balikpapan, Banda Aceh, Bandarlampung, Bandung, Bangkalan, Bangkinang, Bangko, Bangli, Banjar, Banjar Baru, Banjarmasin, Banjarnegara, Bantaeng, Banten, Bantul, Banyuwangi, Barabai, Barito, Barru, Batam, Batang, Batu, Baturaja, Batusangkar, Baubau, Bekasi, Bengkalis, Bengkulu, Benteng, Biak, Bima, Binjai, Bireuen, Bitung, Blitar, Blora, Bogor, Bojonegoro, Bondowoso, Bontang, Boyolali, Brebes, Bukit Tinggi, Maluku, Bulukumba, Buntok, Cepu, Ciamis, Cianjur, Cibinong, Cilacap, Cilegon, Cimahi, Cirebon, Curup, Demak, Denpasar, Depok, Dili, Dompu, Donggala, Dumai, Ende, Enggano, Enrekang, Fakfak, Garut, Gianyar, Gombong, Gorontalo, Gresik, Gunung Sitoli, Indramayu, Jakarta Barat, Jakarta Pusat, Jakarta Selatan, Jakarta Timur, Jakarta Utara, Jambi, Jayapura, Jember, Jeneponto, Jepara, Jombang, Kabanjahe, Kalabahi, Kalianda, Kandangan, Karanganyar, Karawang, Kasungan, Kayuagung, Kebumen, Kediri, Kefamenanu, Kendal, Kendari, Kertosono, Ketapang, Kisaran, Klaten, Kolaka, Kota Baru Pulau Laut, Kota Bumi, Kota Jantho, Kotamobagu, Kuala Kapuas, Kuala Kurun, Kuala Pembuang, Kuala Tungkal, Kudus, Kuningan, Kupang, Kutacane, Kutoarjo, Labuhan, Lahat, Lamongan, Langsa, Larantuka, Lawang, Lhoseumawe, Limboto, Lubuk Basung, Lubuk Linggau, Lubuk Pakam, Lubuk Sikaping, Lumajang, Luwuk, Madiun, Magelang, Magetan, Majalengka, Majene, Makale, Makassar, Malang, Mamuju, Manna, Manokwari, Marabahan, Maros, Martapura Kalsel, Sulsel, Masohi, Mataram, Maumere, Medan, Mempawah, Menado, Mentok, Merauke, Metro, Meulaboh, Mojokerto, Muara Bulian, Muara Bungo, Muara Enim, Muara Teweh, Muaro Sijunjung, Muntilan, Nabire, Negara, Nganjuk, Ngawi, Nunukan, Pacitan, Padang, Padang Panjang, Padang Sidempuan, Pagaralam, Painan, Palangkaraya, Palembang, Palopo, Palu, Pamekasan, Pandeglang, Pangka_, Pangkajene Sidenreng, Pangkalan Bun, Pangkalpinang, Panyabungan, Par_, Parepare, Pariaman, Pasuruan, Pati, Payakumbuh, Pekalongan, Pekan Baru, Pemalang, Pematangsiantar, Pendopo, Pinrang, Pleihari, Polewali, Pondok Gede, Ponorogo, Pontianak, Poso, Prabumulih, Praya, Probolinggo, Purbalingga, Purukcahu, Purwakarta, Purwodadigrobogan, Purwokerto, Purworejo, Putussibau, Raha, Rangkasbitung, Rantau, Rantauprapat, Rantepao, Rembang, Rengat, Ruteng, Sabang, Salatiga, Samarinda, Kalbar, Sampang, Sampit, Sanggau, Sawahlunto, Sekayu, Selong, Semarang, Sengkang, Serang, Serui, Sibolga, Sidikalang, Sidoarjo, Sigli, Singaparna, Singaraja, Singkawang, Sinjai, Sintang, Situbondo, Slawi, Sleman, Soasiu, Soe, Solo, Solok, Soreang, Sorong, Sragen, Stabat, Subang, Sukabumi, Sukoharjo, Sumbawa Besar, Sumedang, Sumenep, Sungai Liat, Sungai Penuh, Sungguminasa, Surabaya, Surakarta, Tabanan, Tahuna, Takalar, Takengon, Tamiang Layang, Tanah Grogot, Tangerang, Tanjung Balai, Tanjung Enim, Tanjung Pandan, Tanjung Pinang, Tanjung Redep, Tanjung Selor, Tapak Tuan, Tarakan, Tarutung, Tasikmalaya, Tebing Tinggi, Tegal, Temanggung, Tembilahan, Tenggarong, Ternate, Tolitoli, Tondano, Trenggalek, Tual, Tuban, Tulung Agung, Ujung Berung, Ungaran, Waikabubak, Waingapu, Wamena, Watampone, Watansoppeng, Wates, Wonogiri, Wonosari, Wonosobo, Yogyakarta'
url = f'https://api.haipbis.xyz/jadwalsholat?daerah={daer}'
jadwal = get(url).json()
return {
'Imsyak': jadwal['Imsyak'],
'Subuh': jadwal['Subuh'],
'Dhuha': jadwal['Dhuha'],
'Dzuhur': jadwal['Dzuhur'],
'Ashar': jadwal['Ashar'],
'Maghrib': jadwal['Maghrib'],
'Isya': jadwal['Isya']
}
except:
return {
'status': False,
'error': '[❗] Daerah yang tersedia hanya : %s' % daerah
}
else:
return {
'status': False,
'msg': '[!] Masukkan parameter daerah'
}
@app.route('/api/waifu', methods=['GET','POST'])
def waifu():
scrap = bs(get('https://mywaifulist.moe/random').text, 'html.parser')
a = json.loads(scrap.find('script', attrs={'type':'application/ld+json'}).string)
desc = bs(get(a['url']).text, 'html.parser').find('meta', attrs={'property':'og:description'}).attrs['content']
result = json.loads(bs(get(a['url']).text, 'html.parser').find('script', attrs={'type':'application/ld+json'}).string)
if result['gender'] == 'female':
return {
'status': 200,
'name': result['name'],
'desc': desc,
'image': result['image'],
'source': result['url']
}
else:
return {
'status': 200,
'name': '%s (husbu)' % result['name'],
'desc': desc,
'image': result['image'],
'source': result['url']
}
@app.route('/api/infogempa', methods=['GET','POST'])
def infogempa():
be = bs(get('https://www.bmkg.go.id/').text, 'html.parser').find('div', class_="col-md-4 md-margin-bottom-10")
em = be.findAll('li')
img = be.find('a')['href']
return {
'status': 200,
'map': img,
'waktu': em[0].text,
'magnitude': em[1].text,
'kedalaman': em[2].text,
'koordinat': em[3].text,
'lokasi': em[4].text,
'potensi': em[5].text
}
@app.route('/api/randomquotes', methods=['GET','POST'])
def quotes():
quotes_file = json.loads(open('quotes.json').read())
result = random.choice(quotes_file)
print(result)
return {
'status': 200,
'author': result['author'],
'quotes': result['quotes']
}
@app.route('/api/quotesnime/random', methods=['GET','POST'])
def quotesnimerandom():
quotesnime = get('https://h4ck3rs404-api.herokuapp.com/api/animequote?apikey=404Api').json()['data'][0]
print(quotesnime)
return {
'status': 200,
'data': {
'anime': quotesnime['anime'],
'character': quotesnime['chara'],
'quote': quotesnime['quote'],
}
}
@app.route('/api', methods=['GET','POST'])
def api():
return render_template('api.html')
@app.route('/', methods=['GET','POST'])
def index():
return render_template('index.html')
@app.errorhandler(404)
def error(e):
return render_template('error.html'), 404
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(os.environ.get('PORT','5000')),debug=True)
|
py | 7dfc39e6cb3a7de3afd5d7a94d2a7e84167b538e | import base64
import hashlib
import os
import random
import sys
import time
from datetime import datetime, timedelta
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.crypto import constant_time_compare, salted_hmac
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
MAX_SESSION_KEY = 18446744073709551616L # 2 << 63
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def keys(self):
return self._session.keys()
def items(self):
return self._session.items()
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, *args):
self.modified = self.modified or key in self._session
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
hash = self._hash(pickled)
return base64.encodestring(hash + ":" + pickled)
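# Note: decode() below reverses this encoding, so for any picklable dict d,
# self.decode(self.encode(d)) == d as long as the HMAC hash check passes.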
def decode(self, session_data):
encoded_data = base64.decodestring(session_data)
try:
# could produce ValueError if there is no ':'
hash, pickled = encoded_data.split(':', 1)
expected_hash = self._hash(pickled)
if not constant_time_compare(hash, expected_hash):
raise SuspiciousOperation("Session data corrupted")
else:
return pickle.loads(pickled)
except Exception:
# ValueError, SuspiciousOperation, unpickling exceptions
# Fall back to Django 1.2 method
# PendingDeprecationWarning <- here to remind us to
# remove this fallback in Django 1.5
try:
return self._decode_old(session_data)
except Exception:
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
return {}
def _decode_old(self, session_data):
encoded_data = base64.decodestring(session_data)
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
if not constant_time_compare(hashlib.md5(pickled + settings.SECRET_KEY).hexdigest(),
tamper_check):
raise SuspiciousOperation("User tampered with session cookie.")
return pickle.loads(pickled)
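# A minimal sketch of the encode/decode round trip above (it assumes Django
# settings are configured with a SECRET_KEY; the storage backend is irrelevant
# here because these two methods only touch the payload):
#
#   s = SessionBase()
#   blob = s.encode({"user_id": 42})
#   s.decode(blob)   # -> {"user_id": 42}
#
# decode() verifies the salted HMAC before unpickling, falls back to the legacy
# MD5 check, and returns an empty dict if both fail.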
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return self._session.has_key(key)
def values(self):
return self._session.values()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def _get_new_session_key(self):
"Returns session key that isn't being used."
# The random module is seeded when this Apache child is created.
# Use settings.SECRET_KEY as added salt.
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example
pid = 1
while 1:
session_key = hashlib.md5("%s%s%s%s"
% (randrange(0, MAX_SESSION_KEY), pid, time.time(),
settings.SECRET_KEY)).hexdigest()
if not self.exists(session_key):
break
return session_key
def _get_session_key(self):
if self._session_key:
return self._session_key
else:
self._session_key = self._get_new_session_key()
return self._session_key
def _set_session_key(self, session_key):
self._session_key = session_key
session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self._session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self):
"""Get the number of seconds until the session expires."""
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - datetime.now()
return delta.days * 86400 + delta.seconds
def get_expiry_date(self):
"""Get session the expiry date (as a datetime object)."""
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return datetime.now() + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = datetime.now() + value
self['_session_expiry'] = value
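# A minimal sketch of the accepted ``value`` types (the session instance is
# assumed to come from a concrete backend subclass):
#
#   session.set_expiry(300)                  # expire after 5 minutes of inactivity
#   session.set_expiry(timedelta(days=1))    # expire at now + 1 day
#   session.set_expiry(0)                    # expire on browser close
#   session.set_expiry(None)                 # fall back to the global policy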
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self.create()
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError
|
py | 7dfc39e89f03a42c3ba906f0fbaf87f7072b41a7 | import math
import os
import random
import shutil
from pathlib import Path
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from build_utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
# get orientation in exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == "Orientation":
break
def exif_size(img):
"""
get the original img size
check whether the image was rotated via its EXIF orientation tag; if so, return the size before rotation
:param img: PIL image
:return: the size of original image
"""
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotate 270 counter-clockwise
s = (s[1], s[0])
elif rotation == 8: # rotate 90 counter-clockwise
s = (s[1], s[0])
except:
# skip over if there is no rotation information
pass
return s
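# A minimal usage sketch of exif_size (the path is hypothetical):
#
#   from PIL import Image
#   w, h = exif_size(Image.open("my_yolo_dataset/train/images/0001.jpg"))
#
# The returned (width, height) already accounts for EXIF orientation tags 6 and 8
# (270/90 degree rotations), so the .shapes cache below stores upright sizes.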
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self,
path,  # points to train_data.txt or val_data.txt
# set the image size after pre-processing
# when dealing with training images, this is the biggest size (if multi-scale training is on)
# when dealing with evaluation images, this is the final size used
img_size=480, # originally 416
batch_size=16,
augment=False,  # True for training data (augment_hsv), False for val data
hyp=None, # hyperparameter dictionary, including the hyperparameters for image augmentation
rect=False, # whether use rectangular training or not
cache_images=True,  # originally False; whether images are cached in RAM
single_cls=False, pad=0.0, rank=-1):
try:
path = str(Path(path))
# parent = str(Path(path).parent) + os.sep
if os.path.isfile(path): # file
# read train/val_data.txt file line by line
with open(path, "r") as f:
f = f.read().splitlines()
else:
raise Exception("%s does not exist" % path)
# check if every image's file type is supported
# img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
self.img_files = [x for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except Exception as e:
raise FileNotFoundError("Error loading data from {}. {}".format(path, e))
# raise an error if there is no image in the image list
# no using all the images, only 5000 of them
# if len(self.img_files) > 5000:
# n = 5000
# if "train" in str(Path(path)):
# print("\nUsing 5000 images only for training")
# elif "val" in str(Path(path)):
# print("\nUsing 5000 images only for validation")
# else:
# n = len(self.img_files)
n = len(self.img_files)
assert n > 0, "No images found in %s. See %s" % (path, help_url)
# batch index
# split data into individual batches
bi = np.floor(np.arange(n) / batch_size).astype(np.int)
# record the total number of batches after splitting
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size # image size after pre-processing
self.augment = augment # use augment_hsv or not
self.hyp = hyp # hyperparameter dictionary
self.rect = rect # use rectangular training or not
# ATTENTION: once rect is turned on, mosaic will be turned off
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
# loop through the path for labels of each image
# (./my_yolo_dataset/train/images/2009_004012.jpg) -> (./my_yolo_dataset/train/labels/2009_004012.txt)
self.label_files = [x.replace("images", "labels").replace("-color", "-box").replace(os.path.splitext(x)[-1], ".txt")
for x in self.img_files]
# Read image shapes (wh)
# check whether there is a .shapes file for the dataset, which stores the width and height of each image
sp = path.replace(".txt", ".shapes") # shapefile path
try:
with open(sp, "r") as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
# check if the number of lines in shapes file is the same amount of images in current dataset
# re-create .shapes file if the number does not match
assert len(s) == n, "shapefile out of sync"
except Exception as e:
# print("read {} failed [{}], rebuild {}.".format(sp, e, sp))
# read the information of every image and display with tqdm
if rank in [-1, 0]:
image_files = tqdm(self.img_files, desc="Reading image shapes")
else:
image_files = self.img_files
s = [exif_size(Image.open(f)) for f in image_files]
# save the shape information of every image in .shapes file
np.savetxt(sp, s, fmt="%g") # overwrite existing (if any)
# record the original size of every image
self.shapes = np.array(s, dtype=np.float64)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
# when rect is True, training uses a rectangular input whose aspect ratio is close to the original image
# the longest side is set to img_size instead of forcing a square img_size x img_size input
# mosaic off if rect is on
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
# calculate the height/width ratio for each image
ar = s[:, 1] / s[:, 0] # aspect ratio
# argsort returns the indices that sort the ratios from smallest to largest
# arrange based on the height/width ratio so images in one batch will have similar ratio
irect = ar.argsort()
# rearrange the sequence of images, labels and shape according to the ratio sequence
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# set training image shapes
# calculate the size of each batch
shapes = [[1, 1]] * nb # nb: number of batches
for i in range(nb):
ari = ar[bi == i] # bi: batch index
# get the smallest and biggest height/width ratio in the i th batch
mini, maxi = ari.min(), ari.max()
# if w > h, set w as img_size
if maxi < 1:
shapes[i] = [maxi, 1]
# if w < h, set h as img_size
elif mini > 1:
shapes[i] = [1, 1 / mini]
# calculate the input shape for every batch fed into the network (rounded up to integer multiples of 32)
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32. + pad).astype(np.int) * 32
# cache labels
self.imgs = [None] * n # n is the total number of images
# label: [class, x, y, w, h] xywh are relative values
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
extract_bounding_boxes, labels_loaded = False, False
nm, nf, ne, nd = 0, 0, 0, 0 # number mission, found, empty, duplicate
# name .npy differently to avoid mis-calculating mAP when rect is True/False
# when rect is True, re-arrange self.images and self.labels
if rect is True:
np_labels_path = str(Path(self.label_files[0]).parent) + ".rect.npy" # saved labels in *.npy file
else:
np_labels_path = str(Path(self.label_files[0]).parent) + ".norect.npy"
if os.path.isfile(np_labels_path):
x = np.load(np_labels_path, allow_pickle=True)
if len(x) == n:
# if the number of labels is equal to current number of images
# it is the same dataset and we'll directly load data from memory
self.labels = x
labels_loaded = True
# the processing progress is only shown in the first thread
if rank in [-1, 0]:
pbar = tqdm(self.label_files)
else:
pbar = self.label_files
# load labels from all files
for i, file in enumerate(pbar):
if labels_loaded is True:
# load from memory if it is the same dataset
l = self.labels[i]
else:
# read data from the label files
try:
with open(file, "r") as f:
# read every line of labels and divide by space
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except Exception as e:
print("An error occurred while loading the file {}: {}".format(file, e))
nm += 1 # file missing
continue
# if the label file is not empty
if l.shape[0]:
# every label must have five values [class, x, y, w, h]
assert l.shape[1] == 5, "> 5 label columns: %s" % file
assert (l >= 0).all(), "negative labels: %s" % file
assert (l[:, 1:] <= 1).all(), "non-normalized or out of bounds coordinate labels: %s" % file
# check every line for repeated information
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = "%s%sclassifier%s%g_%g_%s" % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
# translate from relative coordinate values to absolute values
# b: x, y, w, h
b = x[1:] * [w, h, w, h] # box
# set height and width to the bigger value in [height, width]
b[2:] = b[2:].max() # rectangle to square
# increase the height and width of the image
b[2:] = b[2:] * 1.3 + 30 # pad
# transform from x,y,w,h to xmin,ymin,xmax,ymax
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
# clip the bbox coordinates to the image boundaries
b[[0, 2]] = np.clip(b[[0, 2]], 0, w)
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), "Failure extracting classifier boxes"
else:
ne += 1 # file empty
# the processing progress is only shown in the first thread
if rank in [-1, 0]:
# update the information in progress bar
pbar.desc = "Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)" % (
nf, nm, ne, nd, n)
assert nf > 0, "No labels found in %s." % os.path.dirname(self.label_files[0]) + os.sep
# if the labels were not loaded from a .npy cache and there are more than 1000 training samples, save them in numpy format for faster future loading
if not labels_loaded and n > 1000:
print("Saving labels to %s for faster future loading" % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (Warning: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images, to record cache usage
if rank in [-1, 0]:
pbar = tqdm(range(len(self.img_files)), desc="Caching images")
else:
pbar = range(len(self.img_files))
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes # record RAM usage
if rank in [-1, 0]:
pbar.desc = "Caching images (%.1fGB)" % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc="Detecting corrupted images"):
try:
_ = io.imread(file)
except Exception as e:
print("Corrupted image detected: {}, {}".format(file, e))
def __len__(self):
return len(self.img_files)
def __getitem__(self, index):
hyp = self.hyp
if self.mosaic:
# load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# load image
img, (h0, w0), (h, w) = load_image(self, index)
# letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scale_up=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy() # label: class, x, y, w, h
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp["degrees"],
translate=hyp["translate"],
scale=hyp["scale"],
shear=hyp["shear"])
# Augment colorspace
augment_hsv(img, h_gain=hyp["hsv_h"], s_gain=hyp["hsv_s"], v_gain=hyp["hsv_v"])
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0-1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True # random horizontal flip
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1] # 1 - x_center
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2] # 1 - y_center
labels_out = torch.zeros((nL, 6)) # nL: number of labels
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert BGR to RGB, and HWC to CHW(3x512x512)
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes, index
def coco_index(self, index):
"""
this method is used for preparing label information for cocotools
does not process images or labels
"""
# load image
# path = self.img_files[index]
# img = cv2.imread(path) # BGR
# import matplotlib.pyplot as plt
# plt.imshow(img[:, :, ::-1])
# plt.show()
# assert img is not None, "Image Not Found " + path
# o_shapes = img.shape[:2] # orig hw
o_shapes = self.shapes[index][::-1] # wh to hw
# Convert BGR to RGB, and HWC to CHW(3x512x512)
# img = img[:, :, ::-1].transpose(2, 0, 1)
# img = np.ascontiguousarray(img)
# load labels
labels = []
x = self.labels[index]
if x.size > 0:
labels = x.copy() # label: class, x, y, w, h
return torch.from_numpy(labels), o_shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes, index = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes, index
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, "Image Not Found " + path
h0, w0 = img.shape[:2] # orig hw
# img_size image size after pre-processing
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # resize whenever the loaded size differs from img_size
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def load_mosaic(self, index):
"""
concatenate four images into one mosaic image
:param self:
:param index: the index of the required image
:return:
"""
# loads images in a mosaic
labels4 = [] # concatenate the labels
s = self.img_size
# choose a random center point for the concatenated image
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
# get three random images from the dataset
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
# concatenate four images together
for i, index in enumerate(indices):
# load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
# create mosaic image
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
# calculate the coordinate information in mosaic image(fill the image into the mosaic image)
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
# calculate the crop information (put the image to the top left of the mosaic image,
# drop the area that falls outside the border)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
# put the cropped image to the corresponding position in mosaic
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
# calculate pad (distance between the image border and the mosaic border; negative if the border is crossed)
padw = x1a - x1b
padh = y1a - y1b
# labels holds the label information for all the concatenated images
# [class_index, x_center, y_center, w, h]
x = self.labels[index]
labels = x.copy() # copy the labels to prevent using original information
if x.size > 0: # Normalized xywh to pixel xyxy format
# calculated the labels in mosaic image (absolute x and y)
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw # xmin
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh # ymin
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw # xmax
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh # ymax
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# random rotate, resize, translation and shear
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
"""random rotate, resize, translation and shear"""
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
# the given input image size, which equals img4.shape / 2
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
# generate rotation and scale matrix
R = np.eye(3) # diagonal matrix
a = random.uniform(-degrees, degrees) # random rotation degrees
s = random.uniform(1 - scale, 1 + scale) # random scale number
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
# generate translation matrix
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
# generate shear matrix
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
# [4*n, 3] -> [n, 8]
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
# correct the bbox after transform (if the bbox becomes rhombus, need to correct to rectangle)
x = xy[:, [0, 2, 4, 6]] # [n, 4]
y = xy[:, [1, 3, 5, 7]] # [n, 4]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T # [n, 4]
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
# calculate the area of each box after the transform
area = w * h
# calculate the area of each box before the transform
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
# calculate ratio of every box
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
# keep boxes whose sides are longer than 4 pixels, whose area ratio (after/before) is above 0.2, and whose aspect ratio is below 10
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
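# A minimal sketch of how random_affine is applied to a mosaic below (the
# hyperparameter values here are hypothetical, not taken from any shipped config):
#
#   img4, labels4 = random_affine(img4, labels4, degrees=5, translate=0.05,
#                                 scale=0.2, shear=2, border=-img_size // 2)
#
# Because M = S @ T @ R, a point p is rotated/scaled first, then translated,
# then sheared: p' = S @ (T @ (R @ p)).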
def augment_hsv(img, h_gain=0.5, s_gain=0.5, v_gain=0.5):
r = np.random.uniform(-1, 1, 3) * [h_gain, s_gain, v_gain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
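# A minimal usage sketch of augment_hsv (the gain values are hypothetical):
#
#   augment_hsv(img, h_gain=0.0138, s_gain=0.678, v_gain=0.36)
#
# The image is modified in place through per-channel look-up tables on H, S and V;
# nothing is returned.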
def letterbox(img: np.ndarray,
new_shape=(416, 416),
color=(114, 114, 114),
auto=True,
scale_fill=False,
scale_up=True):
"""
scale the image to the size needed
:param img:
:param new_shape:
:param color:
:param auto:
:param scale_fill:
:param scale_up:
:return:
"""
shape = img.shape[:2] # [h, w]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scale_up: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle: keep the scale ratio and pad each side only up to the next multiple of 32
# keep the image after padding is integral multiples of 32
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scale_fill: # stretch the image to designated size
dw, dh = 0, 0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # wh ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
# shape:[h, w] new_unpad:[w, h]
if shape[::-1] != new_unpad:
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) # calculate padding on top and bottom
left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) # calculate padding on left and right
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
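# A minimal usage sketch of letterbox (the path and target size are hypothetical):
#
#   img = cv2.imread("my_yolo_dataset/val/images/0001.jpg")
#   img_padded, ratio, (dw, dh) = letterbox(img, new_shape=512, auto=False)
#
# ratio and (dw, dh) are what __getitem__ uses to map the normalized xywh labels
# into the padded pixel coordinate system.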
def create_folder(path="./new_folder"):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder |
py | 7dfc3a4b35f29bf6ed4deb0e88f306d8690179ab | #!/usr/bin/python
import sqlite3
import pickle
import traceback
import base64
import hashlib
from counters import increment
from threading import Lock
from utils import sucky_uuid
DOCUMENT_LOCK = Lock()
class Document:
def __init__(self):
DOCUMENT_LOCK.acquire()
self.connection = sqlite3.connect("db.sqlite", 300)
self.connection.text_factory = str
self.cursor = self.connection.cursor()
self.cursor.execute("PRAGMA foreign_keys = ON")
self.connection.commit()
self.cursor.execute('''CREATE TABLE IF NOT EXISTS TBL_JSON_COL (
COLUUID VARCHAR(36),
NAME VARCHAR(64) UNIQUE NOT NULL,
PRIMARY KEY (COLUUID));''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS TBL_JSON_OBJ (
OBJUUID VARCHAR(36),
COLUUID VARCHAR(36),
VALUE BLOB NOT NULL,
PRIMARY KEY (OBJUUID),
FOREIGN KEY (COLUUID) REFERENCES TBL_JSON_COL(COLUUID) ON DELETE CASCADE);''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS TBL_JSON_ATTR (
COLUUID VARCHAR(36),
ATTRIBUTE VARCHAR(64),
PATH VARCHAR(64),
PRIMARY KEY (COLUUID, ATTRIBUTE),
FOREIGN KEY (COLUUID) REFERENCES TBL_JSON_COL(COLUUID) ON DELETE CASCADE);''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS TBL_JSON_IDX (
OBJUUID VARCHAR(36),
COLUUID VARCHAR(36),
ATTRIBUTE VARCHAR(64),
VALUE VARCHAR(64),
PRIMARY KEY (OBJUUID, ATTRIBUTE),
FOREIGN KEY (OBJUUID) REFERENCES TBL_JSON_OBJ(OBJUUID) ON DELETE CASCADE,
FOREIGN KEY (COLUUID, ATTRIBUTE) REFERENCES TBL_JSON_ATTR(COLUUID, ATTRIBUTE) ON DELETE CASCADE);''')
self.connection.commit()
DOCUMENT_LOCK.release()
def vacuum(self):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("VACUUM;")
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def create_object(self, coluuid, objuuid):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("insert into TBL_JSON_OBJ (COLUUID, OBJUUID, VALUE) values (?, ?, ?);", \
(str(coluuid), str(objuuid), pickle.dumps({"objuuid" : objuuid, "coluuid" : coluuid})))
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def set_object(self, coluuid, objuuid, object):
try:
DOCUMENT_LOCK.acquire()
object["objuuid"] = objuuid
object["coluuid"] = coluuid
self.cursor.execute("update TBL_JSON_OBJ set VALUE = ? where OBJUUID = ?;", \
(pickle.dumps(object), str(objuuid)))
self.cursor.execute("delete from TBL_JSON_IDX where OBJUUID = ?;", (objuuid,))
attributes = self.list_attributes(coluuid)
for attribute_name in attributes:
try:
self.cursor.execute("""insert into TBL_JSON_IDX (OBJUUID, COLUUID, ATTRIBUTE, VALUE)
values (?, ?, ?, ?);""", \
(str(objuuid), \
str(coluuid), \
str(attribute_name), \
str(eval("str(self.get_object(objuuid)" + attributes[attribute_name] + ")"))))
except:
continue
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def get_object(self, objuuid):
self.cursor.execute("select VALUE from TBL_JSON_OBJ where OBJUUID = ?;", (str(objuuid),))
self.connection.commit()
return pickle.loads(self.cursor.fetchall()[0][0])
def find_objects(self, coluuid, attribute, value):
self.cursor.execute("select OBJUUID from TBL_JSON_IDX where ATTRIBUTE = ? and VALUE = ? and COLUUID = ?;", \
(str(attribute), str(value), str(coluuid)))
self.connection.commit()
objuuids = []
for row in self.cursor.fetchall():
objuuids.append(row[0])
return objuuids
def delete_object(self, objuuid):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("delete from TBL_JSON_OBJ where OBJUUID = ?;", (str(objuuid),))
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def create_attribute(self, coluuid, attribute, path):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("insert into TBL_JSON_ATTR (COLUUID, ATTRIBUTE, PATH) values (?, ?, ?);", \
(str(coluuid), str(attribute), str(path)))
self.cursor.execute("delete from TBL_JSON_IDX where ATTRIBUTE = ? and COLUUID = ?;", (str(attribute), str(coluuid)))
self.cursor.execute("select OBJUUID, VALUE from TBL_JSON_OBJ where COLUUID = ?;", (str(coluuid),))
objects = {}
for row in self.cursor.fetchall():
objects[row[0]] = pickle.loads(row[1])
for objuuid in objects:
try:
self.cursor.execute("""insert into TBL_JSON_IDX (OBJUUID, COLUUID, ATTRIBUTE, VALUE)
values (?, ?, ?, ?);""", \
(str(objuuid), \
str(coluuid), \
str(attribute), \
str(eval("str(objects[objuuid]" + path + ")"))))
except:
continue
except:
pass
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def delete_attribute(self, coluuid, attribute):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("delete from TBL_JSON_ATTR where COLUUID = ? and ATTRIBUTE = ?;", \
(str(coluuid), str(attribute)))
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def list_attributes(self, coluuid):
self.cursor.execute("select ATTRIBUTE, PATH from TBL_JSON_ATTR where COLUUID = ?;", (str(coluuid),))
self.connection.commit()
attributes = {}
for row in self.cursor.fetchall():
attributes[row[0]] = row[1]
return attributes
def create_collection(self, uuid = None, name = "New Collection"):
try:
DOCUMENT_LOCK.acquire()
if not uuid:
uuid = sucky_uuid()
self.cursor.execute("insert into TBL_JSON_COL (COLUUID, NAME) values (?, ?);", \
(str(uuid), str(name)))
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
return uuid
def delete_collection(self, uuid):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("delete from TBL_JSON_COL where COLUUID = ?;", (str(uuid),))
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def rename_collection(self, uuid, name):
try:
DOCUMENT_LOCK.acquire()
self.cursor.execute("update TBL_JSON_COL set NAME = ? where COLUUID = ?;", \
(str(name), str(uuid)))
finally:
self.connection.commit()
DOCUMENT_LOCK.release()
def list_collections(self):
self.cursor.execute("select NAME, COLUUID from TBL_JSON_COL;")
self.connection.commit()
collections = {}
for row in self.cursor.fetchall():
collections[row[0]] = row[1]
return collections
def list_collection_objects(self, coluuid):
self.cursor.execute("select OBJUUID from TBL_JSON_OBJ where COLUUID = ?;", (coluuid,))
self.connection.commit()
objuuids = []
for row in self.cursor.fetchall():
objuuids.append(row[0])
return objuuids
def list_objects(self):
self.cursor.execute("select OBJUUID from TBL_JSON_OBJ;")
self.connection.commit()
objuuids = []
for row in self.cursor.fetchall():
objuuids.append(row[0])
return objuuids
def __del__(self):
self.connection.close()
class Object(Document):
def __init__(self, coluuid, objuuid):
Document.__init__(self)
self.objuuid = objuuid
self.coluuid = coluuid
self.load()
increment("sql object reads")
def load(self):
try:
self.object = Document.get_object(self, self.objuuid)
except IndexError:
Document.create_object(self, self.coluuid, self.objuuid)
self.object = Document.get_object(self, self.objuuid)
increment("sql object writes")
finally:
increment("sql object reads")
def set(self):
Document.set_object(self, self.coluuid, self.objuuid, self.object)
increment("sql object writes")
def destroy(self):
Document.delete_object(self, self.objuuid)
self.object = None
increment("sql object writes")
class Collection(Document):
def __init__(self, collection_name):
Document.__init__(self)
self.collection_name = collection_name
try:
self.coluuid = Document.list_collections(self)[self.collection_name]
except KeyError:
self.coluuid = Document.create_collection(self, name = self.collection_name)
def destroy(self):
Document.delete_collection(self, self.coluuid)
def rename(self, name):
Document.rename_collection(self, self.coluuid, name)
self.collection_name = name
def create_attribute(self, attribute, path):
Document.create_attribute(self, self.coluuid, attribute, path)
def delete_attribute(self, attribute):
Document.delete_attribute(self, self.coluuid, attribute)
def find(self, **kargs):
objuuid_sets = []
if len(kargs) == 0:
objuuid_sets.append(self.list_objuuids())
for attribute, value in kargs.iteritems():
objuuid_sets.append(Document.find_objects(self, self.coluuid, attribute, value))
intersection = set(objuuid_sets[0])
for objuuids in objuuid_sets[1:]:
intersection = intersection.intersection(set(objuuids))
objects = []
for objuuid in list(intersection):
objects.append(Object(self.coluuid, objuuid))
return objects
def find_objuuids(self, **kargs):
objuuid_sets = []
if len(kargs) == 0:
objuuid_sets.append(self.list_objuuids())
for attribute, value in kargs.iteritems():
objuuid_sets.append(Document.find_objects(self, self.coluuid, attribute, value))
intersection = set(objuuid_sets[0])
for objuuids in objuuid_sets[1:]:
intersection = intersection.intersection(set(objuuids))
objuuids = []
for objuuid in list(intersection):
objuuids.append(objuuid)
return objuuids
def get_object(self, objuuid = None):
if not objuuid:
objuuid = sucky_uuid()
return Object(self.coluuid, objuuid)
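# A minimal usage sketch of the document store (collection and attribute names
# are hypothetical):
#
#   users = Collection("users")
#   users.create_attribute("name", "['name']")   # the path is applied to each object dict
#   obj = users.get_object()                      # new object with a fresh UUID
#   obj.object["name"] = "alice"
#   obj.set()                                     # persists and re-indexes the object
#   users.find_objuuids(name="alice")             # -> [obj.objuuid]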
def list_objuuids(self):
return Document.list_collection_objects(self, self.coluuid) |
py | 7dfc3aaa5443187b593e791a592a984958943d43 | """Get your own public IP address or that of any host."""
from __future__ import annotations
from datetime import timedelta
import logging
import aiodns
from aiodns.error import DNSError
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
SensorEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import (
CONF_HOSTNAME,
CONF_IPV4,
CONF_IPV6,
CONF_RESOLVER,
CONF_RESOLVER_IPV6,
DEFAULT_HOSTNAME,
DEFAULT_IPV6,
DEFAULT_RESOLVER,
DEFAULT_RESOLVER_IPV6,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=120)
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_HOSTNAME, default=DEFAULT_HOSTNAME): cv.string,
vol.Optional(CONF_RESOLVER, default=DEFAULT_RESOLVER): cv.string,
vol.Optional(CONF_RESOLVER_IPV6, default=DEFAULT_RESOLVER_IPV6): cv.string,
vol.Optional(CONF_IPV6, default=DEFAULT_IPV6): cv.boolean,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_devices: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the DNS IP sensor."""
_LOGGER.warning(
"Configuration of the DNS IP platform in YAML is deprecated and will be "
"removed in Home Assistant 2022.4; Your existing configuration "
"has been imported into the UI automatically and can be safely removed "
"from your configuration.yaml file"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the dnsip sensor entry."""
hostname = entry.data[CONF_HOSTNAME]
name = entry.data[CONF_NAME]
resolver_ipv4 = entry.options[CONF_RESOLVER]
resolver_ipv6 = entry.options[CONF_RESOLVER_IPV6]
entities = []
if entry.data[CONF_IPV4]:
entities.append(WanIpSensor(name, hostname, resolver_ipv4, False))
if entry.data[CONF_IPV6]:
entities.append(WanIpSensor(name, hostname, resolver_ipv6, True))
async_add_entities(entities, update_before_add=True)
class WanIpSensor(SensorEntity):
"""Implementation of a DNS IP sensor."""
_attr_icon = "mdi:web"
def __init__(
self,
name: str,
hostname: str,
resolver: str,
ipv6: bool,
) -> None:
"""Initialize the DNS IP sensor."""
self._attr_name = f"{name} IPv6" if ipv6 else name
self._attr_unique_id = f"{hostname}_{ipv6}"
self.hostname = hostname
self.resolver = aiodns.DNSResolver()
self.resolver.nameservers = [resolver]
self.querytype = "AAAA" if ipv6 else "A"
self._attr_extra_state_attributes = {
"Resolver": resolver,
"Querytype": self.querytype,
}
self._attr_device_info = DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, f"{hostname}_{ipv6}")},
manufacturer="DNS",
model=aiodns.__version__,
name=hostname,
)
async def async_update(self) -> None:
"""Get the current DNS IP address for hostname."""
try:
response = await self.resolver.query(self.hostname, self.querytype)
except DNSError as err:
_LOGGER.warning("Exception while resolving host: %s", err)
response = None
if response:
self._attr_native_value = response[0].host
self._attr_available = True
else:
self._attr_available = False
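# A minimal sketch of the DNS lookup performed above, outside Home Assistant
# (resolver address and hostname are hypothetical):
#
#   resolver = aiodns.DNSResolver()
#   resolver.nameservers = ["208.67.222.222"]
#   result = await resolver.query("myip.opendns.com", "A")
#   result[0].host   # -> the resolved IPv4 address as a string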
|
py | 7dfc3ab26c0dded2a54f15fe6cb80beffa6f226f | # content of test_sysexit.py
import numpy as np
import pandas as pd
import nccid_cleaning as nc
def test_example():
# Load our test case and reference output
df = pd.read_csv("notebooks/data/example.csv")
# Some type coercion for the loaded CSV fields
date_cols = [
"date_of_positive_covid_swab",
"date_of_admission",
"swabdate",
"latest_swab_date",
]
dtypes = {
"pmh_lung_disease": "object",
"cxr_severity_2": "object",
"covid_code": "object",
}
df_target = pd.read_csv(
"notebooks/data/example_cleaned.csv",
parse_dates=date_cols,
dtype=dtypes,
)
# Do actual cleaning
df_cleaned = nc.clean_data_df(df, nc.patient_df_pipeline)
# Test the equivalence
pd.testing.assert_frame_equal(df_cleaned, df_target)
def test_coerce_numeric_columns_when_no_values():
val = ""
cols = (
"Fibrinogen if d-dimer not performed",
"Urea on admission",
"O2 saturation",
"Temperature on admission",
)
df = pd.DataFrame([[val for _ in range(len(cols))]], columns=cols)
df = nc.cleaning._coerce_numeric_columns(df)
output_cols = [col for col in df.columns if col not in cols]
assert (df[output_cols].dtypes == 'float64').all()
|
py | 7dfc3aeb84bbc489d111ae43cc8ede5c33d68310 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import QuestionAnsweringClientOperationsMixin
__all__ = [
"QuestionAnsweringClientOperationsMixin",
]
|
py | 7dfc3b982dae477e487ce3e4a749ed33a3184d85 | __all__ = ('ComponentSelect',)
import reprlib
from scarletio import copy_docs, include
from .component_base import ComponentBase
from .component_select_option import ComponentSelectOption
from .debug import (
_debug_component_custom_id, _debug_component_enabled, _debug_component_max_values, _debug_component_min_values,
_debug_component_options, _debug_component_placeholder
)
from .preinstanced import ComponentType
create_auto_custom_id = include('create_auto_custom_id')
class ComponentSelect(ComponentBase):
"""
Select component.
Attributes
----------
custom_id : `str`
Custom identifier to detect which component was used by the user.
enabled : `bool`
Whether the component is enabled.
options : `None`, `tuple` of ``ComponentSelectOption``
Options of the select.
placeholder : `str`
Placeholder text of the select.
max_values : `int`
The maximal amount of options to select. Can be in range [1:25]. Defaults to `1`.
min_values : `int`
The minimal amount of options to select. Can be in range [1:15]. Defaults to `1`.
Class Attributes
----------------
type : ``ComponentType`` = `ComponentType.select`
The component's type.
"""
type = ComponentType.select
__slots__ = ('custom_id', 'enabled', 'options', 'placeholder', 'max_values', 'min_values', )
def __new__(cls, options, custom_id=None, *, enabled=True, placeholder=None, max_values=1, min_values=1):
"""
Creates a new ``ComponentSelect`` with the given parameters.
Parameters
----------
options : `None` or (`list`, `tuple`) of ``ComponentSelectOption``
Options of the select.
custom_id : `None`, `str` = `None`, Optional
Custom identifier to detect which component was used by the user.
enabled : `bool` = `True`, Optional (Keyword only)
Whether the button is enabled. Defaults to `True`.
placeholder : `None`, `str` = `None`, Optional (Keyword only)
Placeholder text of the select.
max_values : `int` = `1`, Optional (Keyword only)
The maximal amount of options to select. Can be in range [1:25].
min_values : `int` = `1`, Optional (Keyword only)
The minimal amount of options to select. Can be in range [1:15].
Raises
------
AssertionError
- If `custom_id` is not given as `None`, `str`.
- If `custom_id`'s length is out of range [0:100].
- If `options` length is out from the expected range [1:25].
- If `options` is neither `None` nor (`list`, `tuple`) of ``ComponentSelectOption`` elements.
- If `min_values` is not `int`.
- If `min_values` is out of range [1:15].
- If `max_values` is not `int`.
- If `max_values` is out of range [1:25].
- If `enabled` was not given as `bool`.
"""
if __debug__:
_debug_component_custom_id(custom_id)
_debug_component_enabled(enabled)
_debug_component_options(options)
_debug_component_placeholder(placeholder)
_debug_component_min_values(min_values)
_debug_component_max_values(max_values)
# custom_id
if (custom_id is None) or (not custom_id):
custom_id = create_auto_custom_id()
# enabled
# No additional checks
# options
if (options is not None):
options = tuple(options)
if (not options):
options = None
# placeholder
if (placeholder is not None) and (not placeholder):
placeholder = None
# max_values
# No additional checks
# min_values
# No additional checks
self = object.__new__(cls)
self.custom_id = custom_id
self.enabled = enabled
self.options = options
self.placeholder = placeholder
self.max_values = max_values
self.min_values = min_values
return self
@classmethod
@copy_docs(ComponentBase.from_data)
def from_data(cls, data):
self = object.__new__(cls)
# custom_id
self.custom_id = data['custom_id']
# enabled
self.enabled = not data.get('disabled', False)
# options
option_datas = data['options']
if option_datas:
options = tuple(ComponentSelectOption.from_data(option_data) for option_data in option_datas)
else:
options = None
self.options = options
# placeholder
placeholder = data.get('placeholder', None)
if (placeholder is not None) and (not placeholder):
placeholder = None
self.placeholder = placeholder
# max_values
self.max_values = data.get('max_values', 1)
# min_values
self.min_values = data.get('min_values', 1)
return self
@copy_docs(ComponentBase.to_data)
def to_data(self):
# type & custom_id
data = {
'type': self.type.value,
'custom_id': self.custom_id,
}
# enabled
if (not self.enabled):
data['disabled'] = True
# options
options = self.options
if options is None:
options_value = []
else:
options_value = [option.to_data() for option in options]
data['options'] = options_value
# placeholder
placeholder = self.placeholder
if (placeholder is not None):
data['placeholder'] = placeholder
# max_values
max_values = self.max_values
if max_values != 1:
data['max_values'] = max_values
# min_values
min_values = self.min_values
if min_values != 1:
data['min_values'] = min_values
return data
@copy_docs(ComponentBase.__repr__)
def __repr__(self):
repr_parts = ['<', self.__class__.__name__]
# Descriptive fields : type
# type
type_ = self.type
repr_parts.append(' type=')
repr_parts.append(type_.name)
repr_parts.append(' (')
repr_parts.append(repr(type_.value))
repr_parts.append(')')
# System fields : custom_id & options
# custom_id
repr_parts.append(', custom_id=')
repr_parts.append(reprlib.repr(self.custom_id))
# options
repr_parts.append(', options=')
options = self.options
if (options is None):
repr_parts.append('[]')
else:
repr_parts.append('[')
index = 0
limit = len(options)
while True:
option = options[index]
index += 1
repr_parts.append(repr(option))
if index == limit:
break
repr_parts.append(', ')
continue
repr_parts.append(']')
repr_parts.append('>')
# Text fields : placeholder
# placeholder
placeholder = self.placeholder
if (placeholder is not None):
repr_parts.append(', placeholder=')
repr_parts.append(repr(placeholder))
# Optional descriptive fields: min_values & max_values & enabled
# min_values
min_values = self.min_values
if min_values != 1:
repr_parts.append(', min_values=')
repr_parts.append(repr(min_values))
# max_values
max_values = self.max_values
if max_values != 1:
repr_parts.append(', max_values=')
repr_parts.append(repr(max_values))
# enabled
enabled = self.enabled
if (not enabled):
repr_parts.append(', enabled=')
repr_parts.append(repr(enabled))
return ''.join(repr_parts)
@copy_docs(ComponentBase.copy)
def copy(self):
new = object.__new__(type(self))
# custom_id
new.custom_id = self.custom_id
# enabled
new.enabled = self.enabled
# options
options = self.options
if (options is not None):
options = tuple(option.copy() for option in options)
new.options = options
# placeholder
new.placeholder = self.placeholder
# max_values
new.max_values = self.max_values
# min_values
new.min_values = self.min_values
return new
def copy_with(self, **kwargs):
"""
Copies the component and modifies the created one with the given parameters.
Parameters
----------
**kwargs : Keyword parameters
Keyword parameters referencing attributes.
Other Parameters
----------------
custom_id : `None`, `str`, Optional (Keyword only)
Custom identifier to detect which component was used by the user.
enabled : `bool`, Optional (Keyword only)
Whether the button is enabled. Defaults to `True`.
options : `None` or (`list`, `tuple`) of ``ComponentSelectOption``, Optional (Keyword only)
Options of the select.
placeholder : `str`, Optional (Keyword only)
Placeholder text of the select.
max_values : `int`, Optional (Keyword only)
The maximal amount of options to select. Can be in range [1:25]. Defaults to `1`.
min_values : `int`, Optional (Keyword only)
The minimal amount of options to select. Can be in range [1:15]. Defaults to `1`.
Returns
-------
new : ``ComponentSelect``
"""
# custom_id
try:
custom_id = kwargs.pop('custom_id')
except KeyError:
custom_id = self.custom_id
else:
if __debug__:
_debug_component_custom_id(custom_id)
if custom_id is None:
custom_id = self.custom_id
# enabled
try:
enabled = kwargs.pop('enabled')
except KeyError:
enabled = self.enabled
else:
if __debug__:
_debug_component_enabled(enabled)
# options
try:
options = kwargs.pop('options')
except KeyError:
options = self.options
if (options is not None):
options = tuple(option.copy() for option in options)
else:
if __debug__:
_debug_component_options(options)
if (options is not None):
options = tuple(options)
if (not options):
options = None
# placeholder
try:
placeholder = kwargs.pop('placeholder')
except KeyError:
placeholder = self.placeholder
else:
if __debug__:
_debug_component_placeholder(placeholder)
if (placeholder is not None) and (not placeholder):
placeholder = None
# max_values
try:
max_values = kwargs.pop('max_values')
except KeyError:
max_values = self.max_values
else:
if __debug__:
_debug_component_max_values(max_values)
# min_values
try:
min_values = kwargs.pop('min_values')
except KeyError:
min_values = self.min_values
else:
if __debug__:
_debug_component_min_values(min_values)
if kwargs:
raise TypeError(f'Unused or unsettable attributes: {kwargs!r}.')
new = object.__new__(type(self))
new.custom_id = custom_id
new.enabled = enabled
new.options = options
new.placeholder = placeholder
new.max_values = max_values
new.min_values = min_values
return new
@copy_docs(ComponentBase.__eq__)
def __eq__(self, other):
if type(other) is not type(self):
return NotImplemented
# custom_id
if self.custom_id != other.custom_id:
return False
# enabled
if self.enabled != other.enabled:
return False
# options
if self.options != other.options:
return False
# placeholder
if self.placeholder != other.placeholder:
return False
# max_values
if self.max_values != other.max_values:
return False
# min_values
if self.min_values != other.min_values:
return False
return True
@copy_docs(ComponentBase.__hash__)
def __hash__(self):
hash_value = self.type.value
# custom_id
hash_value ^= hash(self.custom_id)
# enabled
if self.enabled:
hash_value ^= 1 << 8
# options
options = self.options
if (options is not None):
hash_value ^= len(options) << 12
for option in options:
hash_value ^= hash(option)
# placeholder
placeholder = self.placeholder
if (placeholder is not None):
hash_value ^= hash(placeholder)
# max_values
max_values = self.max_values
if (max_values != 1):
hash_value ^= (max_values << 18)
# min_values
min_values = self.min_values
if (min_values != 1):
hash_value ^= (min_values << 22)
return hash_value
@copy_docs(ComponentBase._iter_components)
def _iter_components(self):
yield self
options = self.options
if (options is not None):
for option in options:
yield from option._iter_components()
@copy_docs(ComponentBase._replace_direct_sub_components)
def _replace_direct_sub_components(self, relation):
options = self.options
if (options is not None):
self.options = tuple(relation.get(option, option) for option in options)
@copy_docs(ComponentBase._iter_direct_sub_components)
def _iter_direct_sub_components(self):
options = self.options
if (options is not None):
yield from options
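# A minimal usage sketch (the option values and labels are hypothetical, and it
# assumes ``ComponentSelectOption`` accepts a value and a label):
#
#   select = ComponentSelect(
#       [ComponentSelectOption('cat', 'Cat'), ComponentSelectOption('dog', 'Dog')],
#       custom_id='animal_picker',
#       placeholder='Pick an animal',
#   )
#   data = select.to_data()   # the serializable payload sent to the Discord API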
|
py | 7dfc3c3e58b299da2923ec1a427fef234c3229b1 | import logging
import random
import string
import collections
import re
from pyzabbix import ZabbixAPI, ZabbixAPIException
class ZabbixConn(object):
"""
Zabbix connector class
Defines methods for managing Zabbix users and groups
"""
def __init__(self, config, ldap_conn):
self.ldap_conn = ldap_conn
self.server = config.zbx_server
self.username = config.zbx_username
self.password = config.zbx_password
self.auth = config.zbx_auth
self.dryrun = config.zbx_dryrun
self.nocheckcertificate = config.zbx_nocheckcertificate
self.ldap_groups = config.ldap_groups
self.ldap_media = config.ldap_media
self.media_opt = config.media_opt
self.media_description = config.media_description
self.user_opt = config.user_opt
if self.nocheckcertificate:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
if config.ldap_wildcard_search:
self.ldap_groups = ldap_conn.get_groups_with_wildcard()
# Use logger to log information
self.logger = logging.getLogger()
if config.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Log to stdout
ch = logging.StreamHandler()
if config.verbose:
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger.addHandler(ch) # Use logger to log information
# Log from pyzabbix
log = logging.getLogger('pyzabbix')
log.addHandler(ch)
if config.verbose:
log.setLevel(logging.DEBUG)
def connect(self):
"""
Establishes a connection to the Zabbix server
Raises:
SystemExit
"""
if self.auth == "webform":
self.conn = ZabbixAPI(self.server)
elif self.auth == "http":
self.conn = ZabbixAPI(self.server, use_authenticate=False)
self.conn.session.auth = (self.username, self.password)
else:
raise SystemExit('api auth method not implemented: %s' % self.auth)
if self.nocheckcertificate:
self.conn.session.verify = False
try:
self.conn.login(self.username, self.password)
except ZabbixAPIException as e:
raise SystemExit('Cannot login to Zabbix server: %s' % e)
self.logger.info("Connected to Zabbix API Version %s" % self.conn.api_version())
def get_users(self):
"""
Retrieves the existing Zabbix users
Returns:
A list of the existing Zabbix users
"""
result = self.conn.user.get(output='extend')
users = [user['alias'] for user in result]
return users
def get_mediatype_id(self, description):
"""
Retrieves the mediatypeid by description
Args:
description (str): Zabbix media type description
Returns:
The mediatypeid for specified media type description
"""
result = self.conn.mediatype.get(filter={'description': description})
if result:
mediatypeid = result[0]['mediatypeid']
else:
mediatypeid = None
return mediatypeid
def get_user_id(self, user):
"""
Retrieves the userid of a specified user
Args:
user (str): The Zabbix username to lookup
Returns:
The userid of the specified user
"""
result = self.conn.user.get(output='extend')
userid = [u['userid'] for u in result if u['alias'] == user].pop()
return userid
def get_groups(self):
"""
Retrieves the existing Zabbix groups
Returns:
A dict of the existing Zabbix groups and their group ids
"""
result = self.conn.usergroup.get(status=0, output='extend')
groups = [{'name': group['name'], 'usrgrpid': group['usrgrpid']} for group in result]
return groups
def get_group_members(self, groupid):
"""
Retrieves group members for a Zabbix group
Args:
groupid (int): The group id
Returns:
A list of the Zabbix users for the specified group id
"""
result = self.conn.user.get(output='extend', usrgrpids=groupid)
users = [user['alias'] for user in result]
return users
def create_group(self, group):
"""
Creates a new Zabbix group
Args:
group (str): The Zabbix group name to create
Returns:
The groupid of the newly created group
"""
result = self.conn.usergroup.create(name=group)
groupid = result['usrgrpids'].pop()
return groupid
def create_user(self, user, groupid, user_opt):
"""
Creates a new Zabbix user
Args:
user (dict): A dict containing the user details
groupid (int): The groupid for the new user
user_opt (dict): User options
"""
random_passwd = ''.join(random.sample(string.ascii_letters + string.digits, 32))
user_defaults = {'autologin': 0, 'type': 1, 'usrgrps': [{'usrgrpid': str(groupid)}], 'passwd': random_passwd}
user_defaults.update(user_opt)
user.update(user_defaults)
result = self.conn.user.create(user)
return result
def delete_user(self, user):
"""
Deletes Zabbix user
Args:
user (string): Zabbix username
"""
userid = self.get_user_id(user)
result = self.conn.user.delete(userid)
return result
def update_user(self, user, groupid):
"""
Adds an existing Zabbix user to a group
Args:
user (dict): A dict containing the user details
groupid (int): The groupid to add the user to
"""
userid = self.get_user_id(user)
result = self.conn.usergroup.massadd(usrgrpids=[str(groupid)], userids=[str(userid)])
return result
def update_media(self, user, description, sendto, media_opt):
"""
Adds media to an existing Zabbix user
Args:
user (dict): A dict containing the user details
description (str): A string containing Zabbix media description
sendto (str): A string containing address, phone number, etc...
media_opt (dict): Media options
"""
userid = self.get_user_id(user)
mediatypeid = self.get_mediatype_id(description)
if mediatypeid:
media_defaults = {
'mediatypeid': mediatypeid,
'sendto': sendto,
'active': '0',
'severity': '63',
'period': '1-7,00:00-24:00'
}
media_defaults.update(media_opt)
self.delete_media_by_description(user, description)
result = self.conn.user.addmedia(users=[{"userid": str(userid)}], medias=media_defaults)
else:
result = None
return result
def delete_media_by_description(self, user, description):
"""
Remove all media from user (with specific mediatype)
Args:
user (dict): A dict containing the user details
description (str): A string containing Zabbix media description
"""
userid = self.get_user_id(user)
mediatypeid = self.get_mediatype_id(description)
if mediatypeid:
user_full = self.conn.user.get(output="extend", userids=userid, selectMedias=["mediatypeid", "mediaid"])
media_ids = [int(u['mediaid']) for u in user_full[0]['medias'] if u['mediatypeid'] == mediatypeid]
if media_ids:
self.logger.info('Remove other exist media from user %s (type=%s)' % (user, description))
for id in media_ids:
self.conn.user.deletemedia(id)
def create_missing_groups(self):
"""
Creates any missing LDAP groups in Zabbix
"""
missing_groups = set(self.ldap_groups) - set([g['name'] for g in self.get_groups()])
for eachGroup in missing_groups:
self.logger.info('Creating Zabbix group %s' % eachGroup)
if not self.dryrun:
grpid = self.create_group(eachGroup)
self.logger.info('Group %s created with groupid %s' % (eachGroup, grpid))
    def convert_severity(self, severity):
        """
        Converts a comma-separated list of severity names (e.g. "High,Disaster")
        into the numeric bitmask expected by the Zabbix API. Numeric input is
        returned unchanged.
        """
        converted_severity = severity.strip()
        if re.match(r"\d+", converted_severity):
            return converted_severity
        # Order matters: the first entry maps to the most significant bit.
        sev_entries = collections.OrderedDict([
            ("Disaster", "0"),
            ("High", "0"),
            ("Average", "0"),
            ("Warning", "0"),
            ("Information", "0"),
            ("Not Classified", "0"),
        ])
        for sev in converted_severity.split(","):
            sev = sev.strip()
            if sev not in sev_entries:
                raise Exception("wrong argument: %s" % sev)
            sev_entries[sev] = "1"
        str_bitmask = ""
        for sev, digit in sev_entries.items():
            str_bitmask += digit
        converted_severity = str(int(str_bitmask, 2))
        self.logger.info('Converted severity "%s" to "%s"' % (severity, converted_severity))
        return converted_severity
def sync_users(self):
"""
Syncs Zabbix with LDAP users
"""
self.ldap_conn.connect()
zabbix_all_users = self.get_users()
for eachGroup in self.ldap_groups:
ldap_users = self.ldap_conn.get_group_members(eachGroup)
# Do nothing if LDAP group contains no users and "--delete-orphans" is not specified
if not ldap_users and not self.deleteorphans:
continue
zabbix_grpid = [g['usrgrpid'] for g in self.get_groups() if g['name'] == eachGroup].pop()
zabbix_group_users = self.get_group_members(zabbix_grpid)
missing_users = set(list(ldap_users.keys())) - set(zabbix_group_users)
# Add missing users
for eachUser in missing_users:
# Create new user if it does not exists already
if eachUser not in zabbix_all_users:
self.logger.info('Creating user "%s", member of Zabbix group "%s"' % (eachUser, eachGroup))
user = {'alias': eachUser}
user['name'] = self.ldap_conn.get_user_givenName(ldap_users[eachUser]).decode('utf8')
user['surname'] = self.ldap_conn.get_user_sn(ldap_users[eachUser]).decode('utf8')
if user['name'] is None:
user['name'] = ''
if user['surname'] is None:
user['surname'] = ''
self.create_user(user, zabbix_grpid, self.user_opt)
zabbix_all_users.append(eachUser)
else:
# Update existing user to be member of the group
self.logger.info('Updating user "%s", adding to group "%s"' % (eachUser, eachGroup))
self.update_user(eachUser, zabbix_grpid)
# Handle any extra users in the groups
extra_users = set(zabbix_group_users) - set(list(ldap_users.keys()))
if extra_users:
self.logger.info('Users in group %s which are not found in LDAP group:' % eachGroup)
for eachUser in extra_users:
if self.deleteorphans:
self.logger.info('Deleting user: "%s"' % eachUser)
if not self.dryrun:
self.delete_user(eachUser)
else:
self.logger.info(' * %s' % eachUser)
# update users media
onlycreate = False
media_opt_filtered = []
for elem in self.media_opt:
if elem[0] == "onlycreate" and elem[1].lower() == "true":
onlycreate = True
if elem[0] == "severity":
media_opt_filtered.append(
(elem[0], self.convert_severity(elem[1]))
)
else:
media_opt_filtered.append(elem)
if onlycreate:
self.logger.info("Add media only on newly created users for group >>>%s<<<" % eachGroup)
zabbix_group_users = missing_users
else:
self.logger.info("Update media on all users for group >>>%s<<<" % eachGroup)
zabbix_group_users = self.get_group_members(zabbix_grpid)
            for eachUser in set(zabbix_group_users):
                if eachUser not in ldap_users:
                    # Zabbix-only members have no LDAP entry to read media from.
                    continue
                self.logger.info('>>> Updating/create user media for "%s", update "%s"' % (eachUser, self.media_description))
                sendto = self.ldap_conn.get_user_media(ldap_users[eachUser], self.ldap_media)
                if sendto and not self.dryrun:
                    self.update_media(eachUser, self.media_description, sendto.decode("utf8"), media_opt_filtered)
self.ldap_conn.disconnect()
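
# Illustrative sketch of how this connector is typically driven, assuming the
# enclosing class is constructed from a config object and an LDAP connection
# wrapper exposing the attributes referenced above (names are assumptions):
#
#     zabbix = ZabbixConn(config, ldap_conn)
#     zabbix.connect()
#     zabbix.create_missing_groups()
#     zabbix.sync_users()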
|
py | 7dfc3c552d5a1e7523726e336a6879d65c7c94cc | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Behavioral Cloning Agents.
Implements generic form of behavioral cloning.
Users must provide their own loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import tensorflow as tf
from tf_agents.agents import tf_agent
from tf_agents.policies import epsilon_greedy_policy
from tf_agents.policies import greedy_policy
from tf_agents.policies import q_policy
from tf_agents.utils import common
from tf_agents.utils import eager_utils
from tf_agents.utils import nest_utils
class BehavioralCloningLossInfo(collections.namedtuple(
'BehavioralCloningLossInfo', ('loss',))):
"""Stores a per-batch-entry loss value."""
pass
@gin.configurable
class BehavioralCloningAgent(tf_agent.TFAgent):
"""An behavioral cloning Agent.
Implements behavioral cloning, wherein the network learns to clone
given experience. Users must provide their own loss functions. Note this
implementation will use a QPolicy. To use with other policies subclass this
agent and override the `_get_policies` method. Note the cloning_network must
match the requirements of the generated policies.
Behavioral cloning was proposed in the following articles:
Pomerleau, D.A., 1991. Efficient training of artificial neural networks for
autonomous navigation. Neural Computation, 3(1), pp.88-97.
Russell, S., 1998, July. Learning agents for uncertain environments.
In Proceedings of the eleventh annual conference on Computational learning
theory (pp. 101-103). ACM.
"""
# TODO(b/127327645): This causes a loop failure when RNNs are enabled.
_enable_functions = False
def __init__(
self,
time_step_spec,
action_spec,
cloning_network,
optimizer,
epsilon_greedy=0.1,
# Params for training.
loss_fn=None,
gradient_clipping=None,
# Params for debugging
debug_summaries=False,
summarize_grads_and_vars=False,
train_step_counter=None,
name=None):
"""Creates an behavioral cloning Agent.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
cloning_network: A tf_agents.network.Network to be used by the agent.
The network will be called as
```
network(observation, step_type, network_state=None)
```
(with `network_state` optional) and must return a 2-tuple with elements
`(output, next_network_state)` where `output` will be passed as the
first argument to `loss_fn`, and used by a `Policy`. Input tensors will
be shaped `[batch, time, ...]` when training, and they will be shaped
`[batch, ...]` when the network is called within a `Policy`. If
`cloning_network` has an empty network state, then for training
`time` will always be `1` (individual examples).
optimizer: The optimizer to use for training.
epsilon_greedy: probability of choosing a random action in the default
epsilon-greedy collect policy (used only if a wrapper is not provided to
the collect_policy method).
loss_fn: A function for computing the error between the output of the
cloning network and the action that was taken. If None, the loss
depends on the action dtype. If the dtype is integer, then `loss_fn`
is
```python
def loss_fn(logits, action):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=action - action_spec.minimum, logits=logits)
```
If the dtype is floating point, the loss is
`tf.math.squared_difference`.
`loss_fn` must return a loss value for each element of the batch.
gradient_clipping: Norm length to clip gradients.
debug_summaries: A bool to gather debug summaries.
summarize_grads_and_vars: If True, gradient and network variable summaries
will be written during training.
train_step_counter: An optional counter to increment every time the train
op is run. Defaults to the global_step.
name: The name of this agent. All variables in this module will fall
under that name. Defaults to the class name.
Raises:
NotImplementedError: If the action spec contains more than one action.
"""
tf.Module.__init__(self, name=name)
flat_action_spec = tf.nest.flatten(action_spec)
self._num_actions = [
spec.maximum - spec.minimum + 1 for spec in flat_action_spec
]
# TODO(oars): Get behavioral cloning working with more than one dim in
# the actions.
if len(flat_action_spec) > 1:
raise NotImplementedError(
'Multi-arity actions are not currently supported.')
if loss_fn is None:
loss_fn = self._get_default_loss_fn(flat_action_spec[0])
self._cloning_network = cloning_network
self._loss_fn = loss_fn
self._epsilon_greedy = epsilon_greedy
self._optimizer = optimizer
self._gradient_clipping = gradient_clipping
policy, collect_policy = self._get_policies(time_step_spec, action_spec,
cloning_network)
super(BehavioralCloningAgent, self).__init__(
time_step_spec,
action_spec,
policy,
collect_policy,
train_sequence_length=1 if not cloning_network.state_spec else None,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter)
def _get_default_loss_fn(self, spec):
if spec.dtype.is_floating:
return tf.math.squared_difference
if spec.shape.ndims > 1:
raise NotImplementedError(
'Only scalar and one dimensional integer actions are supported.')
# TODO(ebrevdo): Maybe move the subtraction of the minimum into a
# self._label_fn and rewrite this.
def xent_loss_fn(logits, actions):
# Subtract the minimum so that we get a proper cross entropy loss on
# [0, maximum - minimum).
return tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=actions - spec.minimum)
return xent_loss_fn
def _get_policies(self, time_step_spec, action_spec, cloning_network):
policy = q_policy.QPolicy(
time_step_spec, action_spec, q_network=self._cloning_network)
collect_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
policy, epsilon=self._epsilon_greedy)
policy = greedy_policy.GreedyPolicy(policy)
return policy, collect_policy
def _initialize(self):
return tf.no_op()
def _train(self, experience, weights=None):
loss_info = self._loss(experience, weights=weights)
transform_grads_fn = None
if self._gradient_clipping is not None:
transform_grads_fn = eager_utils.clip_gradient_norms_fn(
self._gradient_clipping)
loss_info = eager_utils.create_train_step(
loss_info,
self._optimizer,
total_loss_fn=lambda loss_info: loss_info.loss,
global_step=self.train_step_counter,
transform_grads_fn=transform_grads_fn,
summarize_gradients=self._summarize_grads_and_vars,
variables_to_train=lambda: self._cloning_network.trainable_weights,
)
return loss_info
@eager_utils.future_in_eager_mode
# TODO(b/79688437): Figure out how to enable defun for Eager mode.
# @tfe.defun
def _loss(self, experience, weights=None):
"""Computes loss for behavioral cloning.
Args:
experience: A `Trajectory` containing experience.
weights: Optional scalar or element-wise (per-batch-entry) importance
weights.
Returns:
loss: A `LossInfo` struct.
Raises:
ValueError:
If the number of actions is greater than 1.
"""
with tf.name_scope('loss'):
actions = tf.nest.flatten(experience.action)[0]
logits, _ = self._cloning_network(
experience.observation,
experience.step_type)
boundary_weights = tf.cast(~experience.is_boundary(), logits.dtype)
error = boundary_weights * self._loss_fn(logits, actions)
if nest_utils.is_batched_nested_tensors(
experience.action, self.action_spec, num_outer_dims=2):
# Do a sum over the time dimension.
error = tf.reduce_sum(input_tensor=error, axis=1)
# Average across the elements of the batch.
# Note: We use an element wise loss above to ensure each element is always
# weighted by 1/N where N is the batch size, even when some of the
# weights are zero due to boundary transitions. Weighting by 1/K where K
# is the actual number of non-zero weight would artificially increase
# their contribution in the loss. Think about what would happen as
# the number of boundary samples increases.
if weights is not None:
error *= weights
loss = tf.reduce_mean(input_tensor=error)
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='loss', data=loss, step=self.train_step_counter)
if self._summarize_grads_and_vars:
with tf.name_scope('Variables/'):
for var in self._cloning_network.trainable_weights:
tf.compat.v2.summary.histogram(
name=var.name.replace(':', '_'),
data=var,
step=self.train_step_counter)
if self._debug_summaries:
common.generate_tensor_summaries('errors', error,
self.train_step_counter)
return tf_agent.LossInfo(loss, BehavioralCloningLossInfo(loss=error))
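
# Illustrative sketch of wiring this agent up. The QNetwork import and the
# environment spec helpers are assumptions about the surrounding TF-Agents
# API, not definitions from this file:
#
#     from tf_agents.networks import q_network
#     cloning_net = q_network.QNetwork(env.observation_spec(), env.action_spec())
#     agent = BehavioralCloningAgent(
#         env.time_step_spec(), env.action_spec(),
#         cloning_network=cloning_net,
#         optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3))
#     agent.initialize()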
|
py | 7dfc3e1696dbe3e28626d5599dbd134869e6f28d | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
"""
Very simple transformation library that is needed for some examples.
Notes
-----
Functions that take a matrix as input generally operate on that matrix in
place.
"""
# This file is copied from glumpy (https://github.com/glumpy/glumpy);
# some functions were added by myself.
# Note: we use functions from math module because they're faster on scalars
import math
import numpy as np
def normalize(x):
    """Return the sign of a scalar, or a vector scaled to unit length."""
    if isinstance(x, float):
        return -1 if x < 0 else 1
    norm = math.sqrt(np.sum(x * x))
    return x / norm
def translate(M, x, y=None, z=None):
"""Translate by an offset (x, y, z) .
Parameters
----------
M : array
Original transformation (4x4).
x : float
X coordinate of a translation vector.
y : float | None
Y coordinate of translation vector. If None, `x` will be used.
z : float | None
Z coordinate of translation vector. If None, `x` will be used.
Returns
-------
M : array
Updated transformation (4x4). Note that this function operates
in-place.
"""
y = x if y is None else y
z = x if z is None else z
T = np.array([[1.0, 0.0, 0.0, x],
[0.0, 1.0, 0.0, y],
[0.0, 0.0, 1.0, z],
[0.0, 0.0, 0.0, 1.0]], dtype=M.dtype).T
M[...] = np.dot(M, T)
return M
def translation(x, y=None, z=None):
"""Translate by an offset (x, y, z) .
Parameters
----------
x : float
X coordinate of a translation vector.
y : float | None
Y coordinate of translation vector. If None, `x` will be used.
z : float | None
Z coordinate of translation vector. If None, `x` will be used.
Returns
-------
M : array
Translation matrix
"""
M = np.eye(4, dtype=np.float32)
return translate(M,x,y,z)
def scale(M, x, y=None, z=None):
"""Non-uniform scaling along the x, y, and z axes
Parameters
----------
M : array
Original transformation (4x4).
    x : float
        Scaling factor along the x axis.
    y : float | None
        Scaling factor along the y axis. If None, `x` will be used.
    z : float | None
        Scaling factor along the z axis. If None, `x` will be used.
Returns
-------
M : array
Updated transformation (4x4). Note that this function operates
in-place.
"""
y = x if y is None else y
z = x if z is None else z
S = np.array([[x, 0.0, 0.0, 0.0],
[0.0, y, 0.0, 0.0],
[0.0, 0.0, z, 0.0],
[0.0, 0.0, 0.0, 1.0]], dtype=M.dtype).T
M[...] = np.dot(M, S)
return M
def xrotate(M, theta):
"""Rotate about the X axis
Parameters
----------
M : array
Original transformation (4x4).
theta : float
Specifies the angle of rotation, in degrees.
Returns
-------
M : array
Updated transformation (4x4). Note that this function operates
in-place.
"""
t = math.pi * theta / 180.
cosT = math.cos(t)
sinT = math.sin(t)
R = np.array([[1.0, 0.0, 0.0, 0.0],
[0.0, cosT, -sinT, 0.0],
[0.0, sinT, cosT, 0.0],
[0.0, 0.0, 0.0, 1.0]], dtype=M.dtype)
M[...] = np.dot(M, R)
return M
def yrotate(M, theta):
"""Rotate about the Y axis
Parameters
----------
M : array
Original transformation (4x4).
theta : float
Specifies the angle of rotation, in degrees.
Returns
-------
M : array
Updated transformation (4x4). Note that this function operates
in-place.
"""
t = math.pi * theta / 180
cosT = math.cos(t)
sinT = math.sin(t)
R = np.array(
[[cosT, 0.0, sinT, 0.0],
[0.0, 1.0, 0.0, 0.0],
[-sinT, 0.0, cosT, 0.0],
[0.0, 0.0, 0.0, 1.0]], dtype=M.dtype)
M[...] = np.dot(M, R)
return M
def zrotate(M, theta):
"""Rotate about the Z axis
Parameters
----------
M : array
Original transformation (4x4).
theta : float
Specifies the angle of rotation, in degrees.
Returns
-------
M : array
Updated transformation (4x4). Note that this function operates
in-place.
"""
t = math.pi * theta / 180
cosT = math.cos(t)
sinT = math.sin(t)
R = np.array(
[[cosT, -sinT, 0.0, 0.0],
[sinT, cosT, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]], dtype=M.dtype)
M[...] = np.dot(M, R)
return M
def rotate(M, angle, x, y, z, point=None):
"""Rotation about a vector
Parameters
----------
M : array
Original transformation (4x4).
angle : float
Specifies the angle of rotation, in degrees.
    x : float
        X coordinate of the rotation axis.
    y : float
        Y coordinate of the rotation axis.
    z : float
        Z coordinate of the rotation axis.
Returns
-------
M : array
Updated transformation (4x4). Note that this function operates
in-place.
"""
angle = math.pi * angle / 180
c, s = math.cos(angle), math.sin(angle)
n = math.sqrt(x * x + y * y + z * z)
x /= n
y /= n
z /= n
cx, cy, cz = (1 - c) * x, (1 - c) * y, (1 - c) * z
R = np.array([[cx * x + c, cy * x - z * s, cz * x + y * s, 0],
[cx * y + z * s, cy * y + c, cz * y - x * s, 0],
[cx * z - y * s, cy * z + x * s, cz * z + c, 0],
[0, 0, 0, 1]], dtype=M.dtype).T
M[...] = np.dot(M, R)
return M
def ortho(left, right, bottom, top, znear, zfar):
"""Create orthographic projection matrix
Parameters
----------
left : float
Left coordinate of the field of view.
right : float
Right coordinate of the field of view.
bottom : float
Bottom coordinate of the field of view.
top : float
Top coordinate of the field of view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : array
Orthographic projection matrix (4x4).
"""
assert(right != left)
assert(bottom != top)
assert(znear != zfar)
M = np.zeros((4, 4), dtype=np.float32)
M[0, 0] = +2.0 / (right - left)
M[3, 0] = -(right + left) / float(right - left)
M[1, 1] = +2.0 / (top - bottom)
M[3, 1] = -(top + bottom) / float(top - bottom)
M[2, 2] = -2.0 / (zfar - znear)
M[3, 2] = -(zfar + znear) / float(zfar - znear)
M[3, 3] = 1.0
return M
def frustum(left, right, bottom, top, znear, zfar):
"""Create view frustum
Parameters
----------
left : float
Left coordinate of the field of view.
right : float
Right coordinate of the field of view.
bottom : float
Bottom coordinate of the field of view.
top : float
Top coordinate of the field of view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : array
View frustum matrix (4x4).
"""
assert(right != left)
assert(bottom != top)
assert(znear != zfar)
M = np.zeros((4, 4), dtype=np.float32)
M[0, 0] = +2.0 * znear / (right - left)
M[2, 0] = (right + left) / (right - left)
M[1, 1] = +2.0 * znear / (top - bottom)
    M[2, 1] = (top + bottom) / (top - bottom)
M[2, 2] = -(zfar + znear) / (zfar - znear)
M[3, 2] = -2.0 * znear * zfar / (zfar - znear)
M[2, 3] = -1.0
return M
def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : array
Perspective projection matrix (4x4).
"""
assert(znear != zfar)
h = math.tan(fovy / 360.0 * math.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar)
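
# Illustrative sketch: the helpers above compose by post-multiplication in a
# row-vector convention, so a typical model-view-projection chain (arbitrary
# example values, not part of the library's API) looks like:
#
#     model = np.eye(4, dtype=np.float32)
#     rotate(model, 20, 0, 0, 1)            # spin 20 degrees about the z axis
#     translate(model, 0, 0, -5)            # move the scene away from the camera
#     projection = perspective(45.0, 4.0 / 3.0, 0.1, 100.0)
#     mvp = np.dot(model, projection)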
def lookAt(eye, center, up):
"""
"""
f = normalize(center - eye)
s = normalize(np.cross(f, up))
u = np.cross(s, f)
result = np.identity(4, np.float32)
result[:,0][:3] = s
result[:,1][:3] = u
result[:,2][:3] = -f
result[3][0] = -np.dot(s, eye)
result[3][1] = -np.dot(u, eye)
result[3][2] = np.dot(f, eye)
return result |
py | 7dfc3e5ca8a4da19cadc8c9f109111f054981f6b | """
setup.py
Stripe Payments Demo. Created by Adrienne Dreyfus (@adrind).
This is a one-time setup script for your server. It creates a set of fixtures,
namely products and SKUs, that can then used to calculate payment amounts when completing the
checkout flow in the web interface.
"""
import stripe
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
stripe.api_key = os.getenv('STRIPE_SECRET_KEY')
stripe.api_version = '2019-03-14'
def create_data():
try:
products = [{'id': 'increment', 'type': 'good', 'name': 'Increment Magazine', 'attributes': ['issue']},
{'id': 'pins', 'type': 'good',
'name': 'Stripe Pins', 'attributes': ['set']},
{'id': 'shirt', 'type': 'good', 'name': 'Stripe Shirt', 'attributes': ['size', 'gender']}]
for product in products:
stripe.Product.create(**product)
skus = [{'id': 'increment-03', 'product': 'increment', 'attributes': {'issue': 'Issue #3 “Development”'},
'price': 399, 'currency': 'usd', 'inventory': {'type': 'infinite'}},
{'id': 'shirt-small-woman', 'product': 'shirt',
'attributes': {'size': 'Small Standard', 'gender': 'Woman'},
'price': 999, 'currency': 'usd', 'inventory': {'type': 'infinite'}},
{'id': 'pins-collector', 'product': 'pins', 'attributes': {'set': 'Collector Set'},
'price': 799, 'currency': 'usd', 'inventory': {'type': 'finite', 'quantity': 500}}]
for sku in skus:
stripe.SKU.create(**sku)
except stripe.InvalidRequestError as e:
print('Products already exist', e)
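
# Presumably the one-time setup is invoked by running this script directly;
# the guard below is a minimal sketch of that entry point.
if __name__ == '__main__':
    create_data()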
|
py | 7dfc3f9b299b7d6f68e39867194fbb5a9cedcf3f | import numpy as np
from openrec.tf1.legacy.utils.evaluators import Evaluator
class Precision(Evaluator):
def __init__(self, precision_at, name='Precision'):
self._precision_at = np.array(precision_at)
super(Precision, self).__init__(etype='rank', name=name)
def compute(self, rank_above, negative_num):
del negative_num
results = np.zeros(len(self._precision_at))
for rank in rank_above:
results += (rank <= self._precision_at).astype(np.float32)
return results / self._precision_at
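
# Illustrative sketch: assuming each entry of `rank_above` is the rank position
# of one positive item among the scored candidates, the evaluator returns, for
# each cutoff k passed to the constructor, (positives ranked within top k) / k.
#
#     evaluator = Precision(precision_at=[5, 10])
#     scores = evaluator.compute(rank_above=np.array([3, 12, 7]), negative_num=100)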
|
py | 7dfc41227fdf918d642964f7d3b9a2d90fe20a18 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import JTensor
from bigdl.nn.layer import Layer
import numpy as np
if sys.version >= '3':
long = int
unicode = str
class Criterion(JavaValue):
"""
Criterion is helpful to train a neural network.
Given an input and a target, they compute a gradient according to a given loss function.
"""
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, JavaValue.jvm_class_constructor(self), *args)
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString()
def forward(self, input, target):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding loss of the criterion,
compared with `target`
:param input: ndarray or list of ndarray
:param target: ndarray or list of ndarray
:return: value of loss
"""
jinput, input_is_table = Layer.check_input(input)
jtarget, target_is_table = Layer.check_input(target)
output = callBigDlFunc(self.bigdl_type,
"criterionForward",
self.value,
jinput,
input_is_table,
jtarget,
target_is_table)
return output
def backward(self, input, target):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Performs a back-propagation step through the criterion, with respect to the given input.
:param input: ndarray or list of ndarray
:param target: ndarray or list of ndarray
:return: ndarray
"""
jinput, input_is_table = Layer.check_input(input)
jtarget, target_is_table = Layer.check_input(target)
output = callBigDlFunc(self.bigdl_type,
"criterionBackward",
self.value,
jinput,
input_is_table,
jtarget,
target_is_table)
return Layer.convert_output(output)
@classmethod
def of(cls, jcriterion, bigdl_type="float"):
"""
Create a python Criterion by a java criterion object
:param jcriterion: A java criterion object which created by Py4j
:return: a criterion.
"""
criterion = Criterion(bigdl_type, jcriterion)
criterion.value = jcriterion
criterion.bigdl_type = bigdl_type
return criterion
class ClassNLLCriterion(Criterion):
'''
The negative log likelihood criterion. It is useful to train a classification problem with n
classes. If provided, the optional argument weights should be a 1D Tensor assigning weight to
each of the classes. This is particularly useful when you have an unbalanced training set.
The input given through a forward() is expected to contain log-probabilities/probabilities of
each class: input has to be a 1D Tensor of size n. Obtaining log-probabilities/probabilities
in a neural network is easily achieved by adding a LogSoftMax/SoftMax layer in the last layer
of your neural network. You may use CrossEntropyCriterion instead, if you prefer not to add an
extra layer to your network. This criterion expects a class index (1 to the number of class) as
target when calling forward(input, target) and backward(input, target).
In the log-probabilities case,
The loss can be described as:
loss(x, class) = -x[class]
or in the case of the weights argument it is specified as follows:
loss(x, class) = -weights[class] * x[class]
Due to the behaviour of the backend code, it is necessary to set sizeAverage to false when
calculating losses in non-batch mode.
Note that if the target is `-1`, the training process will skip this sample.
    In other words, the forward process will return zero output and the backward process
will also return zero `gradInput`.
By default, the losses are averaged over observations for each minibatch. However, if the field
sizeAverage is set to false, the losses are instead summed for each minibatch.
    In particular, when weights=None, size_average=True and logProbAsInput=False, this is the same as
`sparse_categorical_crossentropy` loss in keras.
:param weights: weights of each class
:param size_average: whether to average or not
:param logProbAsInput: indicating whether to accept log-probabilities or probabilities as input.
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> classNLLCriterion = ClassNLLCriterion(weights, True, True)
creating: createClassNLLCriterion
>>> classNLLCriterion = ClassNLLCriterion()
creating: createClassNLLCriterion
'''
def __init__(self,
weights=None,
size_average=True,
logProbAsInput=True,
bigdl_type="float"):
super(ClassNLLCriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average, logProbAsInput)
class MSECriterion(Criterion):
'''
Creates a criterion that measures the mean squared error between n elements
in the input x and output y:
```
loss(x, y) = 1/n \sum |x_i - y_i|^2
```
If x and y are d-dimensional Tensors with a total of n elements,
the sum operation still operates over all the elements, and divides by n.
The two Tensors must have the same number of elements (but their sizes might be different).
The division by n can be avoided if one sets the internal variable sizeAverage to false.
By default, the losses are averaged over observations for each minibatch. However,
if the field sizeAverage is set to false, the losses are instead summed.
>>> mSECriterion = MSECriterion()
creating: createMSECriterion
'''
def __init__(self, bigdl_type="float"):
super(MSECriterion, self).__init__(None, bigdl_type)
class AbsCriterion(Criterion):
'''
measures the mean absolute value of the element-wise difference between input
>>> absCriterion = AbsCriterion(True)
creating: createAbsCriterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(AbsCriterion, self).__init__(None, bigdl_type,
size_average)
class ClassSimplexCriterion(Criterion):
'''
ClassSimplexCriterion implements a criterion for classification.
It learns an embedding per class, where each class' embedding is a
point on an (N-1)-dimensional simplex, where N is the number of classes.
:param nClasses: the number of classes.
>>> classSimplexCriterion = ClassSimplexCriterion(2)
creating: createClassSimplexCriterion
'''
def __init__(self,
n_classes,
bigdl_type="float"):
super(ClassSimplexCriterion, self).__init__(None, bigdl_type,
n_classes)
class CosineDistanceCriterion(Criterion):
"""
Creates a criterion that measures the loss given an input and target,
Loss = 1 - cos(x, y)
>>> cosineDistanceCriterion = CosineDistanceCriterion(True)
creating: createCosineDistanceCriterion
>>> cosineDistanceCriterion.forward(np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
... np.array([5.0, 4.0, 3.0, 2.0, 1.0]))
0.07272728
"""
def __init__(self,
size_average=True,
bigdl_type="float"):
super(CosineDistanceCriterion, self).__init__(None, bigdl_type,
size_average)
class CosineEmbeddingCriterion(Criterion):
"""
Creates a criterion that measures the loss given an input x = {x1, x2},
a table of two Tensors, and a Tensor label y with values 1 or -1.
:param margin: a number from -1 to 1, 0 to 0.5 is suggested
>>> cosineEmbeddingCriterion = CosineEmbeddingCriterion(1e-5, True)
creating: createCosineEmbeddingCriterion
>>> cosineEmbeddingCriterion.forward([np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
... np.array([5.0, 4.0, 3.0, 2.0, 1.0])],
... [np.ones(5)])
0.0
"""
def __init__(self,
margin=0.0,
size_average=True,
bigdl_type="float"):
super(CosineEmbeddingCriterion, self).__init__(None, bigdl_type,
margin,
size_average)
class DistKLDivCriterion(Criterion):
'''
The Kullback-Leibler divergence criterion
:param sizeAverage:
>>> distKLDivCriterion = DistKLDivCriterion(True)
creating: createDistKLDivCriterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(DistKLDivCriterion, self).__init__(None, bigdl_type,
size_average)
class HingeEmbeddingCriterion(Criterion):
'''
Creates a criterion that measures the loss given an
input x which is a 1-dimensional vector and a label y (1 or -1).
This is usually used for measuring whether two inputs are similar
or dissimilar,
e.g. using the L1 pairwise distance, and is typically used for
learning nonlinear embeddings or semi-supervised learning.
If x and y are n-dimensional Tensors, the sum operation still operates
over all the elements, and divides by n (this can be avoided if one sets
the internal variable sizeAverage to false). The margin has a default
value of 1, or can be set in the constructor.
>>> hingeEmbeddingCriterion = HingeEmbeddingCriterion(1e-5, True)
creating: createHingeEmbeddingCriterion
'''
def __init__(self,
margin=1.0,
size_average=True,
bigdl_type="float"):
super(HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
margin,
size_average)
class L1HingeEmbeddingCriterion(Criterion):
'''
Creates a criterion that measures the loss given an input x = {x1, x2},
a table of two Tensors, and a label y (1 or -1):
:param margin:
>>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion(1e-5)
creating: createL1HingeEmbeddingCriterion
>>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion()
creating: createL1HingeEmbeddingCriterion
>>> input1 = np.array([2.1, -2.2])
>>> input2 = np.array([-0.55, 0.298])
>>> input = [input1, input2]
>>> target = np.array([1.0])
>>> result = l1HingeEmbeddingCriterion.forward(input, target)
>>> (result == 5.148)
True
'''
def __init__(self,
margin=1.0,
bigdl_type="float"):
super(L1HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
margin)
class MarginCriterion(Criterion):
'''
Creates a criterion that optimizes a two-class classification hinge loss (margin-based loss)
between input x (a Tensor of dimension 1) and output y.
When margin = 1, size_average = True and squared = False, this is the same as hinge loss in keras;
When margin = 1, size_average = False and squared = True, this is the same as squared_hinge loss in keras.
:param margin: if unspecified, is by default 1.
:param size_average: size average in a mini-batch
:param squared: whether to calculate the squared hinge loss
>>> marginCriterion = MarginCriterion(1e-5, True, False)
creating: createMarginCriterion
'''
def __init__(self,
margin=1.0,
size_average=True,
squared=False,
bigdl_type="float"):
super(MarginCriterion, self).__init__(None, bigdl_type,
margin,
size_average,
squared)
class MarginRankingCriterion(Criterion):
'''
Creates a criterion that measures the loss given an input x = {x1, x2},
a table of two Tensors of size 1 (they contain only scalars), and a label y (1 or -1).
In batch mode, x is a table of two Tensors of size batchsize, and y is a Tensor of size
batchsize containing 1 or -1 for each corresponding pair of elements in the input Tensor.
    If y == 1 then it is assumed the first input should be ranked higher (have a larger value) than
the second input, and vice-versa for y == -1.
:param margin:
>>> marginRankingCriterion = MarginRankingCriterion(1e-5, True)
creating: createMarginRankingCriterion
'''
def __init__(self,
margin=1.0,
size_average=True,
bigdl_type="float"):
super(MarginRankingCriterion, self).__init__(None, bigdl_type,
margin,
size_average)
class MultiCriterion(Criterion):
'''
a weighted sum of other criterions each applied to the same input and target
>>> multiCriterion = MultiCriterion()
creating: createMultiCriterion
>>> mSECriterion = MSECriterion()
creating: createMSECriterion
>>> multiCriterion = multiCriterion.add(mSECriterion)
>>> multiCriterion = multiCriterion.add(mSECriterion)
'''
def __init__(self,
bigdl_type="float"):
super(MultiCriterion, self).__init__(None, bigdl_type)
def add(self, criterion, weight=1.0):
self.value.add(criterion.value, weight)
return self
class MultiLabelMarginCriterion(Criterion):
'''
Creates a criterion that optimizes a multi-class multi-classification hinge loss (
margin-based loss) between input x and output y (which is a Tensor of target class indices)
:param size_average: size average in a mini-batch
>>> multiLabelMarginCriterion = MultiLabelMarginCriterion(True)
creating: createMultiLabelMarginCriterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(MultiLabelMarginCriterion, self).__init__(None, bigdl_type,
size_average)
class ParallelCriterion(Criterion):
'''
ParallelCriterion is a weighted sum of other criterions each applied to a different input
and target. Set repeatTarget = true to share the target for criterions.
Use add(criterion[, weight]) method to add criterion. Where weight is a scalar(default 1).
:param repeat_target: Whether to share the target for all criterions.
>>> parallelCriterion = ParallelCriterion(True)
creating: createParallelCriterion
>>> mSECriterion = MSECriterion()
creating: createMSECriterion
>>> parallelCriterion = parallelCriterion.add(mSECriterion)
>>> parallelCriterion = parallelCriterion.add(mSECriterion)
'''
def __init__(self,
repeat_target=False,
bigdl_type="float"):
super(ParallelCriterion, self).__init__(None, bigdl_type,
repeat_target)
def add(self, criterion, weight=1.0):
self.value.add(criterion.value, weight)
return self
class KLDCriterion(Criterion):
'''
Computes the KL-divergence of the Gaussian distribution.
>>> KLDCriterion = KLDCriterion()
creating: createKLDCriterion
'''
def __init__(self, bigdl_type="float"):
super(KLDCriterion, self).__init__(None, bigdl_type)
class GaussianCriterion(Criterion):
'''
Computes the log-likelihood of a sample x given a Gaussian distribution p.
>>> GaussianCriterion = GaussianCriterion()
creating: createGaussianCriterion
'''
def __init__(self, bigdl_type="float"):
super(GaussianCriterion, self).__init__(None, bigdl_type)
class SmoothL1Criterion(Criterion):
'''
Creates a criterion that can be thought of as a smooth version of the AbsCriterion.
It uses a squared term if the absolute element-wise error falls below 1.
It is less sensitive to outliers than the MSECriterion and in some
cases prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
```
| 0.5 * (x_i - y_i)^2^, if |x_i - y_i| < 1
loss(x, y) = 1/n \sum |
| |x_i - y_i| - 0.5, otherwise
```
If x and y are d-dimensional Tensors with a total of n elements,
the sum operation still operates over all the elements, and divides by n.
The division by n can be avoided if one sets the internal variable sizeAverage to false
:param size_average: whether to average the loss
>>> smoothL1Criterion = SmoothL1Criterion(True)
creating: createSmoothL1Criterion
'''
def __init__(self,
size_average=True,
bigdl_type="float"):
super(SmoothL1Criterion, self).__init__(None, bigdl_type,
size_average)
class SmoothL1CriterionWithWeights(Criterion):
'''
a smooth version of the AbsCriterion
It uses a squared term if the absolute element-wise error falls below 1.
It is less sensitive to outliers than the MSECriterion and in some cases
prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
```
d = (x - y) * w_in
loss(x, y, w_in, w_out)
| 0.5 * (sigma * d_i)^2 * w_out if |d_i| < 1 / sigma / sigma
= 1/n \sum |
| (|d_i| - 0.5 / sigma / sigma) * w_out otherwise
```
>>> smoothL1CriterionWithWeights = SmoothL1CriterionWithWeights(1e-5, 1)
creating: createSmoothL1CriterionWithWeights
'''
def __init__(self,
sigma,
num=0,
bigdl_type="float"):
super(SmoothL1CriterionWithWeights, self).__init__(None, bigdl_type,
sigma,
num)
class SoftmaxWithCriterion(Criterion):
'''
Computes the multinomial logistic loss for a one-of-many classification task,
passing real-valued predictions through a softmax to get a probability distribution over classes.
It should be preferred over separate SoftmaxLayer + MultinomialLogisticLossLayer
as its gradient computation is more numerically stable.
    :param ignoreLabel: (optional) Specify a label value that should be ignored when computing the loss.
:param normalizeMode: How to normalize the output loss.
>>> softmaxWithCriterion = SoftmaxWithCriterion()
creating: createSoftmaxWithCriterion
>>> softmaxWithCriterion = SoftmaxWithCriterion(1, "FULL")
creating: createSoftmaxWithCriterion
'''
def __init__(self,
ignore_label=None,
normalize_mode="VALID",
bigdl_type="float"):
super(SoftmaxWithCriterion, self).__init__(None, bigdl_type,
ignore_label,
normalize_mode)
class TimeDistributedCriterion(Criterion):
'''
This class is intended to support inputs with 3 or more dimensions.
Apply Any Provided Criterion to every temporal slice of an input.
:param criterion: embedded criterion
:param size_average: whether to divide the sequence length
>>> td = TimeDistributedCriterion(ClassNLLCriterion())
creating: createClassNLLCriterion
creating: createTimeDistributedCriterion
'''
def __init__(self, criterion, size_average=False, bigdl_type="float"):
super(TimeDistributedCriterion, self).__init__(
None, bigdl_type, criterion, size_average)
class CrossEntropyCriterion(Criterion):
"""
This criterion combines LogSoftMax and ClassNLLCriterion in one single class.
:param weights: A tensor assigning weight to each of the classes
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> cec = CrossEntropyCriterion(weights)
creating: createCrossEntropyCriterion
>>> cec = CrossEntropyCriterion()
creating: createCrossEntropyCriterion
"""
def __init__(self,
weights=None,
size_average=True,
bigdl_type="float"):
super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(
weights),
size_average)
class BCECriterion(Criterion):
'''
Creates a criterion that measures the Binary Cross Entropy
between the target and the output
:param weights: weights for each class
:param sizeAverage: whether to average the loss or not
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> bCECriterion = BCECriterion(weights)
creating: createBCECriterion
>>> bCECriterion = BCECriterion()
creating: createBCECriterion
'''
def __init__(self,
weights=None,
size_average=True,
bigdl_type="float"):
super(BCECriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average)
class MultiLabelSoftMarginCriterion(Criterion):
'''
A MultiLabel multiclass criterion based on sigmoid:
the loss is:
```
    l(x,y) = - sum_i (y[i] * log(p[i]) + (1 - y[i]) * log(1 - p[i]))
```
where p[i] = exp(x[i]) / (1 + exp(x[i]))
and with weights:
```
l(x,y) = - sum_i weights[i] (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i]))
```
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion(weights)
creating: createMultiLabelSoftMarginCriterion
>>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion()
creating: createMultiLabelSoftMarginCriterion
'''
def __init__(self,
weights=None,
size_average=True,
bigdl_type="float"):
super(MultiLabelSoftMarginCriterion, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average)
class MultiMarginCriterion(Criterion):
'''
Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss)
between input x and output y (which is a target class index).
:param p:
:param weights:
:param margin:
:param size_average:
>>> np.random.seed(123)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> multiMarginCriterion = MultiMarginCriterion(1,weights)
creating: createMultiMarginCriterion
>>> multiMarginCriterion = MultiMarginCriterion()
creating: createMultiMarginCriterion
'''
def __init__(self,
p=1,
weights=None,
margin=1.0,
size_average=True,
bigdl_type="float"):
super(MultiMarginCriterion, self).__init__(None, bigdl_type,
p,
JTensor.from_ndarray(weights),
margin,
size_average)
class SoftMarginCriterion(Criterion):
"""
Creates a criterion that optimizes a two-class classification logistic loss
between input x (a Tensor of dimension 1) and output y (which is a tensor
containing either 1s or -1s).
```
loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x:nElement()
```
    :param sizeaverage: The normalization by the number of elements in the input can be disabled by setting sizeAverage to false.
>>> softMarginCriterion = SoftMarginCriterion(False)
creating: createSoftMarginCriterion
>>> softMarginCriterion = SoftMarginCriterion()
creating: createSoftMarginCriterion
"""
def __init__(self,
size_average=True,
bigdl_type="float"):
super(SoftMarginCriterion, self).__init__(None, bigdl_type, size_average)
class DiceCoefficientCriterion(Criterion):
'''
The Dice-Coefficient criterion
input: Tensor,target: Tensor
```
return: 2 * (input intersection target)
1 - ----------------------------------
input union target
```
>>> diceCoefficientCriterion = DiceCoefficientCriterion(size_average = True, epsilon = 1.0)
creating: createDiceCoefficientCriterion
>>> diceCoefficientCriterion = DiceCoefficientCriterion()
creating: createDiceCoefficientCriterion
'''
def __init__(self,
size_average=True,
epsilon=1.0,
bigdl_type="float"):
super(DiceCoefficientCriterion, self).__init__(None, bigdl_type,
size_average,
epsilon)
class L1Cost(Criterion):
'''
compute L1 norm for input, and sign of input
>>> l1Cost = L1Cost()
creating: createL1Cost
'''
def __init__(self,
bigdl_type="float"):
super(L1Cost, self).__init__(None, bigdl_type)
class CosineProximityCriterion(Criterion):
'''
compute the negative of the mean cosine proximity between predictions and targets.
```
x'(i) = x(i) / sqrt(max(sum(x(i)^2), 1e-12))
    y'(i) = y(i) / sqrt(max(sum(y(i)^2), 1e-12))
cosine_proximity(x, y) = sum_i(-1 * x'(i) * y'(i))
```
>>> cosineProximityCriterion = CosineProximityCriterion()
creating: createCosineProximityCriterion
'''
def __init__(self,
bigdl_type="float"):
super(CosineProximityCriterion, self).__init__(None, bigdl_type)
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import criterion
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = criterion.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test criterion",
conf=create_spark_conf())
globs['sc'] = sc
init_engine()
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
py | 7dfc412b6202ea5b8604de6e69690a980846794c | """
Code for generating an ensemble submission for the SeeClickFix contest hosted on Kaggle. It loads base
submission files generated by our team's (Bryan Gregory and Miroslaw Horbal) individual models then combines them
using segment based averaging.
Ensemble weights for each segment of data are stored in SETTINGS.json along with the filepaths for the base input
submission files. Note that base submission 0 corresponds to Bryan's model and base submission 1 is Miroslaw's model.
This relies on already generated submission files from the base models, so base models must be run prior to performing
the ensemble.
Requires: PANDAS >.13
NUMPY
"""
__author__ = ['Bryan Gregory','Miroslaw Horbal']
__email__ = ['[email protected]','[email protected]']
__date__ = '01-04-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
#External modules
import sys
import pandas as pd
import numpy as np
from datetime import datetime
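
# Illustrative sketch of the SETTINGS.json layout implied by the keys read in
# main() below; paths, segment names, and weights are made-up placeholders:
#
#     {
#         "file_bryan_submission": "Submissions/bryan.csv",
#         "file_miroslaw_submission": "Submissions/miroslaw.csv",
#         "file_segment_ids": "Data/segment_ids.csv",
#         "dir_ensemble_submissions": "Submissions/",
#         "avg_log_space": "y",
#         "target_scalars": {"<target>": "1.0"},
#         "ensemble_segment_weights": {
#             "<segment name>": {"<target>": {"0": "0.5", "1": "0.5"}}
#         }
#     }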
def main():
#---Load environment settings from SETTINGS.json in root directory and build filepaths for all base submissions---#
settings = utils.load_settings('SETTINGS.json')
base_filepaths = (settings['file_bryan_submission'],
settings['file_miroslaw_submission'])
segment_weights = settings['ensemble_segment_weights']
segments = segment_weights.keys()
targets = segment_weights[segments[0]].keys()
#---Output the segment weights to be used for ensemble averaging of base submissions---#
log.info('==========ENSEMBLE WEIGHTS (B,M)============')
for segment in segment_weights:
log.info(segment.upper()+':')
for target in segment_weights[segment]:
log.info(' '+target.upper()+' -- ['+segment_weights[segment][target]['0']+','+
segment_weights[segment][target]['1']+']')
#---Load each base submission to a list of dataframes---#
base_subs = []
for file in base_filepaths:
try:
base_subs.append(pd.read_csv(file).set_index(['id'], drop=False).sort())
log.info('Base submission successfully loaded: %s.' % file)
except IOError:
log.info('Base submission file does not exist: %s. Run base model to generate, or update filepath.' %file)
sys.exit('---Exiting---')
utils.line_break()
#---Load id's labeled with segments to a dataframe used for segment based averaging---#
file = settings['file_segment_ids']
try:
segment_ids = pd.read_csv(file)
log.info('Segment IDs successfully loaded from: %s.' % file)
except IOError:
log.info('Segment IDs file does not exist: %s. Update filepath in SETTINGS.json.' % file)
utils.line_break()
#---Transform base predictions to log space prior to averaging, if selected in settings---#
if settings['avg_log_space'] == 'y':
log.info('Transforming base predictions to log space prior to averaging.')
for i in range(len(base_subs)):
for target in targets:
base_subs[i][target] = np.log(base_subs[i][target]+1)
utils.line_break()
#---Apply segment based weights to each base submission then combine them to create ensemble submission---#
log.info('Applying segment weights to base submissions then combining to create ensemble.')
for i in range(len(base_subs)):
#Merge the segment labels from the segment id's file with the base submission dataframe
base_subs[i] = base_subs[i].merge(segment_ids,on='id',how='inner')
for segment in segments:
for target in targets:
base_subs[i][target][base_subs[i]['Segment'] == segment] \
*= float(segment_weights[segment][target][str(i)])
del base_subs[i]['Segment']
ensemble_sub = base_subs[0].ix[:]
for i in range(len(base_subs)-1):
for target in targets:
ensemble_sub[target] += base_subs[i+1][target]
utils.line_break()
#---Transform ensemble predictions back to normal, if use log space averaging was selected in settings---#
if settings['avg_log_space'] == 'y':
log.info('Transforming ensemble predictions back to normal from log space.')
for target in targets:
ensemble_sub[target] = np.exp(ensemble_sub[target])-1
utils.line_break()
#---Apply any final target scalars to ensemble predictions---#
for target in targets:
ensemble_sub[target] *= float(settings['target_scalars'][target])
#---Output ensemble submission to directory set in SETTINGS.json, appending creation date and time---#
timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
filename = settings['dir_ensemble_submissions']+'ensemble_predictions_'+timestamp+'.csv'
ensemble_sub.to_csv(filename, index=False)
log.info('Ensemble submission saved: %s' % filename)
utils.line_break()
#End main
log.info('Program executed successfully without error! Exiting.')
if __name__ == '__main__':
sys.exit(main()) |
py | 7dfc4185eb8fb5be16b5a372e0d501d824b477ca | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .... import oscar as mo
from ...core import NodeRole, AbstractService
from ..uploader import NodeInfoUploaderActor
from .locator import WorkerSupervisorLocatorActor
class ClusterWorkerService(AbstractService):
"""
Cluster service on worker.
Service Configuration
---------------------
{
"disk_dirs": ["List of disk directories"],
"cluster": {
"backend": "<cluster backend name>",
"lookup_address": "<address of master>",
"node_check_interval": check interval seconds for nodes,
"resource": {
"numa-0": 8,
"gpu-0": 1
}
}
}
"""
async def start(self):
svc_config = self._config["cluster"]
address = self._address
backend = svc_config.get("backend", "fixed")
lookup_address = svc_config.get(
"lookup_address", address if backend == "fixed" else None
)
await mo.create_actor(
WorkerSupervisorLocatorActor,
backend_name=backend,
lookup_address=lookup_address,
uid=WorkerSupervisorLocatorActor.default_uid(),
address=address,
)
await mo.create_actor(
NodeInfoUploaderActor,
role=NodeRole.WORKER,
interval=svc_config.get("node_check_interval"),
band_to_slots=svc_config.get("resource"),
uid=NodeInfoUploaderActor.default_uid(),
address=address,
)
async def stop(self):
address = self._address
await mo.destroy_actor(
mo.create_actor_ref(
uid=NodeInfoUploaderActor.default_uid(), address=address
)
)
await mo.destroy_actor(
mo.create_actor_ref(
uid=WorkerSupervisorLocatorActor.default_uid(), address=address
)
)
|
py | 7dfc41e2d0be3ffbeb0260d60e1df8ff6019a525 | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from bcrypt import hashpw, gensalt, checkpw
from . import base
from . import sql_connection
from . import exceptions
class User(base.LabControlObject):
"""User object
Attributes
----------
id
name
email
access_level
Methods
-------
create
"""
_table = "qiita.qiita_user"
_id_column = "email"
@staticmethod
def list_users(access_only=False):
"""Return a list of user information
Parameters
----------
access_only: bool, optional
Return only users that have access
Returns
-------
list of dict {'email': str, 'name': str}
"""
with sql_connection.TRN as TRN:
sql_where = ''
if access_only:
sql_where = 'JOIN labcontrol.labmanager_access USING (email)'
sql = """SELECT DISTINCT email, coalesce(name, email) as name
FROM qiita.qiita_user
{}
ORDER BY name""".format(sql_where)
TRN.add(sql)
return [dict(r) for r in TRN.execute_fetchindex()]
@staticmethod
def _encode_password(password):
return password if isinstance(password, bytes) \
else password.encode('utf-8')
@staticmethod
def _hash_password(password, hashed_pwd=None):
"""Hashes password
Parameters
----------
password : str
The password to be hashed
hashed_pwd : str, optional
Previously hashed password to pull salt from. If not provided,
a new salt will be generated
Returns
-------
str
The hashed password
"""
# bcrypt requires password to be bytes
password = User._encode_password(password)
hashed_pwd = hashed_pwd if hashed_pwd is not None else gensalt()
return hashpw(password, hashed_pwd)
@classmethod
def login(cls, email, password):
"""Logs a user into the system
Parameters
----------
email : str
The user email
password: str
The password of the user
Returns
-------
User
The User object corresponding to the login information
Raises
------
LabControlUnknownIdError
Email is not recognized
LabControlLoginError
Provided password doesn't match stored password
LabControlLoginDisabledError
If the user doesn't have access to login into LabControl
"""
with sql_connection.TRN as TRN:
sql = """SELECT password::bytea
FROM qiita.qiita_user
WHERE email = %s"""
TRN.add(sql, [email])
res = TRN.execute_fetchindex()
if not res:
# The email is not recognized
raise exceptions.LabControlUnknownIdError('User', email)
sql = """SELECT EXISTS(SELECT *
FROM labcontrol.labmanager_access
WHERE email = %s)"""
TRN.add(sql, [email])
if not TRN.execute_fetchlast():
# The user doesn't have access to login into LabControl
raise exceptions.LabControlLoginDisabledError()
db_pwd = res[0][0]
# Check that the given password matches the one in the DB
password = cls._encode_password(password)
# The stored password is returned as a memory view, we simply need
# to cast it to bytes so we can use it in the checkpw call
db_pwd = bytes(db_pwd)
if checkpw(password, db_pwd):
# Password matches, return the new user object
return cls(email)
else:
# Password didn't match, raise a Login error
raise exceptions.LabControlLoginError()
@property
def name(self):
"""The name of the user"""
name = self._get_attr('name')
if name is None:
return self._get_attr('email')
else:
return name
@property
def email(self):
"""The email of the user"""
return self._get_attr('email')
def grant_access(self):
"""Grants labmanager access to the user"""
with sql_connection.TRN as TRN:
sql = """INSERT INTO labcontrol.labmanager_access (email)
SELECT %s
WHERE NOT EXISTS (SELECT *
FROM labcontrol.labmanager_access
WHERE email = %s)"""
TRN.add(sql, [self.id, self.id])
TRN.execute()
def revoke_access(self):
"""Revokes labmanager access from the user"""
with sql_connection.TRN as TRN:
sql = """DELETE FROM labcontrol.labmanager_access
WHERE email = %s"""
TRN.add(sql, [self.id])
TRN.execute()
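# Usage sketch (illustrative only): authenticate a user and toggle LabControl
# access. The email address and password below are placeholders, and a
# configured database connection is assumed, so the demo stays behind a
# __main__ guard.
if __name__ == '__main__':
    user = User.login('demo@example.com', 'correct-horse-battery-staple')
    print('Logged in as %s <%s>' % (user.name, user.email))
    user.grant_access()   # allow the user to use LabControl
    user.revoke_access()  # and remove that permission again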
|
py | 7dfc4378c7cbaf70c82a72c0ca3459f5fbcf1cbd | import subprocess
import re
if __name__ == '__main__' :
for l in [0.0004, 0.0008, 0.0016, 0.0032, 0.0064, 0.0128, 0.0192, 0.0256, 0.0384, 0.0512, 0.06, 0.07, 0.08, 0.09, 0.1, 0.12, 0.13, 0.15, 0.16] :
        print(l)
a=subprocess.Popen(['python', 'PS9_2.py' , '-w' ,'16' ,'-l',str(l),'-t','10000'],stdout=subprocess.PIPE)
out,err=a.communicate()
        print(out.split('\n')[1])  # get throughput
|
py | 7dfc45c2f7c08a2bc9c96c079f1d3a576fe9233b | """
This folder contains the class ArrayInterval.
The ArrayInterval is very similar to a boolean 1 dimensional numpy array.
It should work as a replacement where such a numpy array could be used.
The advantage of this class is that it provides memory-efficient storage when
the 1d array represents intervals: it stores only the slice boundaries instead
of the individual values.
The motivation to write this class was to store the voice/source/speech
activity information (e.g. there is speech from sample 16000 to sample 48000)
of a long audio file (> 2h) in memory.
"""
from .core import zeros, ones
from .core import ArrayInterval
from .core import ArrayInterval_from_str as from_str
from .rttm import from_rttm
from .rttm import from_rttm_str
from .rttm import to_rttm
from .rttm import to_rttm_str
from .kaldi import from_kaldi_segments
from .kaldi import from_kaldi_segments_str
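# Usage sketch (illustrative only): mark speech activity from sample 16000 to
# 48000 in a two-hour 16 kHz recording without materialising the full boolean
# array. This assumes slice assignment and slice reads behave like a boolean
# numpy array, as described in the module docstring.
if __name__ == '__main__':
    activity = zeros(16000 * 60 * 60 * 2)   # two hours at 16 kHz, all False
    activity[16000:48000] = True            # speech between samples 16000 and 48000
    print(activity[15996:16004])            # last four False, first four True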
|
py | 7dfc48e89f52a65337e34ea92cacba7ef6538269 | """
authlib.oauth2.rfc6749.grants.refresh_token
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A special grant endpoint for refresh_token grant_type. Refreshing an
Access Token per `Section 6`_.
.. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
"""
import logging
from .base import BaseGrant
from ..util import scope_to_list
from ..errors import (
InvalidRequestError,
InvalidScopeError,
InvalidGrantError,
UnauthorizedClientError,
)
log = logging.getLogger(__name__)
class RefreshTokenGrant(BaseGrant):
"""A special grant endpoint for refresh_token grant_type. Refreshing an
Access Token per `Section 6`_.
.. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
"""
    #: refresh_token grant type has token endpoint
TOKEN_ENDPOINT = True
GRANT_TYPE = 'refresh_token'
def _validate_request_client(self):
# require client authentication for confidential clients or for any
# client that was issued client credentials (or with other
# authentication requirements)
client = self.authenticate_token_endpoint_client()
log.debug('Validate token request of %r', client)
if not client.check_client_type('confidential'):
raise UnauthorizedClientError()
if not client.check_grant_type(self.GRANT_TYPE):
raise UnauthorizedClientError()
return client
def _validate_request_token(self):
refresh_token = self.request.data.get('refresh_token')
if refresh_token is None:
raise InvalidRequestError(
'Missing "refresh_token" in request.',
)
token = self.authenticate_refresh_token(refresh_token)
if not token:
raise InvalidGrantError()
return token
def _validate_token_scope(self, token):
scope = self.request.scope
if not scope:
return
original_scope = token.get_scope()
if not original_scope:
raise InvalidScopeError()
original_scope = set(scope_to_list(original_scope))
if not original_scope.issuperset(set(scope_to_list(scope))):
raise InvalidScopeError()
def validate_token_request(self):
"""If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per Appendix B with a character encoding of UTF-8 in the HTTP
request entity-body, per Section 6:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
For example, the client makes the following HTTP request using
transport-layer security (with extra line breaks for display purposes
only):
.. code-block:: http
POST /token HTTP/1.1
Host: server.example.com
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
Content-Type: application/x-www-form-urlencoded
grant_type=refresh_token&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA
"""
self.request.client = self._validate_request_client()
token = self._validate_request_token()
self._validate_token_scope(token)
self.request.credential = token
def create_token_response(self):
"""If valid and authorized, the authorization server issues an access
token as described in Section 5.1. If the request failed
verification or is invalid, the authorization server returns an error
response as described in Section 5.2.
"""
credential = self.request.credential
user = self.authenticate_user(credential)
if not user:
raise InvalidRequestError('There is no "user" for this token.')
scope = self.request.scope
if not scope:
scope = credential.get_scope()
client = self.request.client
expires_in = credential.get_expires_in()
token = self.generate_token(
client, self.GRANT_TYPE,
user=user,
expires_in=expires_in,
scope=scope,
)
log.debug('Issue token %r to %r', token, client)
self.request.user = user
self.server.save_token(token, self.request)
self.execute_hook('process_token', token=token)
return 200, token, self.TOKEN_RESPONSE_HEADER
def authenticate_refresh_token(self, refresh_token):
"""Get token information with refresh_token string. Developers should
implement this method in subclass::
def authenticate_refresh_token(self, refresh_token):
item = Token.get(refresh_token=refresh_token)
if item and not item.is_refresh_token_expired():
return item
:param refresh_token: The refresh token issued to the client
:return: token
"""
raise NotImplementedError()
def authenticate_user(self, credential):
"""Authenticate the user related to this credential. Developers should
implement this method in subclass::
def authenticate_user(self, credential):
return User.query.get(credential.user_id)
:param credential: Token object
:return: user
"""
raise NotImplementedError()
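# Illustrative sketch: a concrete grant assembled from the hook examples in the
# docstrings above. ``Token`` and ``User`` stand for whatever storage models the
# host application provides; they are not defined in this module.
#
#     class MyRefreshTokenGrant(RefreshTokenGrant):
#         def authenticate_refresh_token(self, refresh_token):
#             item = Token.get(refresh_token=refresh_token)
#             if item and not item.is_refresh_token_expired():
#                 return item
#
#         def authenticate_user(self, credential):
#             return User.query.get(credential.user_id)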
|
py | 7dfc4b8456946b6806fc00357e7c8f94e9004418 | import attr
from dapodik.auth import BaseAuth
from dapodik.auth import Pengguna
from dapodik.base import BaseDapodik
def test_base_auth():
assert issubclass(BaseAuth, BaseDapodik)
def test_member():
assert attr.has(Pengguna)
|
py | 7dfc4b85d7176e8f911ce95b962cccba54c9bda1 | # -*- coding: UTF-8 -*-
# Copyright 2013-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
A Sphinx extension used to write multilingual user documentation
for a Lino application.
.. rst:directive:: lino2rst
Execute Python code and process the output as reStructuredText
source code. This is like :rst:dir:`py2rst` but with the following
names defined:
:settings: The Django settings module which is active while building
the docs.
:dd: The :mod:`lino.api.dd` module.
:rt: The :mod:`lino.api.rt` module.
Plus the app_label of each installed plugin.
.. rst:directive:: fields_list
Render a bullet list of the fields in a given model. The first
word of the content is the model. If there are any other words,
then these are field names. If no field names are specified, all
fields of the model are included.
.. rst:role:: menupath
Render the given menu command specifier as a `menuselection` role.
.. rst:directive:: actor
Usage::
.. actor:: app_name[.ActorName][.data_element_name]
Optional introduction text.
Insert the full description of the specified data dictionary item. If
the name contains no ".", then it is the name of a Plugin. If the
name contains one ".", then it is the name of an Actor or a Model. If
the name contains two ".", then it is the name of a data element of
that Actor or Model (data elements can be fields or actions)
.. rst:role:: ddref
(Deprecated) Insert a reference to the named data dictionary item.
The visible text will be automatically in the right language in
multilingual userdocs.
"""
from __future__ import unicode_literals, print_function
from builtins import str
from .base import menuselection_text
from docutils.parsers.rst import Directive
# from sphinx.util.compat import Directive
from sphinx.roles import XRefRole, menusel_role
from sphinx.util import ws_re
from sphinx import addnodes
from docutils import nodes, utils
from docutils.nodes import fully_normalize_name
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import translation
from django.utils.encoding import force_text
from lino.api import dd, rt
from lino.core import actors
from lino.core import actions
from lino.core import choicelists
from lino.core import kernel
from atelier.utils import unindent
from atelier import rstgen
from lino.core.utils import full_model_name
from lino.ad import Plugin
from lino.utils.diag import analyzer
from lino.core.actors import resolve_action
from lino.modlib.users.choicelists import UserTypes
from atelier.sphinxconf.insert_input import Py2rstDirective
def actor_name(a):
return fully_normalize_name(settings.SITE.userdocs_prefix + str(a))
def model_name(m):
return settings.SITE.userdocs_prefix + full_model_name(m).lower()
def app_name(a):
assert a.__name__.endswith('.models')
parts = a.__name__.split('.')
return settings.SITE.userdocs_prefix + parts[-2]
def actor_ref(rpt, text=None):
if text is None:
text = force_text(rpt.label or rpt.title or str(rpt))
return ':ddref:`%s <%s>`' % (text, rpt)
def model_ref(m, text=None):
if text is None:
text = force_text(m._meta.verbose_name)
return ':ref:`%s <%s>`' % (text, model_name(m))
def rptlist(l):
return ', '.join([actor_ref(a) for a in l])
def typeref(cls):
text = cls.__name__
target = cls.__module__ + '.' + cls.__name__
return ":class:`%s <%s>`" % (text, target)
def old_fieldtype(f):
if isinstance(f, models.ForeignKey):
#~ return f.__class__.__name__ + " to " + refto(f.rel.model)
return f.__class__.__name__ + " to " + model_ref(f.remote_field.model)
return f.__class__.__name__
def fieldtype(f):
s = typeref(f.__class__)
if isinstance(f, models.ForeignKey):
s = _("%(classref)s to %(model)s") % dict(
classref=s, model=model_ref(f.remote_field.model))
#~ print(20130908, s)
if isinstance(f, choicelists.ChoiceListField):
s = _("%(classref)s to %(model)s") % dict(
classref=s, model=actor_ref(f.choicelist))
return s
def fields_ul(fields):
helpless = []
def field2li(fld):
        s = "**%s**" % str(fld.verbose_name).strip()
        s += " (``%s``, %s)" % (fld.name, fieldtype(fld))
        if fld.help_text:
            s += " -- " + str(fld.help_text)
return s
helpless.append(s)
return None
items = []
for f in fields:
if not hasattr(f, '_lino_babel_field'):
s = field2li(f)
if s:
items.append(s)
#~ items = [ field2li(f) for f in fields if not hasattr(f,'_lino_babel_field')]
if len(helpless):
s = ', '.join(helpless)
if len(items):
s = _("... and %s") % s
items.append(s)
return rstgen.ul(items)
def fields_table(fields):
headers = ["name", "type"]
#~ formatters = [
#~ lambda f: f.name,
#~ lambda f: f.__class__.__name__,
#~ ]
headers.append("verbose name")
headers.append("help text")
def rowfmt(f):
cells = [
f.name,
fieldtype(f),
f.verbose_name,
f.help_text
]
#~ for lng in babel.AVAILABLE_LANGUAGES:
#~ babel.set_language(lng)
#~ cells.append(force_text(_(f.verbose_name)))
#~ cells.append(f.help_text)
return cells
rows = [rowfmt(f) for f in fields if not hasattr(f, '_lino_babel_field')]
return rstgen.table(headers, rows)
def get_actor_description(self):
"""
`self` is the actor
"""
body = "\n\n"
if self.help_text:
body += unindent(force_text(self.help_text).strip()) + "\n\n"
#~ ll = self.get_handle().list_layout
#~ if ll is not None:
#~ body += fields_table([ e.field for e in ll.main.columns] )
#~ model_reports = [r for r in kernel.master_tables if r.model is self.model]
#~ if model_reports:
#~ body += '\n\nMaster tables: %s\n\n' % rptlist(model_reports)
#~ if getattr(model,'_lino_slaves',None):
#~ body += '\n\nSlave tables: %s\n\n' % rptlist(model._lino_slaves.values())
return body
#~ def get_model_description(self):
#~ """
#~ `self` is the actor
#~ """
#~ body = "\n\n"
#~ help_text = getattr(self,'help_text',None)
#~ if help_text:
#~ body += unindent(force_text(help_text).strip()) + "\n\n"
#~
#~ body += fields_table(self._meta.fields)
#~
#~ return body
IGNORED_ACTIONS = (actions.ShowTable, actions.SubmitDetail,
actions.ShowDetail,
actions.DeleteSelected,
actions.ShowInsert, actions.SubmitInsert)
def menuselection(mi):
return ":menuselection:`%s`" % menuselection_text(mi)
def actions_ul(action_list):
items = []
for ba in action_list:
label = ba.action.label
desc = "**%s** (" % str(label).strip()
if ba.action.action_name:
desc += "``%s``" % ba.action.action_name
desc += ", %s)" % typeref(ba.action.__class__)
if ba.action.help_text:
desc += " -- " + str(ba.action.help_text)
items.append(desc)
return rstgen.ul(items)
# from lino.core.menus import find_menu_item
def actors_overview_ul(model_reports):
user_type = UserTypes.get_by_value('900')
# deprecated
items = []
for tb in model_reports:
desc = actor_ref(tb)
#~ label = str(tb.title or tb.label)
#~ desc += " (%s)" % str(tb)
desc += " (%s)" % typeref(tb)
# mi = find_menu_item(tb.default_action)
mi = user_type.find_menu_item(tb.default_action)
if mi is not None:
desc += _(" (Menu %s)") % menuselection(mi)
#~ print(unicode(mi.label).strip())
if tb.help_text:
desc += " -- " + str(tb.help_text).strip()
items.append(desc)
return rstgen.ul(items)
def resolve_name(name):
l = name.split('.')
if len(l) == 1:
return 1, settings.SITE.plugins.get(name)
# return 1, dd.resolve_app(name)
if len(l) == 3:
model = settings.SITE.models.resolve(l[0] + '.' + l[1])
if model is None:
            raise Warning("Unknown name %s" % name)
return 3, model.get_data_elem(l[2])
return len(l), settings.SITE.models.resolve(name)
def form_lines():
yield '<script >'
class ddrefRole(XRefRole):
nodeclass = addnodes.pending_xref
innernodeclass = nodes.emphasis
def __call__(self, typ, rawtext, text, lineno, inliner,
options={}, content=[]):
typ = 'std:ref'
self._reporter = inliner.document.reporter
self._lineno = lineno
return XRefRole.__call__(self, typ, rawtext, text, lineno,
inliner, options, content)
def process_link(self, env, refnode, has_explicit_title, title, target):
"""Called after parsing title and target text, and creating the
reference node (given in *refnode*). This method can alter the
reference node and must return a new (or the same) ``(title, target)``
tuple.
"""
#~ print(20130901, refnode, has_explicit_title, title, target)
#~ 20130901 <pending_xref refdomain="" refexplicit="False" reftype="ddref"/> False cal.Event cal.Event
target = ws_re.sub(' ', target) # replace newlines or tabs by spaces
# ~ target = ' '.join(target.split()) # replace newlines or tabs by spaces
level, x = resolve_name(target)
if x is None:
msg = "Could not resolve name %r" % target
return [self._reporter.warning(msg, line=self._lineno), target]
# raise Exception(msg)
# lng = env.temp_data.get('language', env.config.language)
lng = CurrentLanguage.get_current_value(env)
with translation.override(lng):
if isinstance(x, models.Field):
text = utils.unescape(str(x.verbose_name))
target = model_name(x.model) + '.' + x.name
# print(target)
elif isinstance(x, Plugin):
text = utils.unescape(str(x.verbose_name))
target = settings.SITE.userdocs_prefix + target
elif isinstance(x, type) and issubclass(x, models.Model):
text = utils.unescape(str(x._meta.verbose_name))
target = model_name(x)
elif isinstance(x, type) and issubclass(x, actors.Actor):
text = utils.unescape(str(x.title or x.label))
target = actor_name(x)
elif isinstance(x, actions.Action):
text = utils.unescape(str(x.label))
target = actor_name(x)
else:
raise Exception("Don't know how to handle %r" % x)
if not has_explicit_title:
# avoid replacing title by the heading text
refnode['refexplicit'] = True
title = text
refnode['refwarn'] = False # never warn
#~ refnode['reftype'] = 'ref'
#~ title = "[%s]" % title
#~ if target == 'welfare.reception.waitingvisitors':
#~ print("20130907 ddref to %s : title=%r" % (target,title))
return title, target
class TempDataDirective(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
temp_data_key = None
@classmethod
def get_default_value(self, env):
return None
@classmethod
def get_current_value(cls, env):
return env.temp_data.get(
cls.temp_data_key,
cls.get_default_value(env))
def run(self):
env = self.state.document.settings.env
v = self.arguments[0].strip()
if v == 'None':
del env.temp_data[self.temp_data_key]
else:
env.temp_data[self.temp_data_key] = v
return []
class CurrentLanguage(TempDataDirective):
"""Tell Sphinx to switch to the specified language until the end of
this document.
"""
temp_data_key = 'language'
@classmethod
def get_default_value(cls, env):
return env.config.language
class CurrentProject(TempDataDirective):
"""Tell Sphinx to switch to the specified project until the end of
this document.
"""
temp_data_key = 'lino_project'
class Lino2rstDirective(Py2rstDirective):
"""Defines the :rst:dir:`lino2rst` directive."""
def get_context(self):
from django.conf import settings
context = super(Lino2rstDirective, self).get_context()
context.update(settings=settings)
context.update(settings.SITE.models)
context.update(dd=dd)
context.update(rt=rt)
return context
def output_from_exec(self, code):
from django.utils import translation
with translation.override(self.language):
return super(Lino2rstDirective, self).output_from_exec(code)
class ActorsOverviewDirective(Lino2rstDirective):
def get_rst(self):
user_type = UserTypes.get_by_value('900')
with translation.override(self.language):
#~ set_language(lng)
actor_names = ' '.join(self.content).split()
items = []
for an in actor_names:
cls = settings.SITE.models.resolve(an)
if not isinstance(cls, type):
raise Exception("%s is not an actor." % self.content[0])
desc = "**{0}** (:class:`{1} <{2}>`)".format(
force_text(cls.label),
cls.__name__,
cls.__module__ + '.' + cls.__name__
)
mi = user_type.find_menu_item(cls.default_action)
if mi is not None:
desc += _(" (Menu %s)") % menuselection(mi)
#~ print(str(mi.label).strip())
if cls.help_text:
desc += " : " + force_text(cls.help_text).strip()
# items.append("%s : %s" % (actor_ref(cls), cls.help_text or ''))
items.append(desc)
return rstgen.ul(items)
class ShowFieldsDirective(Lino2rstDirective):
def get_rst(self):
with translation.override(self.language):
names = ' '.join(self.content).split()
if len(names) > 1:
field_names = ' '.join(names[1:])
else:
field_names = None
return analyzer.show_fields(
names[0], field_names=field_names)
class FormDirective(Lino2rstDirective):
def get_rst(self):
level, cls = resolve_name(self.content[0])
s = ''
with translation.override(self.language):
s = '\n'.join(list(form_lines()))
return s
class ActorDirective(Lino2rstDirective):
#~ has_content = False
titles_allowed = True
#~ debug = True
def get_rst(self):
#~ from actordoc import get_actor_description
#~ from django.conf import settings
#~ from djangosite.dbutils import set_language
with translation.override(self.language):
level, cls = resolve_name(self.content[0])
if isinstance(cls, models.Field):
fld = cls
s = ''
name = str(fld.model) + '.' + fld.name
title = force_text(fld.verbose_name).strip()
s += "\n.. index::\n single: "
s += str(_('%(field)s (field in %(model)s)') % dict(
field=title, model=model_ref(fld.model)))
s += '\n\n'
s += rstgen.header(level, _("%s (field)") % title)
if len(self.content) > 1:
s += '\n'.join(self.content[1:])
s += '\n\n'
return s
if isinstance(cls, Plugin):
s = ''
title = str(cls.verbose_name)
s += "\n.. index::\n single: "
s += str(_('%s (app)') % title)
s += '\n\n.. _' + name + ':\n'
s += '\n'
s += rstgen.header(level, _("%s (app)") % title)
return s
if not isinstance(cls, type):
raise Exception("%s is not an actor." % self.content[0])
if issubclass(cls, models.Model):
model = cls
s = ''
name = model_name(model).lower()
title = force_text(model._meta.verbose_name)
s += "\n.. index::\n single: "
s += str(_('%(model)s (model in %(app)s)') % dict(
model=title, app=model._meta.app_label))
s += '\n\n'
s += '\n\n.. _' + name + ':\n'
s += '\n'
s += rstgen.header(level, _("%s (model)") % title)
s += '\n'
s += '\n:Internal name: ``%s``\n' % full_model_name(cls)
s += '\n:Implemented by: %s\n' % typeref(cls)
s += '\n'
if len(self.content) > 1:
s += '\n'.join(self.content[1:])
s += '\n\n'
model_reports = [
r for r in kernel.master_tables if r.model is cls]
model_reports += [r for r in kernel.slave_tables
if r.model is cls]
s += rstgen.boldheader(_("Views on %s") %
cls._meta.verbose_name)
s += actors_overview_ul(model_reports)
s += rstgen.boldheader(_("Fields in %s") %
cls._meta.verbose_name)
s += fields_ul(cls._meta.fields)
action_list = cls.get_default_table().get_actions()
action_list = [
ba for ba in action_list
if not isinstance(ba.action, IGNORED_ACTIONS)]
if action_list:
s += '\n'
s += rstgen.boldheader(_("Actions on %s") %
cls._meta.verbose_name)
s += actions_ul(action_list)
slave_tables = getattr(cls, '_lino_slaves', {}).values()
if slave_tables:
s += rstgen.boldheader(_("Tables referring to %s") %
cls._meta.verbose_name)
s += actors_overview_ul(slave_tables)
return s
if issubclass(cls, actors.Actor):
title = force_text(cls.label or cls.title)
indextext = _('%(actor)s (view in %(app)s)') % dict(
actor=title, app=cls.app_label)
name = actor_name(cls)
#~ if name == 'welfare.reception.waitingvisitors':
#~ self.debug = True
#~ print(20130907, name)
self.index_entries.append(('single', indextext, name, ''))
#~ self.add_ref_target(name,name)
s = ''
s += '\n\n.. _%s:\n\n' % name
s += rstgen.header(level, _("%s (view)") % title)
s += '\n:Internal name: ``%s`` (%s)\n' % (cls, typeref(cls))
if len(self.content) > 1:
s += '\n'.join(self.content[1:])
s += '\n\n'
s += '\n\n'
s += get_actor_description(cls)
s += '\n\n'
return s
raise Exception("Cannot handle actor %r." % cls)
def run(self):
self.index_entries = []
#~ index_entries is a list of 4-tuples of
#~ ``(entrytype, entryname, target, ignored)``
content = super(ActorDirective, self).run()
indexnode = addnodes.index(entries=self.index_entries)
return [indexnode] + content
def menupath_role(typ, rawtext, text, *args, **kwargs):
a = resolve_action(text)
user_type = UserTypes.get_by_value('900')
mi = user_type.find_menu_item(a)
if mi is None:
raise Exception("Unknown menu descriptor %s" % text)
text = menuselection_text(mi)
rawtext = menuselection(mi)
return menusel_role('menuselection', rawtext, text, *args, **kwargs)
def setup(app):
app.add_directive('form', FormDirective)
app.add_directive('actor', ActorDirective)
app.add_directive('actors_overview', ActorsOverviewDirective)
app.add_directive('fields_list', ShowFieldsDirective)
app.add_role('ddref', ddrefRole())
app.add_role('menupath', menupath_role)
app.add_directive('currentlanguage', CurrentLanguage)
app.add_directive('currentproject', CurrentProject)
app.add_directive('django2rst', Lino2rstDirective) # backward compat
app.add_directive('lino2rst', Lino2rstDirective)
|
py | 7dfc4cce7dd6e9b3da5442ef046ccd7f139d5ae9 | #
# This file is part of the onema.io evee Package.
# For the full copyright and license information,
# please view the LICENSE file that was distributed
# with this source code.
#
# @author Juan Manuel Torres <[email protected]>
#
from typing import Callable, Sequence, Any
from evee.abstract_event_dispatcher import AbstractEventDispatcher
from evee.abstract_event_subscriber import AbstractEventSubscriber
from evee.event import Event
from evee.exception import BadMethodCallError
class ImmutableEventDispatcher(AbstractEventDispatcher):
def __init__(self, dispatcher: AbstractEventDispatcher):
super().__init__()
self.__dispatcher = dispatcher
def dispatch(self, event_name: str, event: Event = None) -> Event:
return self.__dispatcher.dispatch(event_name, event)
def add_listener(self, event_name: str, listener: Callable = None, priority: int = 0):
raise BadMethodCallError('Unmodifiable event dispatcher must not be modified.')
def add_subscriber(self, subscriber: AbstractEventSubscriber):
raise BadMethodCallError('Unmodifiable event dispatcher must not be modified.')
def remove_listener(self, event_name: str, listener: Callable):
raise BadMethodCallError('Unmodifiable event dispatcher must not be modified.')
def remove_subscriber(self, subscriber: AbstractEventSubscriber):
raise BadMethodCallError('Unmodifiable event dispatcher must not be modified.')
def get_listeners(self, event_name: str = None) -> Sequence[Callable[[Event, str, Any], Event]]:
return self.__dispatcher.get_listeners(event_name)
def get_listener_priority(self, event_name: str, listener: Callable) -> int:
return self.__dispatcher.get_listener_priority(event_name, listener)
def has_listeners(self, event_name: str = None) -> bool:
return self.__dispatcher.has_listeners(event_name)
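# Usage sketch (illustrative only): freeze a configured dispatcher so that later
# code can still dispatch events but can no longer register or remove listeners.
# ``EventDispatcher`` is assumed to be the package's mutable implementation; the
# listener signature follows the ``get_listeners`` annotation above.
#
#     inner = EventDispatcher()
#     inner.add_listener('app.start', lambda event, name, dispatcher: event)
#     frozen = ImmutableEventDispatcher(inner)
#     frozen.dispatch('app.start', Event())      # delegates to the inner dispatcher
#     frozen.add_listener('app.start', print)    # raises BadMethodCallError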
|
py | 7dfc4cee027225302e24dcf33cf58c1da2dd0388 | #
# Warning: this is an alpha module and might be removed/renamed in
# later pyjamas versions
#
from __pyjamas__ import wnd, doc, JS, setCompilerOptions
from __javascript__ import ActiveXObject, XMLHttpRequest
from pyjamas import DOM
from __pyjamas__ import debugger
import sys
setCompilerOptions("noSourceTracking", "noLineTracking", "noStoreSource")
class AjaxError(RuntimeError):
pass
def createHttpRequest():
if JS("""typeof $wnd.XMLHttpRequest != 'undefined'"""):
# IE7+, Mozilla, Safari, ...
return JS("""new XMLHttpRequest()""")
# Check for IE6/ActiveX
try:
res = JS("""new ActiveXObject("Msxml2.XMLHTTP")""")
return res
except:
pass
return None
#
# load(url)
#
# @param url URL to load
# @param onreadystatechange function to be used for onreadystatechange
# @param on_load_fn function to be called on success, with parameters event, request
# @param async request mode
# @returns async == False: request object, async == True: None
#
def load(url, onreadystatechange=None, on_load_fn=None, async=False):
setCompilerOptions("noDebug")
wnd().status = ('Loading ' + url)
req = createHttpRequest()
if onreadystatechange is None:
def onreadystatechange(evnt):
if req.readyState==4 and (req.status == 200 or req.status == 0):
str = req.responseText
wnd().status = ('Loaded ' + url)
if not on_load_fn is None:
on_load_fn(evnt, req)
# next line is in JS() for IE6
JS("req.onreadystatechange = onreadystatechange;")
req.open("GET", url , async)
try:
req.send(None)
if async:
return None
while True:
if ( req.status == 200
or (req.status == 0 and req.responseText)
):
if not on_load_fn is None:
on_load_fn(None, req)
return req
if req.status != 0 or req.responseText != "":
break
except:
pass
raise AjaxError("Synchronous error", req.status)
def inject(values, namespace = None, names=None):
if namespace is None:
from __pyjamas__ import JS
namespace = JS("$pyjs.global_namespace")
values = dict(values)
if names is None:
for k in values:
v = values[k]
JS("""namespace[k] = v;""")
else:
for k in names:
v = values[k]
JS("""namespace[k] = v;""")
#
# activate_css(str)
#
# looks for any < link > in the input and sets up a corresponding link node
# in the main document.
#
def activate_css(targetnode):
scriptnodes = list(targetnode.getElementsByTagName('link'))
for LC in range(len(scriptnodes)):
sn = scriptnodes[LC]
sn.parentNode.removeChild(sn)
fileref = DOM.createElement('link')
        if hasattr(sn, "href"):
fileref.href = sn.href
else:
fileref.text = sn.text
fileref.rel = "stylesheet"
fileref.type = "text/css"
doc().getElementsByTagName("head").item(0).appendChild(fileref)
#
# activate_javascript(str)
#
# looks for any < script > in the input text and sets up a corresponding
# script node in the main document.
#
def activate_javascript(txt):
fileref = DOM.createElement('script')
fileref.text = txt
fileref.type = "text/javascript"
fileref.language = "javascript"
#fileref.defer = True
#debug = DOM.createElement('pre')
#debug.innerHTML = 'test'
#debug.innerHTML += "href:" + sn.src + " text:" + fileref.text
#var bodyels = doc().getElementsByTagName("body")
#bodyels[bodyels.length-1].appendChild(debug)
fileref = fileref.cloneNode(True)
doc().getElementsByTagName("head").item(0).appendChild(fileref)
def eval(str):
from __javascript__ import eval
return eval(str)
#
# ajax_eval(url)
#
# @param url load and activate url
# @returns readyState
#
def ajax_eval(url, on_load_fn, async):
setCompilerOptions("noDebug")
def onready(evnt, req):
str = req.responseText
activate_javascript(str)
if not on_load_fn is None:
on_load_fn()
load(url, None, onready, async)
__imported__ = {}
def ajax_import(url, namespace=None, names=None):
setCompilerOptions("noDebug")
if __imported__.has_key(url):
module = __imported__[url]
else:
req = load(url, None, None, False)
module = None
name_getter = []
if names is None:
names = []
for name in names:
name_getter.append("$pyjs$moduleObject['%s'] = %s;" % (name, name))
script = """(function ( ) {
$pyjs$moduleObject={};
%s;
%s
return $pyjs$moduleObject;
})();""" % (req.responseText, "\n".join(name_getter))
try:
module = eval(script)
except:
e = sys.exc_info()
raise AjaxError("Error in %s: %s" % (url, e.message))
__imported__[url] = module
inject(module, namespace, names)
# From here, just converted from dynamicajax.js
#
# pyjs_load_script
#
# @param url load script url
# @param module module name
# @param onload text of function to be eval/executed on successful load
#
def load_script(url, onload, async):
wnd().status = ('Loading ' + url)
def onload_fn():
        wnd().status = ('Loaded ' + url)
if not onload is None:
eval(onload)
return True
e = DOM.createElement("script")
e.src = url
e.type="text/javascript"
e.language = "javascript"
e.defer = async
e.onload = onload_fn
doc().getElementsByTagName("head")[0].appendChild(e)
#
# ajax_dlink_refresh(oj,url)
#
# @param id id of element for insert
# @param url load url
# @param timeout refresh timeout period, ms
# @returns readyState
#
# use these to overrun an existing timeout, so that
# we don't end up with several of them!
running_timeout = 0
timeout_idname = None
timeout_url = None
timeout_on_load_fn = None
redo_timeout = None
timeout_id = None
def ajax_dlink_refresh(idname, url, on_load_fn, timeout):
global running_timeout, timeout_idname, timeout_url, timeout_on_load_fn, redo_timeout, timeout_id
timeout_idname = idname
timeout_url = url
timeout_on_load_fn = on_load_fn
redo_timeout = timeout
if running_timeout > 0:
return
# FIXME: should use pyjamas.Timer.Timer
from __javascript__ import setTimeout
timeout_id = setTimeout(do_ajax_dlink_refresh, timeout)
running_timeout = 1
def do_ajax_dlink_refresh():
global running_timeout, timeout_id
if ajax_dlink(timeout_idname, timeout_url, timeout_on_load_fn) == 0:
timeout_id = None
running_timeout = 0
return
timeout_id = None
running_timeout = 0
ajax_dlink_refresh(timeout_idname, timeout_url, timeout_on_load_fn,
redo_timeout)
#
# ajax_dlink(oj,url)
#
# @param id id of element for insert
# @param url load url
# @returns readyState
#
def ajax_dlink(idname, url, on_load_fn):
global running_timeout, timeout_idname, timeout_url, timeout_on_load_fn, redo_timeout, timeout_id
from __pyjamas__ import doc
body = doc().body
from __javascript__ import clearTimeout
if timeout_id:
clearTimeout(timeout_id) # really important - get into a mess otherwise
def onreadystatechange():
if xhtoj.readyState == 4:
jsnode = 0
if xhtoj.status == 200:
txt = xhtoj.responseText
jsnode = None
if idname:
jsnode = DOM.getElementById(idname)
if jsnode is None:
jsnode = DOM.createElement('script')
#tst = DOM.createElement('html')
#tst.innerHTML = str
activate_javascript(txt)
if not on_load_fn is None:
wnd().alert(on_load_fn)
# eval(on_load_fn)
test_fn()
return 1
else:
jsnode = DOM.getElementById(idname)
if not jsnode is None:
jsnode.innerHTML = xhtoj.status
xhtoj = createHttpRequest()
xhtoj.onreadystatechange = onreadystatechange
xhtoj.open("GET", url , True )
xhtoj.send("")
return 0
|
py | 7dfc4e2c0b965ec88c75d6bb4c95c24ecbdf8a73 | # -*- coding: utf-8 -*-
'''
Edit ini files
:maintainer: <[email protected]>
:maturity: new
:depends: re
:platform: all
(for example /etc/sysctl.conf)
'''
# Import Python libs
from __future__ import print_function
from __future__ import absolute_import
import re
import json
from salt.utils.odict import OrderedDict
from salt.utils import fopen as _fopen
__virtualname__ = 'ini'
def __virtual__():
'''
Rename to ini
'''
return __virtualname__
ini_regx = re.compile(r'^\s*\[(.+?)\]\s*$', flags=re.M)
com_regx = re.compile(r'^\s*(#|;)\s*(.*)')
indented_regx = re.compile(r'(\s+)(.*)')
def set_option(file_name, sections=None, separator='='):
'''
Edit an ini file, replacing one or more sections. Returns a dictionary
containing the changes made.
file_name
path of ini_file
sections : None
A dictionary representing the sections to be edited ini file
The keys are the section names and the values are the dictionary
containing the options
If the Ini does not contain sections the keys and values represent the
options
separator : =
A character used to separate keys and values. Standard ini files use
the "=" character.
.. versionadded:: Carbon
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.set_option',
['path_to_ini_file', '{"section_to_change": {"key": "value"}}'])
CLI Example:
.. code-block:: bash
salt '*' ini.set_option /path/to/ini '{section_foo: {key: value}}'
'''
sections = sections or {}
changes = {}
inifile = _Ini.get_ini_file(file_name, separator=separator)
if not inifile:
changes.update({'error': 'ini file not found'})
return changes
changes = inifile.update(sections)
inifile.flush()
return changes
def get_option(file_name, section, option, separator='='):
'''
Get value of a key from a section in an ini file. Returns ``None`` if
no matching key was found.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_option',
[path_to_ini_file, section_name, option])
CLI Example:
.. code-block:: bash
salt '*' ini.get_option /path/to/ini section_name option_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
return inifile.get(section, {}).get(option, None)
def remove_option(file_name, section, option, separator='='):
'''
Remove a key/value pair from a section in an ini file. Returns the value of
the removed key, or ``None`` if nothing was removed.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.remove_option',
[path_to_ini_file, section_name, option])
CLI Example:
.. code-block:: bash
salt '*' ini.remove_option /path/to/ini section_name option_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
value = inifile.get(section, {}).pop(option, None)
inifile.flush()
return value
def get_section(file_name, section, separator='='):
'''
Retrieve a section from an ini file. Returns the section as dictionary. If
the section is not found, an empty dictionary is returned.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.get_section /path/to/ini section_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
ret = {}
for key, value in inifile.get(section, {}).iteritems():
if key[0] != '#':
ret.update({key: value})
return ret
def remove_section(file_name, section, separator='='):
'''
Remove a section in an ini file. Returns the removed section as dictionary,
or ``None`` if nothing was removed.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.remove_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.remove_section /path/to/ini section_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
section = inifile.pop(section, {})
inifile.flush()
ret = {}
for key, value in section.iteritems():
if key[0] != '#':
ret.update({key: value})
return ret
class _Section(OrderedDict):
def __init__(self, name, inicontents='', separator='=', commenter='#'):
super(_Section, self).__init__(self)
self.name = name
self.inicontents = inicontents
self.sep = separator
self.com = commenter
opt_regx_prefix = r'(\s*)(.+?)\s*'
opt_regx_suffix = r'\s*(.*)\s*'
self.opt_regx_str = r'{0}(\{1}){2}'.format(
opt_regx_prefix, self.sep, opt_regx_suffix
)
self.opt_regx = re.compile(self.opt_regx_str)
def refresh(self, inicontents=None):
comment_count = 1
unknown_count = 1
curr_indent = ''
inicontents = inicontents or self.inicontents
inicontents = inicontents.strip('\n')
if not inicontents:
return
for opt in self:
self.pop(opt)
for opt_str in inicontents.split('\n'):
# Match comments
com_match = com_regx.match(opt_str)
if com_match:
name = '#comment{0}'.format(comment_count)
self.com = com_match.group(1)
comment_count += 1
self.update({name: opt_str})
continue
# Add indented lines to the value of the previous entry.
indented_match = indented_regx.match(opt_str)
if indented_match:
indent = indented_match.group(1).replace('\t', ' ')
if indent > curr_indent:
options = self.keys()
if options:
prev_opt = options[-1]
value = self.get(prev_opt)
self.update({prev_opt: '\n'.join((value, opt_str))})
continue
# Match normal key+value lines.
opt_match = self.opt_regx.match(opt_str)
if opt_match:
curr_indent, name, self.sep, value = opt_match.groups()
curr_indent = curr_indent.replace('\t', ' ')
self.update({name: value})
continue
# Anything remaining is a mystery.
name = '#unknown{0}'.format(unknown_count)
self.update({name: opt_str})
unknown_count += 1
def _uncomment_if_commented(self, opt_key):
# should be called only if opt_key is not already present
# will uncomment the key if commented and create a place holder
# for the key where the correct value can be update later
# used to preserve the ordering of comments and commented options
        # and to make sure options without sections go above any section
options_backup = OrderedDict()
comment_index = None
for key, value in self.iteritems():
if comment_index is not None:
options_backup.update({key: value})
continue
if '#comment' not in key:
continue
opt_match = self.opt_regx.match(value.lstrip('#'))
if opt_match and opt_match.group(2) == opt_key:
comment_index = key
for key in options_backup:
self.pop(key)
self.pop(comment_index, None)
super(_Section, self).update({opt_key: None})
for key, value in options_backup.iteritems():
super(_Section, self).update({key: value})
def update(self, update_dict):
changes = {}
for key, value in update_dict.iteritems():
# Ensure the value is either a _Section or a string
if hasattr(value, 'iteritems'):
sect = _Section(
name=key, inicontents='',
separator=self.sep, commenter=self.com
)
sect.update(value)
value = sect
value_plain = value.as_dict()
else:
value = str(value)
value_plain = value
if key not in self:
changes.update({key: {'before': None,
'after': value_plain}})
# If it's not a section, it may already exist as a
# commented-out key/value pair
if not hasattr(value, 'iteritems'):
self._uncomment_if_commented(key)
super(_Section, self).update({key: value})
else:
curr_value = self.get(key, None)
if isinstance(curr_value, _Section):
sub_changes = curr_value.update(value)
if sub_changes:
changes.update({key: sub_changes})
else:
if curr_value != value:
changes.update({key: {'before': curr_value,
'after': value_plain}})
super(_Section, self).update({key: value})
return changes
def gen_ini(self):
yield '\n[{0}]\n'.format(self.name)
sections_dict = OrderedDict()
for name, value in self.iteritems():
if com_regx.match(name):
yield '{0}\n'.format(value)
elif isinstance(value, _Section):
sections_dict.update({name: value})
else:
yield '{0}{1}{2}\n'.format(
name,
(
' {0} '.format(self.sep) if self.sep != ' '
else self.sep
),
value
)
for name, value in sections_dict.iteritems():
for line in value.gen_ini():
yield line
def as_ini(self):
return ''.join(self.gen_ini())
def as_dict(self):
return dict(self)
def dump(self):
print(str(self))
def __repr__(self, _repr_running=None):
_repr_running = _repr_running or {}
super_repr = super(_Section, self).__repr__(_repr_running)
return '\n'.join((super_repr, json.dumps(self, indent=4)))
def __str__(self):
return json.dumps(self, indent=4)
def __eq__(self, item):
return (isinstance(item, self.__class__) and
self.name == item.name)
def __ne__(self, item):
return not (isinstance(item, self.__class__) and
self.name == item.name)
class _Ini(_Section):
def __init__(self, name, inicontents='', separator='=', commenter='#'):
super(_Ini, self).__init__(name, inicontents, separator, commenter)
def refresh(self, inicontents=None):
inicontents = inicontents or _fopen(self.name).read()
if not inicontents:
return
# Remove anything left behind from a previous run.
for opt in self:
self.pop(opt)
inicontents = ini_regx.split(inicontents)
inicontents.reverse()
# Pop anything defined outside of a section (ie. at the top of
# the ini file).
super(_Ini, self).refresh(inicontents.pop())
for section_name, sect_ini in self._gen_tuples(inicontents):
sect_obj = _Section(
section_name, sect_ini, separator=self.sep
)
sect_obj.refresh()
self.update({sect_obj.name: sect_obj})
def flush(self):
with _fopen(self.name, 'w') as outfile:
ini_gen = self.gen_ini()
next(ini_gen)
outfile.writelines(ini_gen)
@staticmethod
def get_ini_file(file_name, separator='='):
inifile = _Ini(file_name, separator=separator)
inifile.refresh()
return inifile
@staticmethod
def _gen_tuples(list_object):
while True:
try:
key = list_object.pop()
value = list_object.pop()
except IndexError:
raise StopIteration
else:
yield key, value
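# Usage sketch (illustrative only): the module functions above can also be
# called directly, outside the salt CLI. The path below is a placeholder and
# the ini file is assumed to already exist with a ``[mysql]`` section.
#
#     set_option('/tmp/example.ini', {'mysql': {'user': 'frank'}})
#     get_option('/tmp/example.ini', 'mysql', 'user')      # -> 'frank'
#     get_section('/tmp/example.ini', 'mysql')             # -> {'user': 'frank'}
#     remove_option('/tmp/example.ini', 'mysql', 'user')   # -> 'frank'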
|
py | 7dfc50055b0ee02e1bb8677932aa89967f800388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_management_profile
short_description: Manage interface management profiles.
description:
- This module will allow you to manage interface management profiles on PAN-OS.
author: "Garfield Lee Freeman (@shinmog)"
version_added: "2.6"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
- Checkmode is supported.
- Panorama is supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.full_template_support
- paloaltonetworks.panos.fragments.state
options:
panorama_template:
description:
- B(Deprecated)
- Use I(template) instead.
- HORIZONTALLINE
- (Panorama only) The template name.
name:
description:
- The management profile name.
required: true
ping:
description:
- Enable ping
type: bool
telnet:
description:
- Enable telnet
type: bool
ssh:
description:
- Enable ssh
type: bool
http:
description:
- Enable http
type: bool
http_ocsp:
description:
- Enable http-ocsp
type: bool
https:
description:
- Enable https
type: bool
snmp:
description:
- Enable snmp
type: bool
response_pages:
description:
- Enable response pages
type: bool
userid_service:
description:
- Enable userid service
type: bool
userid_syslog_listener_ssl:
description:
- Enable userid syslog listener ssl
type: bool
userid_syslog_listener_udp:
description:
- Enable userid syslog listener udp
type: bool
permitted_ip:
description:
- The list of permitted IP addresses
type: list
commit:
description:
- Perform a commit if a change is made.
type: bool
default: false
'''
EXAMPLES = '''
- name: ensure mngt profile foo exists and allows ping and ssh
panos_management_profile:
provider: '{{ provider }}'
name: 'foo'
ping: true
ssh: true
- name: make sure mngt profile bar does not exist
panos_management_profile:
provider: '{{ provider }}'
name: 'bar'
state: 'absent'
'''
RETURN = '''
# Default return values.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import get_connection
try:
from pandevice.network import ManagementProfile
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
helper = get_connection(
template=True,
template_stack=True,
with_classic_provider_spec=True,
with_state=True,
min_pandevice_version=(0, 8, 0),
argument_spec=dict(
name=dict(required=True),
ping=dict(type='bool'),
telnet=dict(type='bool'),
ssh=dict(type='bool'),
http=dict(type='bool'),
http_ocsp=dict(type='bool'),
https=dict(type='bool'),
snmp=dict(type='bool'),
response_pages=dict(type='bool'),
userid_service=dict(type='bool'),
userid_syslog_listener_ssl=dict(type='bool'),
userid_syslog_listener_udp=dict(type='bool'),
permitted_ip=dict(type='list'),
commit=dict(type='bool', default=False),
# TODO(gfreeman) - Removed in the next role release.
panorama_template=dict(),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# TODO(gfreeman) - Removed when "panorama_template" is removed.
if module.params['panorama_template'] is not None:
module.deprecate('Param "panorama_template" is deprecated; use "template"', '2.12')
if module.params['template'] is not None:
msg = [
'Both "template" and "panorama_template" have been given',
'Specify one or the other, not both.',
]
module.fail_json(msg='. '.join(msg))
module.params['template'] = module.params['panorama_template']
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
# Build the object based on the spec.
obj = ManagementProfile(
*[module.params[x] for x in (
'name', 'ping', 'telnet', 'ssh', 'http', 'http_ocsp', 'https',
'snmp', 'response_pages', 'userid_service',
'userid_syslog_listener_ssl', 'userid_syslog_listener_udp',
'permitted_ip')])
parent.add(obj)
# Retrieve current config.
try:
profiles = ManagementProfile.refreshall(parent, add=False)
except PanDeviceError as e:
module.fail_json(msg='Failed refresh: {0}'.format(e))
# Perform requested action.
changed, diff = helper.apply_state(obj, profiles, module)
if changed and module.params['commit']:
helper.commit(module)
# Done.
module.exit_json(changed=changed, diff=diff, msg="Done")
if __name__ == '__main__':
main()
|
py | 7dfc505edf0e10d6f6712345ccb7ee2269d8e6a4 | # Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.openstack.watchers.security_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Michael Stair <[email protected]>
"""
from security_monkey.watchers.openstack.openstack_watcher import OpenStackWatcher
from security_monkey import app
from cloudaux.orchestration.openstack.security_group import get_security_group, FLAGS
class OpenStackSecurityGroup(OpenStackWatcher):
index = 'openstack_securitygroup'
i_am_singular = 'Security Group'
i_am_plural = 'Security Groups'
account_type = 'OpenStack'
def __init__(self, *args, **kwargs):
super(OpenStackSecurityGroup, self).__init__(*args, **kwargs)
self.ephemeral_paths = ["assigned_to"]
self.item_type = 'securitygroup'
self.service = 'network'
self.generator = 'security_groups'
self.detail = app.config.get('SECURITYGROUP_INSTANCE_DETAIL', 'FULL')
def get_method(self, item, **kwargs):
result = super(OpenStackSecurityGroup, self).get_method(item, **kwargs)
flags = FLAGS.RULES
if not self.detail == 'NONE':
kwargs['instance_detail'] = self.detail
flags = flags | FLAGS.INSTANCES
return get_security_group(result, flags=flags, **kwargs)
|
py | 7dfc5098c1a126dfdf07d3e62a1ea7ce28fad9d8 | import asyncio
import base64
import copy
import os
import subprocess
from aiohttp import web
from multidict import CIMultiDict
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from app.service.interfaces.i_file_svc import FileServiceInterface
from app.utility.base_service import BaseService
from app.utility.payload_encoder import xor_file, xor_bytes
FILE_ENCRYPTION_FLAG = '%encrypted%'
class FileSvc(FileServiceInterface, BaseService):
def __init__(self):
self.log = self.add_service('file_svc', self)
self.data_svc = self.get_service('data_svc')
self.special_payloads = dict()
self.encryptor = self._get_encryptor()
self.encrypt_output = False if self.get_config('encrypt_files') is False else True
self.packers = dict()
async def get_file(self, headers):
headers = CIMultiDict(headers)
if 'file' not in headers:
raise KeyError('File key was not provided')
packer = None
display_name = payload = headers.get('file')
if ':' in payload:
_, display_name = packer, payload = payload.split(':')
headers['file'] = payload
if any(payload.endswith(x) for x in [y for y in self.special_payloads if y.startswith('.')]):
payload, display_name = await self._operate_extension(payload, headers)
if self.is_uuid4(payload):
payload, display_name = self.get_payload_name_from_uuid(payload)
if payload in self.special_payloads:
payload, display_name = await self.special_payloads[payload](headers)
file_path, contents = await self.read_file(payload)
if packer:
if packer in self.packers:
file_path, contents = await self.get_payload_packer(packer).pack(file_path, contents)
else:
self.log.warning('packer <%s> not available for payload <%s>, returning unpacked' % (packer, payload))
if headers.get('xor_key'):
xor_key = headers['xor_key']
contents = xor_bytes(contents, xor_key.encode())
if headers.get('name'):
display_name = headers.get('name')
        display_name = self.remove_xored_extension(display_name)
return file_path, contents, display_name
async def save_file(self, filename, payload, target_dir, encrypt=True):
self._save(os.path.join(target_dir, filename), payload, encrypt)
async def create_exfil_sub_directory(self, dir_name):
path = os.path.join(self.get_config('exfil_dir'), dir_name)
if not os.path.exists(path):
os.makedirs(path)
return path
async def save_multipart_file_upload(self, request, target_dir):
try:
reader = await request.multipart()
while True:
field = await reader.next()
if not field:
break
_, filename = os.path.split(field.filename)
await self.save_file(filename, bytes(await field.read()), target_dir)
self.log.debug('Uploaded file %s/%s' % (target_dir, filename))
return web.Response()
except Exception as e:
self.log.debug('Exception uploading file: %s' % e)
async def find_file_path(self, name, location=''):
for plugin in await self.data_svc.locate('plugins', match=dict(enabled=True)):
for subd in ['', 'data']:
file_path = await self.walk_file_path(os.path.join('plugins', plugin.name, subd, location), name)
if file_path:
return plugin.name, file_path
file_path = await self.walk_file_path(os.path.join('data'), name)
if file_path:
return None, file_path
return None, await self.walk_file_path('%s' % location, name)
async def read_file(self, name, location='payloads'):
_, file_name = await self.find_file_path(name, location=location)
if file_name:
if self.is_extension_xored(file_name):
return name, xor_file(file_name)
return name, self._read(file_name)
raise FileNotFoundError
def read_result_file(self, link_id, location='data/results'):
buf = self._read(os.path.join(location, link_id))
return buf.decode('utf-8')
def write_result_file(self, link_id, output, location='data/results'):
output = bytes(output, encoding='utf-8')
self._save(os.path.join(location, link_id), output)
async def add_special_payload(self, name, func):
"""
Call a special function when specific payloads are downloaded
        :param name: payload name (or a file extension starting with '.') that should trigger the function
        :param func: callable to invoke when that payload is requested
:return:
"""
if callable(func): # Check to see if the passed function is already a callable function
self.special_payloads[name] = func
async def compile_go(self, platform, output, src_fle, arch='amd64', ldflags='-s -w', cflags='', buildmode='',
build_dir='.', loop=None):
env = copy.copy(os.environ)
env['GOARCH'] = arch
env['GOOS'] = platform
if cflags:
for cflag in cflags.split(' '):
name, value = cflag.split('=')
env[name] = value
args = ['go', 'build']
if buildmode:
args.append(buildmode)
if ldflags:
args.extend(['-ldflags', "{}".format(ldflags)])
args.extend(['-o', output, src_fle])
loop = loop if loop else asyncio.get_event_loop()
try:
await loop.run_in_executor(None, lambda: subprocess.check_output(args, cwd=build_dir, env=env))
except subprocess.CalledProcessError as e:
self.log.warning('Problem building golang executable {}: {} '.format(src_fle, e))
def get_payload_name_from_uuid(self, payload):
for t in ['standard_payloads', 'special_payloads']:
for k, v in self.get_config(prop=t, name='payloads').items():
if v['id'] == payload:
if v.get('obfuscation_name'):
return k, v['obfuscation_name'][0]
return k, k
return payload, payload
def get_payload_packer(self, packer):
return self.packers[packer].Packer(self)
def list_exfilled_files(self, startdir=None):
if not startdir:
startdir = self.get_config('exfil_dir')
if not os.path.exists(startdir):
return dict()
exfil_files = dict()
exfil_folders = [f.path for f in os.scandir(startdir) if f.is_dir()]
for d in exfil_folders:
exfil_key = d.split(os.sep)[-1]
exfil_files[exfil_key] = {}
for file in [f.path for f in os.scandir(d) if f.is_file()]:
exfil_files[exfil_key][file.split(os.sep)[-1]] = file
return exfil_files
@staticmethod
async def walk_file_path(path, target):
for root, _, files in os.walk(path):
if target in files:
return os.path.join(root, target)
xored_target = FileSvc.add_xored_extension(target)
if xored_target in files:
return os.path.join(root, xored_target)
return None
@staticmethod
def remove_xored_extension(filename):
if FileSvc.is_extension_xored(filename):
return filename.replace('.xored', '')
return filename
@staticmethod
def is_extension_xored(filename):
return filename.endswith('.xored')
@staticmethod
def add_xored_extension(filename):
if FileSvc.is_extension_xored(filename):
return filename
return '%s.xored' % filename
""" PRIVATE """
def _save(self, filename, content, encrypt=True):
if encrypt and (self.encryptor and self.encrypt_output):
content = bytes(FILE_ENCRYPTION_FLAG, 'utf-8') + self.encryptor.encrypt(content)
with open(filename, 'wb') as f:
f.write(content)
def _read(self, filename):
with open(filename, 'rb') as f:
buf = f.read()
if self.encryptor and buf.startswith(bytes(FILE_ENCRYPTION_FLAG, encoding='utf-8')):
buf = self.encryptor.decrypt(buf[len(FILE_ENCRYPTION_FLAG):])
return buf
def _get_encryptor(self):
generated_key = PBKDF2HMAC(algorithm=hashes.SHA256(),
length=32,
salt=bytes(self.get_config('crypt_salt'), 'utf-8'),
iterations=2 ** 20,
backend=default_backend())
return Fernet(base64.urlsafe_b64encode(generated_key.derive(bytes(self.get_config('encryption_key'), 'utf-8'))))
async def _operate_extension(self, payload, headers):
try:
target = '.' + payload.split('.')[-1]
return await self.special_payloads[target](self.get_services(), headers)
except Exception as e:
self.log.error('Error loading extension handler=%s, %s' % (payload, e))
def _go_vars(arch, platform):
return '%s GOARCH=%s %s GOOS=%s' % (_get_header(), arch, _get_header(), platform)
def _get_header():
return 'SET' if os.name == 'nt' else ''
|
py | 7dfc50c1c14a1f7bd212cd9ee2c3ebc9954affa0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BgpPeerStatusListResult(Model):
"""Response for list BGP peer status API service call.
:param value: List of BGP peers
:type value: list[~azure.mgmt.network.v2016_09_01.models.BgpPeerStatus]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[BgpPeerStatus]'},
}
def __init__(self, value=None):
super(BgpPeerStatusListResult, self).__init__()
self.value = value
|
py | 7dfc51d74c33fef55cb06f7fd7b4b09fda1e75d1 | # -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from pic.items import PicItem
class XhSpider(CrawlSpider):
name = 'xh'
    # Restrict the crawler to this domain only
allowed_domains = ['xiaohuar.com']
start_urls = ['http://www.xiaohuar.com/list-1-1.html']
rules = (
# Extract links matching 'category.php' (but not matching 'subsection.php')
# and follow links from them (since no callback means follow=True by default).
# Rule(LinkExtractor(allow=('category\.php', ), deny=('subsection\.php', ))),
# Extract links matching 'item.php' and parse them with the spider's method parse_item
# Rule(LinkExtractor(allow=('item\.php', )), callback='parse_item'),
        # Do not use the default `parse` method as the callback
        Rule(LinkExtractor(allow=(r'list-1-(\d)+\.html', )), callback='parse_item', follow=True),
)
def parse_item(self, response):
allPics = response.xpath('//div[@class="img"]/a')
for pic in allPics:
item = PicItem()
item['name'] = pic.xpath('./img/@alt').extract_first()
item['addr'] = response.urljoin(pic.xpath('./img/@src').extract_first())
            print(item['name'])
            print(item['addr'])
            # Yield the scraped data
yield item
#
        # # Collect all the link addresses on the page
# for url in response.xpath("//a/@href"):
# yield response.follow(url,callback=self.parse)
|
py | 7dfc5225ef8b5eacee5571c5c05863b2a3d7748e | """
This module lets you practice the ACCUMULATOR pattern
in its simplest classic forms:
SUMMING: total = total + number
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher, Mark Hays,
Aaron Wilkin, their colleagues, and Liz Stutz.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
def main():
""" Calls the TEST functions in this module. """
run_test_sum_cosines()
run_test_sum_square_roots()
def run_test_sum_cosines():
""" Tests the sum_cosines function. """
# -------------------------------------------------------------------------
# DONE: 2. Implement this function.
# It TESTS the sum_cosines function defined below.
# Include at least ** 3 ** tests.
#
# Use the same 4-step process as in implementing previous
# TEST functions, including the same way to print expected/actual.
# -------------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the sum_cosines function:')
print('--------------------------------------------------')
# Test 1:
expected = 0.13416
answer = sum_cosines(3)
print('Test 1 expected:', expected)
print(' actual: ', answer)
# Test 2:
expected = 1.54030
answer = sum_cosines(1)
print('Test 2 expected:', expected)
print(' actual: ', answer)
# Test 3:
expected = 1.12416
answer = sum_cosines(2)
print('Test 3 expected:', expected)
print(' actual: ', answer)
def sum_cosines(n):
"""
What comes in: A non-negative integer n.
What goes out: The sum of the cosines of the integers
0, 1, 2, 3, ... n, inclusive, for the given n.
Side effects: None.
Example:
If n is 3, this function returns
cos(0) + cos(1) + cos(2) + cos(3) which is about 0.13416.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# That is called TEST-DRIVEN DEVELOPMENT (TDD).
#
# No fair running the code of sum_cosines to GENERATE
# test cases; that would defeat the purpose of TESTING!
# -------------------------------------------------------------------------
total = 0
for k in range(n+1):
total = total + (math.cos(k))
return total
def run_test_sum_square_roots():
""" Tests the sum_square_roots function. """
# -------------------------------------------------------------------------
# DONE: 4. Implement this function.
# It TESTS the sum_square_roots function defined below.
# Include at least ** 3 ** tests.
#
# Use the same 4-step process as in implementing previous
# TEST functions, including the same way to print expected/actual.
# -------------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the sum_square_roots function:')
print('--------------------------------------------------')
# Test 1:
expected = 1.4142
answer = sum_square_roots(1)
print('Test 1 expected:', expected)
print(' actual: ', answer)
# Test 2:
expected = 11.854408
answer = sum_square_roots(5)
print('Test 2 expected:', expected)
print(' actual: ', answer)
# Test 3:
expected = 3.4142
answer = sum_square_roots(2)
print('Test 3 expected:', expected)
print(' actual: ', answer)
def sum_square_roots(n):
"""
What comes in: A non-negative integer n.
What goes out: The sum of the square roots of the integers
2, 4, 6, 8, ... 2n inclusive, for the given n.
So if n is 7, the last term of the sum is
the square root of 14 (not 7).
Side effects: None.
Example:
If n is 5, this function returns
sqrt(2) + sqrt(4) + sqrt(6) + sqrt(8) + sqrt(10),
which is about 11.854408.
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
# That is called TEST-DRIVEN DEVELOPMENT (TDD).
#
# No fair running the code of sum_square_roots to GENERATE
# test cases; that would defeat the purpose of TESTING!
# -------------------------------------------------------------------------
total = 0
for k in range(n + 1):
total = total + (math.sqrt(2 * k))
return total
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
py | 7dfc5257b9c0a49bc10e9089e77a00e4e7018744 | # -*- coding: utf-8 -*-
# @Time : 2020/12/22 11:01
# @Author : zyk
# @Email : [email protected]
# @File : __init__.py
# @Software: PyCharm
|
py | 7dfc53d1b304ad7c4d1500697cdb975b34b899c4 | # Copyright 2017-2020 Fodro
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import json
def create_member():
""""Create project"""
name = input("Name? ") # setup attributes
time = input("Time? ")
obj = input("Object? ")
cash = input("Cash? ")
member = { # create dictionary
'title': '------Project------',
'name': name,
'time': time,
'object': obj,
'cash': cash
}
return member
def save_by_json(infile, member):
""""Save dictionary from function create_member() to json"""
member_list = open(infile, mode='w', encoding='utf-8') # open file in write mode
json.dump(member, member_list) # dump list "members"
member_list.close() # close file
print("Successful")
def list_members(infile):
"""Print list of projects"""
member_list = open(infile, mode='r', encoding='utf-8') # open file in read mode
jlist = json.load(member_list) # load information from json
if len(jlist) != 0:
for project in jlist: # create cycle for
print(str(project['title'])) # print "border" between projects
print("Name: " + str(project['name'])) # print attributes of project
print("Time: " + str(project['time']))
print("Object: " + str(project['object']))
print("Cash: " + str(project['cash']))
else:
print("Empty")
member_list.close() # close file
def del_member(infile, member):
"""Delete project"""
found = False
del_mem = input("Type name of client: ") # take name of client
for i in member: # search project by name of client
if str(i['name']) == del_mem:
found = True
member.remove(i)
break
if not found:
print("Client not found")
else:
member_list = open(infile, mode='w', encoding='utf-8') # rewrite file
json.dump(member, member_list)
member_list.close()
print("Successful")
def edit(infile, member):
    """Edit attributes of an existing project"""
found = False
ed_mem = input("Enter name of client: ")
for i in member:
if i['name'] == ed_mem:
found = True
ans = "y"
while ans == "y":
ed_par = input("Enter attribute to change(name,time,object,cash): ")
value = input("Enter new value: ")
if ed_par == "name":
i['name'] = value
print("Successful")
elif ed_par == "time":
i['time'] = value
print("Successful")
elif ed_par == "object":
i['object'] = value
print("Successful")
elif ed_par == "cash":
i['cash'] = value
print("Successful")
else:
print("Attribute not found")
ans = input("Edit one more time? (y/n) ").lower()
break
if not found:
print("Client not found")
else:
member_list = open(infile, mode='w', encoding='utf-8') # rewrite file
json.dump(member, member_list)
member_list.close() |
py | 7dfc5525d2075372069e57c001111a3a39e5eecc | from __future__ import unicode_literals
import binascii
from nacl.secret import SecretBox
from pymacaroons import Caveat
from pymacaroons.utils import (
convert_to_bytes,
truncate_or_pad,
generate_derived_key,
sign_third_party_caveat,
)
from pymacaroons.exceptions import MacaroonUnmetCaveatException
from .base_third_party import (
BaseThirdPartyCaveatDelegate,
BaseThirdPartyCaveatVerifierDelegate,
)
class ThirdPartyCaveatDelegate(BaseThirdPartyCaveatDelegate):
def __init__(self, *args, **kwargs):
super(ThirdPartyCaveatDelegate, self).__init__(*args, **kwargs)
def add_third_party_caveat(self,
macaroon,
location,
key,
key_id,
**kwargs):
derived_key = truncate_or_pad(
generate_derived_key(convert_to_bytes(key))
)
old_key = truncate_or_pad(binascii.unhexlify(macaroon.signature_bytes))
box = SecretBox(key=old_key)
verification_key_id = box.encrypt(
derived_key, nonce=kwargs.get('nonce')
)
caveat = Caveat(
caveat_id=key_id,
location=location,
verification_key_id=verification_key_id,
version=macaroon.version
)
macaroon.caveats.append(caveat)
encode_key = binascii.unhexlify(macaroon.signature_bytes)
macaroon.signature = sign_third_party_caveat(
encode_key,
caveat._verification_key_id,
caveat._caveat_id
)
return macaroon
class ThirdPartyCaveatVerifierDelegate(BaseThirdPartyCaveatVerifierDelegate):
def __init__(self, *args, **kwargs):
super(ThirdPartyCaveatVerifierDelegate, self).__init__(*args, **kwargs)
def verify_third_party_caveat(self,
verifier,
caveat,
root,
macaroon,
discharge_macaroons,
signature):
caveat_macaroon = self._caveat_macaroon(caveat, discharge_macaroons)
caveat_key = self._extract_caveat_key(signature, caveat)
caveat_met = verifier.verify_discharge(
root,
caveat_macaroon,
caveat_key,
discharge_macaroons=discharge_macaroons
)
return caveat_met
def update_signature(self, signature, caveat):
return binascii.unhexlify(
sign_third_party_caveat(
signature,
caveat._verification_key_id,
caveat._caveat_id
)
)
def _caveat_macaroon(self, caveat, discharge_macaroons):
# TODO: index discharge macaroons by identifier
caveat_macaroon = next(
(m for m in discharge_macaroons
if m.identifier_bytes == caveat.caveat_id_bytes), None)
if not caveat_macaroon:
raise MacaroonUnmetCaveatException(
'Caveat not met. No discharge macaroon found for identifier: '
'{}'.format(caveat.caveat_id_bytes)
)
return caveat_macaroon
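    # Illustrative sketch only (not part of pymacaroons): the TODO above could
    # be addressed by building a lookup table once per verification, e.g.:
    #
    #   by_id = {m.identifier_bytes: m for m in discharge_macaroons}
    #   caveat_macaroon = by_id.get(caveat.caveat_id_bytes)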
def _extract_caveat_key(self, signature, caveat):
key = truncate_or_pad(signature)
box = SecretBox(key=key)
decrypted = box.decrypt(caveat._verification_key_id)
return decrypted
|
py | 7dfc5550783c451c902aa6e9765b4ce8b1e28c8f | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user setting a password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the authtoken object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False,
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
|
py | 7dfc55af75328775b1d9e9abc358301541231f7c | # Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
import json
from placebo.serializer import serialize, deserialize, utc
date_sample = {
"LoginProfile": {
"UserName": "baz",
"CreateDate": datetime.datetime(2015, 1, 4, 9, 1, 2, 0, tzinfo=utc),
}
}
date_json = """{"LoginProfile": {"CreateDate": {"__class__": "datetime", "day": 4, "hour": 9, "microsecond": 0, "minute": 1, "month": 1, "second": 2, "year": 2015}, "UserName": "baz"}}"""
class TestSerializers(unittest.TestCase):
def test_datetime_to_json(self):
result = json.dumps(date_sample, default=serialize, sort_keys=True)
self.assertEqual(result, date_json)
def test_datetime_from_json(self):
response = json.loads(date_json, object_hook=deserialize)
self.assertEqual(response, date_sample)
|
py | 7dfc55bc30c5b178b1a746cca5374718f0b8923e | import os,struct, signal
from typing import Any, Dict
import lldb
from lldb.plugins.scripted_process import ScriptedProcess
from lldb.plugins.scripted_process import ScriptedThread
class InvalidScriptedProcess(ScriptedProcess):
def __init__(self, target: lldb.SBTarget, args : lldb.SBStructuredData):
super().__init__(target, args)
def get_memory_region_containing_address(self, addr: int) -> lldb.SBMemoryRegionInfo:
return None
def get_thread_with_id(self, tid: int):
return {}
def get_registers_for_thread(self, tid: int):
return {}
def read_memory_at_address(self, addr: int, size: int) -> lldb.SBData:
return None
def get_loaded_images(self):
return self.loaded_images
def get_process_id(self) -> int:
return 666
def should_stop(self) -> bool:
return True
def is_alive(self) -> bool:
return True
def get_scripted_thread_plugin(self):
return InvalidScriptedThread.__module__ + "." + InvalidScriptedThread.__name__
class InvalidScriptedThread(ScriptedThread):
def __init__(self, process, args):
super().__init__(process, args)
def get_thread_id(self) -> int:
return 0x19
def get_name(self) -> str:
return InvalidScriptedThread.__name__ + ".thread-1"
def get_state(self) -> int:
return lldb.eStateInvalid
def get_stop_reason(self) -> Dict[str, Any]:
return { "type": lldb.eStopReasonSignal, "data": {
"signal": signal.SIGINT
} }
def get_stackframes(self):
class ScriptedStackFrame:
            def __init__(self, idx, cfa, pc, symbol_ctx):
self.idx = idx
self.cfa = cfa
self.pc = pc
self.symbol_ctx = symbol_ctx
symbol_ctx = lldb.SBSymbolContext()
frame_zero = ScriptedStackFrame(0, 0x42424242, 0x5000000, symbol_ctx)
self.frames.append(frame_zero)
        return self.frames[0:0]  # empty slice: this invalid thread reports no frames
def get_register_context(self) -> str:
return None
def __lldb_init_module(debugger, dict):
if not 'SKIP_SCRIPTED_PROCESS_LAUNCH' in os.environ:
debugger.HandleCommand(
"process launch -C %s.%s" % (__name__,
InvalidScriptedProcess.__name__))
else:
print("Name of the class that will manage the scripted process: '%s.%s'"
% (__name__, InvalidScriptedProcess.__name__)) |
py | 7dfc55f4e191ade9903cd88b9e55d6fd1ef47858 | from App import App
import options
def main():
app = App(options)
app.generate_nodes()
app.start()
if __name__ == '__main__':
main()
|
py | 7dfc56c8e87f421332c19c72342a1970b207aa41 | from ultimate_tictactoe.ultimate_tictactoe import env, raw_env
|
py | 7dfc57123d7fc8f773b08ebdd62a55b6db11b88f | import numpy
from generate import *
def generate():
vectors = []
x, y = random_complex64(256), random_complex64(256)
vectors.append(TestVector([], [x, y], [x + y], "2 256 ComplexFloat32 inputs, 256 ComplexFloat32 output"))
x, y = random_float32(256), random_float32(256)
vectors.append(TestVector([], [x, y], [x + y], "2 256 Float32 inputs, 256 Float32 output"))
return BlockSpec("AddBlock", vectors, 1e-6)
|