hexsha (string, 40) | size (int64, 3-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3-972) | max_stars_repo_name (string, 6-130) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k, ⌀) | max_stars_repo_stars_event_min_datetime (string, 24, ⌀) | max_stars_repo_stars_event_max_datetime (string, 24, ⌀) | max_issues_repo_path (string, 3-972) | max_issues_repo_name (string, 6-130) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-116k, ⌀) | max_issues_repo_issues_event_min_datetime (string, 24, ⌀) | max_issues_repo_issues_event_max_datetime (string, 24, ⌀) | max_forks_repo_path (string, 3-972) | max_forks_repo_name (string, 6-130) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k, ⌀) | max_forks_repo_forks_event_min_datetime (string, 24, ⌀) | max_forks_repo_forks_event_max_datetime (string, 24, ⌀) | content (string, 3-1.03M) | avg_line_length (float64, 1.13-941k) | max_line_length (int64, 2-941k) | alphanum_fraction (float64, 0-1)
c1667eccd6f32cd7ccfd94c268837b5cda92f3bc | 2,598 | py | Python | Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBiasAAG_cff.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | ["Apache-2.0"] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBiasAAG_cff.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | ["Apache-2.0"] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | Calibration/TkAlCaRecoProducers/python/ALCARECOSiStripCalMinBiasAAG_cff.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | ["Apache-2.0"] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z |
import FWCore.ParameterSet.Config as cms
# Set the HLT paths
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOSiStripCalMinBiasAAGHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    andOr = True, ## choose logical OR between trigger bits
## HLTPaths = [
## #Minimum Bias
## "HLT_MinBias*"
## ],
eventSetupPathsKey = 'SiStripCalMinBiasAAG',
    throw = False # tolerate triggers listed above that are not present in the menu
)
# Select only events where tracker had HV on (according to DCS bit information)
# AND respective partition is in the run (according to FED information)
import CalibTracker.SiStripCommon.SiStripDCSFilter_cfi
DCSStatusForSiStripCalMinBiasAAG = CalibTracker.SiStripCommon.SiStripDCSFilter_cfi.siStripDCSFilter.clone()
# Select only good tracks
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOSiStripCalMinBiasAAG = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone()
ALCARECOSiStripCalMinBiasAAG.filter = True ##do not store empty events
ALCARECOSiStripCalMinBiasAAG.src = 'generalTracks'
ALCARECOSiStripCalMinBiasAAG.applyBasicCuts = True
ALCARECOSiStripCalMinBiasAAG.ptMin = 0.8 ##GeV
ALCARECOSiStripCalMinBiasAAG.nHitMin = 6 ## at least 6 hits required
ALCARECOSiStripCalMinBiasAAG.chi2nMax = 10.
ALCARECOSiStripCalMinBiasAAG.GlobalSelector.applyIsolationtest = False
ALCARECOSiStripCalMinBiasAAG.GlobalSelector.applyGlobalMuonFilter = False
ALCARECOSiStripCalMinBiasAAG.GlobalSelector.applyJetCountFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyMassrangeFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyChargeFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyAcoplanarityFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyMissingETFilter = False
# Sequence #
seqALCARECOSiStripCalMinBiasAAG = cms.Sequence(ALCARECOSiStripCalMinBiasAAGHLT*
DCSStatusForSiStripCalMinBiasAAG *
ALCARECOSiStripCalMinBiasAAG)
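# Added note (sketch, not part of the original config): '*' in cms.Sequence expresses
# sequential dependence, so when this sequence runs inside a cms.Path the HLT filter and
# the DCS filter execute before the track selector, and events failing either filter do
# not reach it.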
## customizations for the pp_on_AA eras
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
(pp_on_XeXe_2017 | pp_on_AA_2018).toModify(ALCARECOSiStripCalMinBiasAAGHLT,
eventSetupPathsKey='SiStripCalMinBiasAAGHI'
)
avg_line_length: 50.941176 | max_line_length: 122 | alphanum_fraction: 0.765589
40cdcd1898605a4abeff81b1aa7828f19641802a | 5,596 | py | Python | contentcuration/contentcuration/tests/test_channelcache.py | nucleogenesis/studio | 4b892bf140fd93ceec869f2422b460fe2770bba5 | ["MIT"] | null | null | null | contentcuration/contentcuration/tests/test_channelcache.py | nucleogenesis/studio | 4b892bf140fd93ceec869f2422b460fe2770bba5 | ["MIT"] | 4 | 2016-05-06T17:19:30.000Z | 2019-03-15T01:51:24.000Z | contentcuration/contentcuration/tests/test_channelcache.py | nucleogenesis/studio | 4b892bf140fd93ceec869f2422b460fe2770bba5 | ["MIT"] | 4 | 2016-10-18T22:49:08.000Z | 2019-09-17T11:20:51.000Z |
#!/usr/bin/env python
#
# These are tests for the ChannelCache class.
#
from .base import StudioTestCase
from .testdata import channel
from .testdata import node
from contentcuration.utils.channelcache import ChannelCacher
class ChannelCacherTestCase(StudioTestCase):
NUM_INITIAL_PUBLIC_CHANNELS = 2
def setUp(self):
super(ChannelCacherTestCase, self).setUp()
self.channels = []
for _ in range(self.NUM_INITIAL_PUBLIC_CHANNELS):
c = channel().make_public(bypass_signals=True)
self.channels.append(c)
def test_returns_public_channels(self):
"""
Returns the list of public channels.
"""
real_channel_ids = sorted([c.id for c in self.channels])
cached_channel_ids = sorted([c.id for c in ChannelCacher.get_public_channels()])
assert (real_channel_ids # the channels we know are public...
== cached_channel_ids) # ...should be present in get_public_channels
def test_new_public_channel_not_in_cache(self):
"""
Check that our cache is indeed a cache by not returning any new public
channels created after regenerating our cache.
"""
# force fill our public channel cache
ChannelCacher.regenerate_public_channel_cache()
# create our new channel and bypass signals when creating it
new_public_channel = channel()
new_public_channel.make_public(bypass_signals=True)
# fetch our cached channel list
cached_channels = ChannelCacher.get_public_channels()
# make sure our new public channel isn't in the cache
assert new_public_channel not in cached_channels
class ChannelTokenCacheTestCase(StudioTestCase):
"""
Tests for caching tokens using the ChannelSpecificCacher proxy object.
"""
def setUp(self):
super(ChannelTokenCacheTestCase, self).setUp()
self.channel = channel()
def test_channel_get_human_token_returns_token_if_present(self):
"""
Check that cache.get_human_token() returns the same thing as
the real channel.get_human_token().
"""
c = self.channel
c.make_token()
ccache = ChannelCacher.for_channel(c)
assert ccache.get_human_token() == c.get_human_token()
def test_channel_get_channel_id_token_returns_channel_id_token(self):
"""
Check that cache.get_channel_id_token() returns the same thing as
the real channel.get_channel_id_token().
"""
c = self.channel
c.make_token()
ccache = ChannelCacher.for_channel(c)
assert ccache.get_channel_id_token() == c.get_channel_id_token()
class ChannelResourceCountCacheTestCase(StudioTestCase):
def setUp(self):
super(ChannelResourceCountCacheTestCase, self).setUp()
self.channel = channel()
def test_get_resource_count_returns_same_as_channel_get_resource_count(self):
"""
Check that get_resource_count() returns the same thing as
channel.get_resource_count() when cache is unfilled yet. That should be
the case on a newly created channel.
"""
ccache = ChannelCacher.for_channel(self.channel)
assert ccache.get_resource_count() == self.channel.get_resource_count()
def test_get_resource_count_is_really_a_cache(self):
"""
Check that our count is wrong when we insert a new content node.
"""
ccache = ChannelCacher.for_channel(self.channel)
# fill our cache with a value first by calling get_resource_count()
ccache.get_resource_count()
# add our new content node
node(
parent=self.channel.main_tree,
data={
"kind_id": "video",
"node_id": "nicevid",
"title": "Bad vid",
}
)
# check that our cache's count is now less than the real count
assert ccache.get_resource_count() < self.channel.get_resource_count()
class ChannelGetDateModifiedCacheTestCase(StudioTestCase):
"""
Tests for ChannelCacher.get_date_modified()
"""
def setUp(self):
super(ChannelGetDateModifiedCacheTestCase, self).setUp()
self.channel = channel()
def test_returns_the_same_as_real_get_date_modified(self):
"""
When called with the cache unfilled, ChannelCacher.get_date_modified()
should return the same thing as channel.get_date_modified().
"""
ccache = ChannelCacher.for_channel(self.channel)
assert ccache.get_date_modified() == self.channel.get_date_modified()
def test_get_date_modified_really_is_a_cache(self):
"""
Check that the cache is really a cache by seeing if the cache value is not
the same as channel.get_date_modified() when we add a new node. If it
gets updated, then the cache is either too short lived, or it's not
        really a cache at all!
"""
ccache = ChannelCacher.for_channel(self.channel)
# fill the cache by calling get_date_modified once
ccache.get_date_modified()
# add a new node to the channel
node(
parent=self.channel.main_tree,
data={
"node_id": "videoz",
"title": "new vid",
"kind_id": "video",
}
)
        # check that the cached modified date is no newer than the channel's real modified date
assert ccache.get_date_modified() <= self.channel.get_date_modified()
avg_line_length: 33.508982 | max_line_length: 88 | alphanum_fraction: 0.658327
a94307161d43171bddb64e92bbaf491ea0989b0f | 2,742 | py | Python | python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py | Shaun2016/Paddle | b963903806c8a6694df79b42aaab6578a0ef6afb | ["Apache-2.0"] | null | null | null | python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py | Shaun2016/Paddle | b963903806c8a6694df79b42aaab6578a0ef6afb | ["Apache-2.0"] | null | null | null | python/paddle/fluid/tests/custom_kernel/custom_kernel_dot_c_setup.py | Shaun2016/Paddle | b963903806c8a6694df79b42aaab6578a0ef6afb | ["Apache-2.0"] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from paddle.fluid import core
from distutils.sysconfig import get_python_lib
from distutils.core import setup, Extension
from setuptools.command.build_ext import build_ext
# refer: https://note.qidong.name/2018/03/setup-warning-strict-prototypes
# Avoid a gcc warning below:
# cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid
# for C/ObjC but not for C++
class BuildExt(build_ext):
def build_extensions(self):
if '-Wstrict-prototypes' in self.compiler.compiler_so:
self.compiler.compiler_so.remove('-Wstrict-prototypes')
super(BuildExt, self).build_extensions()
# cc flags
paddle_extra_compile_args = [
'-std=c++14',
'-shared',
'-fPIC',
'-Wno-parentheses',
'-DPADDLE_WITH_CUSTOM_KERNEL',
]
if core.is_compiled_with_npu():
paddle_extra_compile_args += ['-D_GLIBCXX_USE_CXX11_ABI=0']
# include path
site_packages_path = get_python_lib()
paddle_custom_kernel_include = [
os.path.join(site_packages_path, 'paddle', 'include'),
]
# include path third_party
compile_third_party_path = os.path.join(os.environ['PADDLE_ROOT'],
'build/third_party')
paddle_custom_kernel_include += [
os.path.join(compile_third_party_path, 'boost/src/extern_boost'), # boost
os.path.join(compile_third_party_path, 'install/gflags/include'), # gflags
os.path.join(compile_third_party_path, 'install/glog/include'), # glog
]
# libs path
paddle_custom_kernel_library_dir = [
os.path.join(site_packages_path, 'paddle', 'fluid'),
]
# libs
libs = [':core_avx.so']
if not core.has_avx_core and core.has_noavx_core:
libs = [':core_noavx.so']
custom_kernel_dot_module = Extension(
'custom_kernel_dot',
sources=['custom_kernel_dot_c.cc'],
include_dirs=paddle_custom_kernel_include,
library_dirs=paddle_custom_kernel_library_dir,
libraries=libs,
extra_compile_args=paddle_extra_compile_args)
setup(name='custom_kernel_dot_c',
version='1.0',
      description='custom kernel for compiling',
cmdclass={'build_ext': BuildExt},
ext_modules=[custom_kernel_dot_module])
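# Typical invocation (added note; the path below is a placeholder, and PADDLE_ROOT must
# point at a Paddle build tree since it is read above to locate third_party headers):
#   PADDLE_ROOT=/path/to/Paddle python custom_kernel_dot_c_setup.py build_ext --inplace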
avg_line_length: 33.439024 | max_line_length: 79 | alphanum_fraction: 0.735959
875dfd3c233c3f3839e491d2e6a1e0baee5f40b2 | 22,350 | py | Python | pyflaski/pyflaski/idendrogram.py | mpg-age-bioinformatics/flaski | f56e00dd80d8706ecb8593ba6585a97eed881896 | ["MIT"] | 9 | 2020-08-03T01:22:59.000Z | 2022-03-03T02:02:04.000Z | pyflaski/pyflaski/idendrogram.py | mpg-age-bioinformatics/flaski | f56e00dd80d8706ecb8593ba6585a97eed881896 | ["MIT"] | 79 | 2020-06-03T06:34:46.000Z | 2021-09-22T13:31:43.000Z | pyflaski/pyflaski/idendrogram.py | mpg-age-bioinformatics/flaski | f56e00dd80d8706ecb8593ba6585a97eed881896 | ["MIT"] | 5 | 2020-10-05T10:20:23.000Z | 2022-03-01T14:23:12.000Z |
#from matplotlib.figure import Figure
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from scipy.spatial.distance import pdist
import scipy.cluster.hierarchy as sch
from collections import OrderedDict
import numpy as np
import sys
def GET_COLOR(x):
if str(x)[:3].lower() == "rgb":
vals=x.split("rgb(")[-1].split(")")[0].split(",")
vals=[ float(s.strip(" ")) for s in vals ]
return vals
else:
return str(x)
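# Illustrative behaviour of GET_COLOR (added sketch, not part of the original module):
# an "rgb(...)" string is parsed into a list of floats, anything else is returned as a
# plain string.
#   >>> GET_COLOR("rgb(255, 0, 0)")
#   [255.0, 0.0, 0.0]
#   >>> GET_COLOR("steelblue")
#   'steelblue'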
def GET_COLORLIST(rgb,fillcolor,tmp,pa,pab):
if pa[rgb] != "":
color=GET_COLOR(pa[rgb]).split(",")
else:
if pa[fillcolor]=="None":
color=[None]
else:
color=[pa[fillcolor]]
return color
def make_figure(df,pa):
"""Generates figure.
Args:
df (pandas.core.frame.DataFrame): Pandas DataFrame containing the input data.
pa (dict): A dictionary of the style { "argument":"value"} as outputted by `figure_defaults`.
Returns:
        A Plotly figure
    """
"""
#UPLOAD ARGUMENTS
# for large datasets one needs to increase the recursion limit from 3000 to 10000.
# the problem here is that this
#limit = sys.getrecursionlimit()
#print(limit)
#sys.setrecursionlimit(3000)
tmp=df.copy()
fig = go.Figure( )
# MAIN FIGURE
#Load checkboxes
pab={}
for arg in ["show_legend","upper_axis","lower_axis","left_axis","right_axis",\
"tick_left_axis","tick_lower_axis","tick_upper_axis","tick_right_axis"]:
if pa[arg] in ["off",".off"]:
pab[arg]=False
else:
pab[arg]=True
#Load booleans
booleans=[]
for arg in booleans:
if pa[arg]=="False":
pab[arg]=False
elif pa[arg]=="True":
pab[arg]=True
else:
pab[arg]=pa[arg]
#Load floats
floats=["color_threshold","x","y","axis_line_width","ticks_line_width","opacity","ticks_length","x_lower_limit","x_upper_limit","y_lower_limit","y_upper_limit","spikes_thickness","xticks_rotation",\
"yticks_rotation","xticks_fontsize","yticks_fontsize","grid_width","legend_borderwidth","legend_tracegroupgap","legend_x",\
"legend_y","fig_width","fig_height","marker_opacity","marker_size","marker_line_width","marker_line_outlierwidth"]
for a in floats:
if pa[a] == "" or pa[a]=="None" or pa[a]==None:
pab[a]=None
else:
pab[a]=float(pa[a])
#Load integers
integers=["label_fontsize","legend_fontsize","legend_title_fontsize","title_fontsize","maxxticks","maxyticks"]
for a in integers:
if pa[a] == "" or pa[a]=="None" or pa[a]==None:
pab[a]=None
else:
pab[a]=int(pa[a])
#Load Nones
possible_nones=["title_fontcolor","axis_line_color","ticks_color","spikes_color","label_fontcolor",\
"paper_bgcolor","plot_bgcolor","grid_color","legend_bgcolor","legend_bordercolor","legend_fontcolor","legend_title_fontcolor",\
"title_fontfamily","label_fontfamily","legend_fontfamily","legend_title_fontfamily","marker_outliercolor","marker_fillcolor",\
"marker_line_color","marker_line_outliercolor"]
for p in possible_nones:
if pa[p] == "None" or pa[p]=="Default" :
pab[p]=None
else:
pab[p]=pa[p]
#MAIN BODY
color=GET_COLORLIST("color_rgb","color_value",tmp,pa,pab)
if pa["labels"]!="":
labels=pa["labels"].split(",")
else:
if pa["labelcol"]=="None":
labels=[""]*tmp.shape[0]
else:
labels=list(tmp[pa["labelcol"]])
fig=ff.create_dendrogram(tmp[pa["datacols"]],orientation=pa["orientation"],colorscale=color,color_threshold=pab["color_threshold"],\
distfun=lambda x: pdist(x, pa["dist_func"]),linkagefun=lambda x: sch.linkage(x, pa["link_func"]),labels=labels)
#UPDATE LAYOUT OF PLOTS
#Figure size
fig.update_layout( width=pab["fig_width"], height=pab["fig_height"] ) # autosize=False,
#Update title
title=dict(text=pa["title"],font=dict(family=pab["title_fontfamily"],size=pab["title_fontsize"],color=pab["title_fontcolor"]),\
xref=pa["xref"],yref=pa["yref"],x=pab["x"],y=pab["y"],xanchor=pa["title_xanchor"],yanchor=pa["title_yanchor"])
fig.update_layout(title=title)
#Update axes
fig.update_xaxes(zeroline=False, showline=pab["lower_axis"], linewidth=pab["axis_line_width"], linecolor=pab["axis_line_color"])
fig.update_yaxes(zeroline=False, showline=pab["left_axis"], linewidth=pab["axis_line_width"], linecolor=pab["axis_line_color"])
#Update ticks
if pab["tick_lower_axis"]==False and pab["tick_right_axis"]==False and pab["tick_left_axis"]==False and pab["tick_upper_axis"]==False:
pa["ticks_direction_value"]=""
ticks=""
else:
ticks=pa["ticks_direction_value"]
fig.update_xaxes(ticks=ticks, tickwidth=pab["ticks_line_width"], tickcolor=pab["ticks_color"], ticklen=pab["ticks_length"])
fig.update_yaxes(ticks=ticks, tickwidth=pab["ticks_line_width"], tickcolor=pab["ticks_color"], ticklen=pab["ticks_length"])
#Update mirror property of axis based on ticks and axis selected by user
#Determines if the axis lines or/and ticks are mirrored to the opposite side of the plotting area.
# If "True", the axis lines are mirrored. If "ticks", the axis lines and ticks are mirrored. If "False", mirroring is disable.
# If "all", axis lines are mirrored on all shared-axes subplots. If "allticks", axis lines and ticks are mirrored on all shared-axes subplots.
if pab["upper_axis"]==True and pab["tick_upper_axis"]==True:
fig.update_xaxes(mirror="ticks")
elif pab["upper_axis"]==True and pab["tick_upper_axis"]==False:
fig.update_xaxes(mirror=True)
else:
fig.update_xaxes(mirror=False)
if pab["right_axis"]==True and pab["tick_right_axis"]==True:
fig.update_yaxes(mirror="ticks")
elif pab["right_axis"]==True and pab["tick_right_axis"]==False:
fig.update_yaxes(mirror=True)
else:
        fig.update_yaxes(mirror=False)
if (pa["x_lower_limit"]!="") and (pa["x_upper_limit"]!="") :
xmin=pab["x_lower_limit"]
xmax=pab["x_upper_limit"]
fig.update_xaxes(range=[xmin, xmax])
if (pa["y_lower_limit"]!="") and (pa["y_upper_limit"]!="") :
ymin=pab["y_lower_limit"]
ymax=pab["y_upper_limit"]
fig.update_yaxes(range=[ymin, ymax])
if pa["maxxticks"]!="":
fig.update_xaxes(nticks=pab["maxxticks"])
if pa["maxyticks"]!="":
fig.update_yaxes(nticks=pab["maxyticks"])
#Update spikes
if pa["spikes_value"]=="both":
fig.update_xaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
fig.update_yaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
elif pa["spikes_value"]=="x":
fig.update_xaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
elif pa["spikes_value"]=="y":
fig.update_yaxes(showspikes=True,spikecolor=pab["spikes_color"],spikethickness=pab["spikes_thickness"],spikedash=pa["spikes_dash"],spikemode=pa["spikes_mode"])
elif pa["spikes_value"]=="None":
fig.update_xaxes(showspikes=None)
fig.update_yaxes(showspikes=None)
#UPDATE X AXIS AND Y AXIS LAYOUT
xaxis=dict(visible=True, title=dict(text=pa["xlabel"],font=dict(family=pab["label_fontfamily"],size=pab["label_fontsize"],color=pab["label_fontcolor"])))
yaxis=dict(visible=True, title=dict(text=pa["ylabel"],font=dict(family=pab["label_fontfamily"],size=pab["label_fontsize"],color=pab["label_fontcolor"])))
fig.update_layout(paper_bgcolor=pab["paper_bgcolor"],plot_bgcolor=pab["plot_bgcolor"],xaxis = xaxis,yaxis = yaxis)
fig.update_xaxes(tickangle=pab["xticks_rotation"], tickfont=dict(size=pab["xticks_fontsize"]))
fig.update_yaxes(tickangle=pab["yticks_rotation"], tickfont=dict(size=pab["yticks_fontsize"]))
#UPDATE GRID PROPERTIES
if pa["grid_value"] == "None":
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=False)
elif pa["grid_value"]=="x":
fig.update_yaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
elif pa["grid_value"]=="y":
fig.update_xaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
elif pa["grid_value"]=="both":
fig.update_xaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
fig.update_yaxes(showgrid=True,gridcolor=pab["grid_color"],gridwidth=pab["grid_width"])
fig.update_layout(template='plotly_white')
#UPDATE LEGEND PROPERTIES
if pab["show_legend"]==True:
if pa["legend_orientation"]=="vertical":
legend_orientation="v"
elif pa["legend_orientation"]=="horizontal":
legend_orientation="h"
fig.update_layout(showlegend=True,legend=dict(x=pab["legend_x"],y=pab["legend_y"],bgcolor=pab["legend_bgcolor"],bordercolor=pab["legend_bordercolor"],\
borderwidth=pab["legend_borderwidth"],valign=pa["legend_valign"],\
font=dict(family=pab["legend_fontfamily"],size=pab["legend_fontsize"],color=pab["legend_fontcolor"]),orientation=legend_orientation,\
traceorder=pa["legend_traceorder"],tracegroupgap=pab["legend_tracegroupgap"],\
title=dict(text=pa["legend_title"],side=pa["legend_side"],font=dict(family=pab["legend_title_fontfamily"],size=pab["legend_title_fontsize"],\
color=pab["legend_title_fontcolor"]))))
else:
fig.update_layout(showlegend=False)
return fig
STANDARD_SIZES=[str(i) for i in list(range(1,101))]
STANDARD_COLORS=["None","aliceblue","antiquewhite","aqua","aquamarine","azure","beige",\
"bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood",\
"cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk",\
"crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgrey",\
"darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid",\
"darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey",\
"darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dimgrey","dodgerblue",\
"firebrick","floralwhite","forestgreen","fuchsia","gainsboro","ghostwhite","gold",\
"goldenrod","gray","grey","green","greenyellow","honeydew","hotpink","indianred","indigo",\
"ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral",\
"lightcyan","lightgoldenrodyellow","lightgray","lightgrey","lightgreen","lightpink","lightsalmon",\
"lightseagreen","lightskyblue","lightslategray","lightslategrey","lightsteelblue","lightyellow",\
"lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid",\
"mediumpurple","mediumseagreen","mediumslateblue","mediumspringgreen","mediumturquoise",\
"mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy",\
"oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen",\
"paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue",\
"purple","red","rosybrown","royalblue","rebeccapurple","saddlebrown","salmon","sandybrown",\
"seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","slategrey","snow",\
"springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white",\
"whitesmoke","yellow","yellowgreen"]
LINE_STYLES=["solid", "dot", "dash", "longdash", "dashdot","longdashdot"]
STANDARD_BARMODES=["stack", "group","overlay","relative"]
STANDARD_ORIENTATIONS=['top','right','bottom','left']
STANDARD_ALIGNMENTS=["left","right","auto"]
STANDARD_VERTICAL_ALIGNMENTS=["top", "middle","bottom"]
STANDARD_FONTS=["Arial", "Balto", "Courier New", "Default", "Droid Sans", "Droid Serif", "Droid Sans Mono",\
"Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman"]
TICKS_DIRECTIONS=["inside","outside",'']
LEGEND_LOCATIONS=['best','upper right','upper left','lower left','lower right','right','center left','center right','lower center','upper center','center']
MODES=["expand",None]
STANDARD_HOVERINFO=["x", "y", "z", "text", "name","all","none","skip","x+y","x+text","x+name",\
"y+text","y+name","text+name","x+y+name","x+y+text","x+text+name","y+text+name"]
STANDARD_ERRORBAR_TYPES=["percent","constant","sqrt"]
STANDARD_REFERENCES=["container","paper"]
STANDARD_TITLE_XANCHORS=["auto","left","center","right"]
STANDARD_TITLE_YANCHORS=["top","middle","bottom"]
STANDARD_LEGEND_XANCHORS=["auto","left","center","right"]
STANDARD_LEGEND_YANCHORS=["auto","top","middle","bottom"]
STANDARD_TRACEORDERS=["reversed", "grouped", "reversed+grouped", "normal"]
STANDARD_SIDES=["top","left","top left"]
STANDARD_SPIKEMODES=["toaxis", "across", "marker","toaxis+across","toaxis+marker","across+marker","toaxis+across+marker"]
STANDARD_SCALEMODES=["width","count"]
STANDARD_SYMBOLS=["0","circle","100","circle-open","200","circle-dot","300","circle-open-dot","1","square",\
"101","square-open","201","square-dot","301","square-open-dot","2","diamond","102","diamond-open","202",\
"diamond-dot","302","diamond-open-dot","3","cross","103","cross-open","203","cross-dot","303","cross-open-dot",\
"4","x","104","x-open","204","x-dot","304","x-open-dot","5","triangle-up","105","triangle-up-open","205",\
"triangle-up-dot","305","triangle-up-open-dot","6","triangle-down","106","triangle-down-open","206","triangle-down-dot",\
"306","triangle-down-open-dot","7","triangle-left","107","triangle-left-open","207","triangle-left-dot","307",\
"triangle-left-open-dot","8","triangle-right","108","triangle-right-open","208","triangle-right-dot","308",\
"triangle-right-open-dot","9","triangle-ne","109","triangle-ne-open","209","triangle-ne-dot","309",\
"triangle-ne-open-dot","10","triangle-se","110","triangle-se-open","210","triangle-se-dot","310","triangle-se-open-dot",\
"11","triangle-sw","111","triangle-sw-open","211","triangle-sw-dot","311","triangle-sw-open-dot","12","triangle-nw",\
"112","triangle-nw-open","212","triangle-nw-dot","312","triangle-nw-open-dot","13","pentagon","113","pentagon-open",\
"213","pentagon-dot","313","pentagon-open-dot","14","hexagon","114","hexagon-open","214","hexagon-dot","314","hexagon-open-dot",\
"15","hexagon2","115","hexagon2-open","215","hexagon2-dot","315","hexagon2-open-dot","16","octagon","116","octagon-open",\
"216","octagon-dot","316","octagon-open-dot","17","star","117","star-open","217","star-dot","317","star-open-dot","18",\
"hexagram","118","hexagram-open","218","hexagram-dot","318","hexagram-open-dot","19","star-triangle-up","119",\
"star-triangle-up-open","219","star-triangle-up-dot","319","star-triangle-up-open-dot","20","star-triangle-down","120",\
"star-triangle-down-open","220","star-triangle-down-dot","320","star-triangle-down-open-dot","21","star-square","121",\
"star-square-open","221","star-square-dot","321","star-square-open-dot","22","star-diamond","122","star-diamond-open",\
"222","star-diamond-dot","322","star-diamond-open-dot","23","diamond-tall","123","diamond-tall-open","223",\
"diamond-tall-dot","323","diamond-tall-open-dot","24","diamond-wide","124","diamond-wide-open","224","diamond-wide-dot",\
"324","diamond-wide-open-dot","25","hourglass","125","hourglass-open","26","bowtie","126","bowtie-open","27","circle-cross",\
"127","circle-cross-open","28","circle-x","128","circle-x-open","29","square-cross","129","square-cross-open","30",\
"square-x","130","square-x-open","31","diamond-cross","131","diamond-cross-open","32","diamond-x","132","diamond-x-open",\
"33","cross-thin","133","cross-thin-open","34","x-thin","134","x-thin-open","35","asterisk","135","asterisk-open","36",\
"hash","136","hash-open","236","hash-dot","336","hash-open-dot","37","y-up","137","y-up-open","38","y-down","138",\
"y-down-open","39","y-left","139","y-left-open","40","y-right","140","y-right-open","41","line-ew","141","line-ew-open",\
"42","line-ns","142","line-ns-open","43","line-ne","143","line-ne-open","44","line-nw","144","line-nw-open"]
STANDARD_DISTFUNCS=['braycurtis','canberra','chebyshev','cityblock','correlation','cosine','dice','euclidean','hamming','jaccard', \
'jensenshannon','kulsinski', 'mahalanobis','matching','minkowski', 'rogerstanimoto', 'russellrao','seuclidean', \
'sokalmichener','sokalsneath', 'sqeuclidean','yule']
STANDARD_LINKAGE=["single","complete","average","weighted","centroid","median","ward"]
def figure_defaults():
"""Generates default figure arguments.
Returns:
dict: A dictionary of the style { "argument":"value"}
"""
# https://matplotlib.org/3.1.1/api/markers_api.html
# https://matplotlib.org/2.0.2/api/colors_api.html
    # lists always need to have the default value after the list
# eg.:
# "title_size":standard_sizes,\
# "titles":"20"
# "fig_size_x"="6"
# "fig_size_y"="6"
plot_arguments={"fig_width":"600.0",\
"fig_height":"600.0",\
"title":'iDendrogram plot',\
"title_fontsize":"20",\
"title_fontfamily":"Default",\
"title_fontcolor":"None",\
"titles":"20.0",\
"opacity":"1.0",\
"paper_bgcolor":"white",\
"plot_bgcolor":"white",\
"color_value":"None",\
"color_rgb":"",\
"hover_text":"",\
"color_threshold":"",\
"labelcol":"",\
"labelcols":[],\
"labels":"",\
"cols":[],\
"datacols":[],\
"dist_func":"euclidean",\
"dist_funcs":STANDARD_DISTFUNCS,\
"link_func":"single",\
"link_funcs":STANDARD_LINKAGE,\
"hoverinfos":STANDARD_HOVERINFO,\
"hover_alignments":STANDARD_ALIGNMENTS,\
"references":STANDARD_REFERENCES,\
"scalemodes":STANDARD_SCALEMODES,\
"marker_symbol":"circle",\
"marker_symbols":STANDARD_SYMBOLS,\
"marker_outliercolor":"None",\
"marker_opacity":"1.0",\
"marker_size":"6.0",\
"marker_color_rgb":"",\
"marker_fillcolor":"None",\
"marker_line_color":"None",\
"marker_line_color_rgb":"",\
"marker_line_width":"1.0",\
"marker_line_outlierwidth":"1.0",\
"marker_line_outliercolor":"None",\
"xref":"container",\
"yref":"container",\
"x":"0.5",\
"y":"0.9",\
"title_xanchors":STANDARD_TITLE_XANCHORS,\
"title_yanchors":STANDARD_TITLE_YANCHORS,\
"title_xanchor":"auto",\
"title_yanchor":"auto",\
"show_legend":".off",\
"axis_line_width":1.0,\
"axis_line_color":"lightgrey",\
"ticks_line_width":1.0,\
"ticks_color":"lightgrey",\
"groups":[],\
"groups_settings":dict(),\
"log_scale":".off",\
"fonts":STANDARD_FONTS,\
"colors":STANDARD_COLORS,\
"linestyles":LINE_STYLES,\
"linestyle_value":"",\
"orientations":STANDARD_ORIENTATIONS,\
"orientation":"bottom",\
"fontsizes":STANDARD_SIZES,\
"xlabel_size":STANDARD_SIZES,\
"ylabel_size":STANDARD_SIZES,\
"xlabel":"",\
"ylabel":"",\
"label_fontfamily":"Default",\
"label_fontsize":"15",\
"label_fontcolor":"None",\
"xlabels":"14",\
"ylabels":"14",\
"left_axis":".on" ,\
"right_axis":".on",\
"upper_axis":".on",\
"lower_axis":".on",\
"tick_left_axis":".on" ,\
"tick_right_axis":".off",\
"tick_upper_axis":".off",\
"tick_lower_axis":".on",\
"ticks_direction":TICKS_DIRECTIONS,\
"ticks_direction_value":TICKS_DIRECTIONS[1],\
"ticks_length":"6.0",\
"xticks_fontsize":"14",\
"yticks_fontsize":"14",\
"xticks_rotation":"0",\
"yticks_rotation":"0",\
"x_lower_limit":"",\
"y_lower_limit":"",\
"x_upper_limit":"",\
"y_upper_limit":"",\
"maxxticks":"",\
"maxyticks":"",\
"spikes":["None","both","x","y"],\
"spikes_value":"None",\
"spikes_color":"None",\
"spikes_thickness":"3.0",\
"dashes":LINE_STYLES,\
"spikes_dash":"dash",\
"spikes_mode":"toaxis",\
"spikes_modes":STANDARD_SPIKEMODES,\
"grid":["None","both","x","y"],\
"grid_value":"None",\
"grid_width":"1",\
"grid_color":"lightgrey",\
"legend_title":"",\
"legend_bgcolor":"None",\
"legend_borderwidth":"0",\
"legend_bordercolor":"None",\
"legend_fontfamily":"Default",\
"legend_fontsize":"12",\
"legend_fontcolor":"None",\
"legend_title_fontfamily":"Default",\
"legend_title_fontsize":"12",\
"legend_title_fontcolor":"None",\
"legend_orientation":"vertical",\
"legend_orientations":["vertical","horizontal"],\
"traceorders":STANDARD_TRACEORDERS,\
"legend_traceorder":"normal",\
"legend_tracegroupgap":"10",\
"legend_y":"1",\
"legend_x":"1.02",\
"legend_xanchor":"left",\
"legend_yanchor":"auto",\
"legend_xanchors":STANDARD_LEGEND_XANCHORS,\
"legend_yanchors":STANDARD_LEGEND_YANCHORS,\
"legend_valign":"middle",\
"valignments":STANDARD_VERTICAL_ALIGNMENTS,\
"sides":STANDARD_SIDES,\
"legend_side":"left",\
"download_format":["png","pdf","svg"],\
"downloadf":"pdf",\
"downloadn":"idendrogram",\
"session_downloadn":"MySession.idendrogram.plot",\
"inputsessionfile":"Select file..",\
"session_argumentsn":"MyArguments.idendrogram.plot",\
"inputargumentsfile":"Select file.."
}
return plot_arguments
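# Minimal usage sketch (added; not part of the original module, column names are
# hypothetical): start from figure_defaults(), point "datacols" at the numeric columns
# of a pandas DataFrame `df`, and disable the label column.
#   pa = figure_defaults()
#   pa["datacols"] = ["x1", "x2"]
#   pa["labelcol"] = "None"
#   fig = make_figure(df, pa)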
avg_line_length: 47.251586 | max_line_length: 202 | alphanum_fraction: 0.63736
30eb50111fa27898698159ac8e2dd677e77d1f94 | 15,490 | py | Python | WebServer.py | seriousthoj/PlexConnect | 9c726f8cae8d13479df51ae8e89900ef30756c63 | ["MIT"] | null | null | null | WebServer.py | seriousthoj/PlexConnect | 9c726f8cae8d13479df51ae8e89900ef30756c63 | ["MIT"] | null | null | null | WebServer.py | seriousthoj/PlexConnect | 9c726f8cae8d13479df51ae8e89900ef30756c63 | ["MIT"] | 1 | 2019-01-30T21:19:11.000Z | 2019-01-30T21:19:11.000Z |
#!/usr/bin/env python
"""
Sources:
http://fragments.turtlemeat.com/pythonwebserver.php
http://www.linuxjournal.com/content/tech-tip-really-simple-http-server-python
...stackoverflow.com and such
after 27Aug - Apple's switch to https:
- added https WebServer with SSL encryption - needs valid (private) certificate on aTV and server
- for additional information see http://langui.sh/2013/08/27/appletv-ssl-plexconnect/
Thanks to reaperhulk for showing this solution!
"""
import sys
import string, cgi, time
from os import sep, path
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import ssl
from multiprocessing import Pipe # inter process communication
import urllib, StringIO, gzip
import signal
import traceback
import Settings, ATVSettings
from Debug import * # dprint()
import XMLConverter # XML_PMS2aTV, XML_PlayVideo
import re
import Localize
import Subtitle
g_param = {}
def setParams(param):
global g_param
g_param = param
def JSConverter(file, options):
f = open(sys.path[0] + "/assets/js/" + file)
JS = f.read()
f.close()
# PlexConnect {{URL()}}->baseURL
for path in set(re.findall(r'\{\{URL\((.*?)\)\}\}', JS)):
JS = JS.replace('{{URL(%s)}}' % path, g_param['baseURL']+path)
# localization
JS = Localize.replaceTEXT(JS, options['aTVLanguage']).encode('utf-8')
return JS
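# Added illustration (not original code): a placeholder such as {{URL(/some/path)}}
# inside assets/js/*.js is replaced by JSConverter with g_param['baseURL'] + '/some/path'
# before the script is sent to the aTV; '/some/path' here is only an example.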
class MyHandler(BaseHTTPRequestHandler):
# Fixes slow serving speed under Windows
def address_string(self):
host, port = self.client_address[:2]
#return socket.getfqdn(host)
return host
def log_message(self, format, *args):
pass
def compress(self, data):
buf = StringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', fileobj=buf, compresslevel=9)
zfile.write(data)
zfile.close()
return buf.getvalue()
def sendResponse(self, data, type, enableGzip):
self.send_response(200)
self.send_header('Server', 'PlexConnect')
self.send_header('Content-type', type)
try:
accept_encoding = map(string.strip, string.split(self.headers["accept-encoding"], ","))
except KeyError:
accept_encoding = []
if enableGzip and \
g_param['CSettings'].getSetting('allow_gzip_atv')=='True' and \
'gzip' in accept_encoding:
self.send_header('Content-encoding', 'gzip')
self.end_headers()
self.wfile.write(self.compress(data))
else:
self.end_headers()
self.wfile.write(data)
def do_GET(self):
global g_param
try:
dprint(__name__, 2, "http request header:\n{0}", self.headers)
dprint(__name__, 2, "http request path:\n{0}", self.path)
# check for PMS address
PMSaddress = ''
pms_end = self.path.find(')')
if self.path.startswith('/PMS(') and pms_end>-1:
PMSaddress = urllib.unquote_plus(self.path[5:pms_end])
self.path = self.path[pms_end+1:]
# break up path, separate PlexConnect options
# clean path needed for filetype decoding
parts = re.split(r'[?&]', self.path, 1) # should be '?' only, but we do some things different :-)
if len(parts)==1:
self.path = parts[0]
options = {}
query = ''
else:
self.path = parts[0]
# break up query string
options = {}
query = ''
parts = parts[1].split('&')
for part in parts:
if part.startswith('PlexConnect'):
# get options[]
opt = part.split('=', 1)
if len(opt)==1:
options[opt[0]] = ''
else:
options[opt[0]] = urllib.unquote(opt[1])
else:
# recreate query string (non-PlexConnect) - has to be merged back when forwarded
if query=='':
query = '?' + part
else:
query += '&' + part
# get aTV language setting
options['aTVLanguage'] = Localize.pickLanguage(self.headers.get('Accept-Language', 'en'))
# add client address - to be used in case UDID is unknown
if 'X-Forwarded-For' in self.headers:
options['aTVAddress'] = self.headers['X-Forwarded-For'].split(',', 1)[0]
else:
options['aTVAddress'] = self.client_address[0]
# get aTV hard-/software parameters
options['aTVFirmwareVersion'] = self.headers.get('X-Apple-TV-Version', '5.1')
options['aTVScreenResolution'] = self.headers.get('X-Apple-TV-Resolution', '720')
dprint(__name__, 2, "pms address:\n{0}", PMSaddress)
dprint(__name__, 2, "cleaned path:\n{0}", self.path)
dprint(__name__, 2, "PlexConnect options:\n{0}", options)
dprint(__name__, 2, "additional arguments:\n{0}", query)
if 'User-Agent' in self.headers and \
'AppleTV' in self.headers['User-Agent']:
                # receive simple logging messages from the ATV
if 'PlexConnectATVLogLevel' in options:
dprint('ATVLogger', int(options['PlexConnectATVLogLevel']), options['PlexConnectLog'])
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
return
# serve "*.cer" - Serve up certificate file to atv
if self.path.endswith(".cer"):
dprint(__name__, 1, "serving *.cer: "+self.path)
if g_param['CSettings'].getSetting('certfile').startswith('.'):
# relative to current path
cfg_certfile = sys.path[0] + sep + g_param['CSettings'].getSetting('certfile')
else:
# absolute path
cfg_certfile = g_param['CSettings'].getSetting('certfile')
cfg_certfile = path.normpath(cfg_certfile)
cfg_certfile = path.splitext(cfg_certfile)[0] + '.cer'
try:
f = open(cfg_certfile, "rb")
except:
dprint(__name__, 0, "Failed to access certificate: {0}", cfg_certfile)
return
self.sendResponse(f.read(), 'text/xml', False)
f.close()
return
# serve .js files to aTV
# application, main: ignore path, send /assets/js/application.js
# otherwise: path should be '/js', send /assets/js/*.js
dirname = path.dirname(self.path)
basename = path.basename(self.path)
if basename in ("application.js", "main.js", "javascript-packed.js", "bootstrap.js") or \
basename.endswith(".js") and dirname == '/js':
if basename in ("main.js", "javascript-packed.js", "bootstrap.js"):
basename = "application.js"
dprint(__name__, 1, "serving /js/{0}", basename)
JS = JSConverter(basename, options)
self.sendResponse(JS, 'text/javascript', True)
return
# proxy phobos.apple.com to support PlexConnect main icon
if "a1.phobos.apple.com" in self.headers['Host']:
resource = self.headers['Host']+self.path
icon = g_param['CSettings'].getSetting('icon')
if basename.startswith(icon):
icon_res = basename[len(icon):] # cut string from settings, keeps @720.png/@1080.png
resource = sys.path[0] + '/assets/icons/icon'+icon_res
dprint(__name__, 1, "serving "+self.headers['Host']+self.path+" with "+resource)
r = open(resource, "rb")
else:
r = urllib.urlopen('http://'+resource)
self.sendResponse(r.read(), 'image/png', False)
r.close()
return
# serve "*.jpg" - thumbnails for old-style mainpage
if self.path.endswith(".jpg"):
dprint(__name__, 1, "serving *.jpg: "+self.path)
f = open(sys.path[0] + sep + "assets" + self.path, "rb")
self.sendResponse(f.read(), 'image/jpeg', False)
f.close()
return
# serve "*.png" - only png's support transparent colors
if self.path.endswith(".png"):
dprint(__name__, 1, "serving *.png: "+self.path)
f = open(sys.path[0] + sep + "assets" + self.path, "rb")
self.sendResponse(f.read(), 'image/png', False)
f.close()
return
# serve subtitle file - transcoded to aTV subtitle json
if 'PlexConnect' in options and \
options['PlexConnect']=='Subtitle':
dprint(__name__, 1, "serving subtitle: "+self.path)
XML = Subtitle.getSubtitleJSON(PMSaddress, self.path + query, options)
self.sendResponse(XML, 'application/json', True)
return
# get everything else from XMLConverter - formerly limited to trailing "/" and &PlexConnect Cmds
if True:
dprint(__name__, 1, "serving .xml: "+self.path)
XML = XMLConverter.XML_PMS2aTV(PMSaddress, self.path + query, options)
self.sendResponse(XML, 'text/xml', True)
return
"""
# unexpected request
self.send_error(403,"Access denied: %s" % self.path)
"""
else:
"""
Added Up Page for docker helthcheck
self.send_error(403,"Not Serving Client %s" % self.client_address[0])
"""
dprint(__name__, 1, "serving *.html: "+self.path)
f = open(sys.path[0] + sep + "assets/templates/up.html")
self.sendResponse(f.read(), 'text/html', False)
f.close()
except IOError:
dprint(__name__, 0, 'File Not Found:\n{0}', traceback.format_exc())
self.send_error(404,"File Not Found: %s" % self.path)
except:
dprint(__name__, 0, 'Internal Server Error:\n{0}', traceback.format_exc())
self.send_error(500,"Internal Server Error: %s" % self.path)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def Run(cmdPipe, param):
if not __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_IGN)
dinit(__name__, param) # init logging, WebServer process
cfg_IP_WebServer = param['IP_self']
cfg_Port_WebServer = param['CSettings'].getSetting('port_webserver')
try:
server = ThreadedHTTPServer((cfg_IP_WebServer,int(cfg_Port_WebServer)), MyHandler)
server.timeout = 1
except Exception, e:
dprint(__name__, 0, "Failed to connect to HTTP on {0} port {1}: {2}", cfg_IP_WebServer, cfg_Port_WebServer, e)
sys.exit(1)
socketinfo = server.socket.getsockname()
dprint(__name__, 0, "***")
dprint(__name__, 0, "WebServer: Serving HTTP on {0} port {1}.", socketinfo[0], socketinfo[1])
dprint(__name__, 0, "***")
setParams(param)
XMLConverter.setParams(param)
XMLConverter.setATVSettings(param['CATVSettings'])
try:
while True:
# check command
if cmdPipe.poll():
cmd = cmdPipe.recv()
if cmd=='shutdown':
break
# do your work (with timeout)
server.handle_request()
except KeyboardInterrupt:
signal.signal(signal.SIGINT, signal.SIG_IGN) # we heard you!
dprint(__name__, 0,"^C received.")
finally:
dprint(__name__, 0, "Shutting down (HTTP).")
server.socket.close()
def Run_SSL(cmdPipe, param):
if not __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_IGN)
dinit(__name__, param) # init logging, WebServer process
cfg_IP_WebServer = param['IP_self']
cfg_Port_SSL = param['CSettings'].getSetting('port_ssl')
if param['CSettings'].getSetting('certfile').startswith('.'):
# relative to current path
cfg_certfile = sys.path[0] + sep + param['CSettings'].getSetting('certfile')
else:
# absolute path
cfg_certfile = param['CSettings'].getSetting('certfile')
cfg_certfile = path.normpath(cfg_certfile)
try:
certfile = open(cfg_certfile, 'r')
except:
dprint(__name__, 0, "Failed to access certificate: {0}", cfg_certfile)
sys.exit(1)
certfile.close()
try:
server = ThreadedHTTPServer((cfg_IP_WebServer,int(cfg_Port_SSL)), MyHandler)
server.socket = ssl.wrap_socket(server.socket, certfile=cfg_certfile, server_side=True)
server.timeout = 1
except Exception, e:
dprint(__name__, 0, "Failed to connect to HTTPS on {0} port {1}: {2}", cfg_IP_WebServer, cfg_Port_SSL, e)
sys.exit(1)
socketinfo = server.socket.getsockname()
dprint(__name__, 0, "***")
dprint(__name__, 0, "WebServer: Serving HTTPS on {0} port {1}.", socketinfo[0], socketinfo[1])
dprint(__name__, 0, "***")
setParams(param)
XMLConverter.setParams(param)
XMLConverter.setATVSettings(param['CATVSettings'])
try:
while True:
# check command
if cmdPipe.poll():
cmd = cmdPipe.recv()
if cmd=='shutdown':
break
# do your work (with timeout)
server.handle_request()
except KeyboardInterrupt:
signal.signal(signal.SIGINT, signal.SIG_IGN) # we heard you!
dprint(__name__, 0,"^C received.")
finally:
dprint(__name__, 0, "Shutting down (HTTPS).")
server.socket.close()
if __name__=="__main__":
cmdPipe = Pipe()
cfg = Settings.CSettings()
param = {}
param['CSettings'] = cfg
param['CATVSettings'] = ATVSettings.CATVSettings()
param['IP_self'] = '192.168.178.20' # IP_self?
param['baseURL'] = 'http://'+ param['IP_self'] +':'+ cfg.getSetting('port_webserver')
param['HostToIntercept'] = cfg.getSetting('hosttointercept')
if len(sys.argv)==1:
Run(cmdPipe[1], param)
elif len(sys.argv)==2 and sys.argv[1]=='SSL':
Run_SSL(cmdPipe[1], param)
avg_line_length: 38.725 | max_line_length: 118 | alphanum_fraction: 0.538347
d46d53d79d2704b2e2cecbd546e3bb19755eb9b0 | 305 | py | Python | home/templatetags/custom_tags.py | mritunjayagarwal/school-management-system | 1cb42345ac793d80d4a676c47f0a85c197b889ae | ["MIT"] | null | null | null | home/templatetags/custom_tags.py | mritunjayagarwal/school-management-system | 1cb42345ac793d80d4a676c47f0a85c197b889ae | ["MIT"] | null | null | null | home/templatetags/custom_tags.py | mritunjayagarwal/school-management-system | 1cb42345ac793d80d4a676c47f0a85c197b889ae | ["MIT"] | null | null | null |
from django import template
register = template.Library()
@register.filter(name = 'split')
def split(value, arg):
return value.split(arg)
@register.filter(name = 'to_list')
def to_list(value):
return list(value)
@register.filter(name = 'int_to_str')
def int_to_str(value):
return str(value)
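# Example template usage (added sketch; assumes this file is loadable as the tag
# library "custom_tags", i.e. its module name under templatetags/):
#   {% load custom_tags %}
#   {% for part in value|split:"," %}{{ part }}{% endfor %}
#   {{ 42|int_to_str }}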
avg_line_length: 21.785714 | max_line_length: 37 | alphanum_fraction: 0.721311
aae1c1c9e45be41c209a4268e587bc7d6673656f | 775 | py | Python | simulations/utils/cost.py | danibachar/Kube-Load-Balancing | 8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491 | ["MIT"] | null | null | null | simulations/utils/cost.py | danibachar/Kube-Load-Balancing | 8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491 | ["MIT"] | null | null | null | simulations/utils/cost.py | danibachar/Kube-Load-Balancing | 8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491 | ["MIT"] | null | null | null |
import pandas as pd
# Each criteria get equal weight for now
def simple_min_addative_weight(
price,
min_price,
price_weight,
latency,
min_latency,
latency_weight
):
price_part = 0
if price > 0:
price_part = (min_price/price)*price_weight
latency_part = 0
if latency > 0:
latency_part = (min_latency/latency)*latency_weight
return price_part + latency_part
def simple_max_addative_weight(
price,
max_price,
price_weight,
latency,
max_latency,
latency_weight
):
    if max_price == 0:
        raise ValueError("max_price must be non-zero")
    if max_latency == 0:
        raise ValueError("max_latency must be non-zero")
price_part = (price / max_price) * price_weight
latency_part = (latency / max_latency) * latency_weight
return price_part + latency_part
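# Worked example (added sketch, values are illustrative):
#   simple_max_addative_weight(price=2.0, max_price=4.0, price_weight=0.5,
#                              latency=10.0, max_latency=20.0, latency_weight=0.5)
#   == (2.0/4.0)*0.5 + (10.0/20.0)*0.5 == 0.5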
avg_line_length: 19.871795 | max_line_length: 59 | alphanum_fraction: 0.665806
7d8201f013c8bcf6d7dc5ebe1a20c2de1d269fdf | 24,059 | py | Python | Oscillations.py | zleizzo/gradual | 3e48769c4e30a7f1577b360e3100335d08e40752 | ["MIT"] | null | null | null | Oscillations.py | zleizzo/gradual | 3e48769c4e30a7f1577b360e3100335d08e40752 | ["MIT"] | null | null | null | Oscillations.py | zleizzo/gradual | 3e48769c4e30a7f1577b360e3100335d08e40752 | ["MIT"] | null | null | null |
import torch
import copy
import os
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_spd_matrix
from collections import deque
from scipy.stats import sem
from tqdm import tqdm
import matplotlib as mpl
linewidth = 3.
mpl.rcParams['lines.linewidth'] = linewidth
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
plt.rcParams.update({'font.size': 22})
def L(th, mu):
return -torch.dot(th, mu)
class ToyLinear2:
def __init__(self, d, R, delta0, mean_noise, A = None, b = None, seed = 0):
torch.manual_seed(seed)
np.random.seed(seed)
self.d = d
self.R = R
self.delta0 = delta0
self.mean_noise = mean_noise
self.seed = seed
if A is None:
# self.A = -0.8 * torch.eye(d)
self.A = -0.8 * torch.tensor(make_spd_matrix(d, random_state = 0)).float()
else:
self.A = A
if b is None:
self.b = 2 * torch.ones(d)
else:
self.b = b
# A should be symmetric and negative definite
self.opt_th = torch.linalg.solve(-2 * self.A, self.b)
self.opt_l = self.lt_loss(self.opt_th)
self.stab_th = torch.linalg.solve(-self.A, self.b)
self.stab_l = self.lt_loss(self.stab_th)
self.init_th = (self.opt_th + self.stab_th) / 2 + torch.randn(d)
self.init_mu = torch.randn(d)
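    # Added derivation (sketch, not original code): with L(th, mu) = -th.mu and
    # lt_mu(th) = c * (A th + b) for c = delta0 / (2 - delta0) > 0, the long-term loss
    # is -c * th^T (A th + b); since A is symmetric its gradient is -c * (2 A th + b),
    # so opt_th solves -2A th = b, while the performatively stable point stab_th is
    # where the myopic gradient -lt_mu(th) vanishes, i.e. A th + b = 0.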
##############################################################################
# Helper methods
##############################################################################
def mu_star(self, th):
return self.A @ th + self.b
def lt_mu(self, th):
return (self.delta0 / (2 - self.delta0)) * self.mu_star(th)
def m(self, th, mu):
return self.delta0 * self.mu_star(th) - (1 - self.delta0) * mu
def lt_loss(self, th):
return L(th, self.lt_mu(th))
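    # Added sanity check (sketch): the mean update is m(th, mu) = delta0 * mu_star(th)
    # - (1 - delta0) * mu, so a fixed point satisfies mu = delta0 * mu_star(th)
    # - (1 - delta0) * mu, i.e. mu = delta0 / (2 - delta0) * mu_star(th), which is
    # exactly what lt_mu() returns.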
##############################################################################
##############################################################################
# PGD
##############################################################################
def PGD(self, T, lr, k, horizon):
theta_t = copy.deepcopy(self.init_th)
mu_t = copy.deepcopy(self.init_mu)
est_mu_t = mu_t + self.mean_noise * torch.randn(self.d)
thetas = deque(maxlen = horizon)
mus = deque(maxlen = horizon)
thetas_hist = deque()
# warmup = self.d
warmup = 4
with torch.no_grad():
for t in range(k * warmup):
thetas_hist.append(theta_t.clone())
if t % k == k - 1: # Only updated theta every k steps
mus.append(est_mu_t.clone())
thetas.append(theta_t.clone())
theta_t = torch.clamp(theta_t + lr * torch.randn(self.d), -self.R, self.R)
# grad = -est_mu_t
# theta_t -= lr * grad
mu_t = self.m(theta_t, mu_t)
est_mu_t = mu_t + self.mean_noise * torch.randn(self.d)
for t in range(T - k * warmup):
thetas_hist.append(theta_t.clone())
if t % k == k - 1: # Only updated theta every k steps
est_mu_t = mu_t + self.mean_noise * torch.randn(self.d)
mus.append(est_mu_t.clone())
thetas.append(theta_t.clone())
Delta_thetas = torch.column_stack([th - thetas[-1] for th in thetas])
Delta_mus = torch.column_stack([mu - mus[-1] for mu in mus])
try:
dmu_dth = (Delta_mus @ torch.linalg.pinv(Delta_thetas))
perf_grad_approx = -(est_mu_t + dmu_dth.T @ theta_t)
except RuntimeError:
# print(f'Jacobian estimation failed (lr={round(lr,4)}, H={horizon}, k={k}')
break
theta_t = torch.clamp(theta_t - lr * perf_grad_approx, -self.R, self.R)
mu_t = self.m(theta_t, mu_t)
while len(thetas_hist) < T:
thetas_hist.append(self.R * torch.ones(self.d))
return thetas_hist
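    # Added note (sketch): PGD estimates dmu/dth by finite differences over the last
    # `horizon` deployments, solving Delta_mus ~ J @ Delta_thetas for J via the
    # pseudo-inverse, and then follows the approximate performative gradient
    # -(mu + J^T theta) of L(th, mu) = -th.mu.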
##############################################################################
##############################################################################
# SPGD
##############################################################################
def approx_lt_dmu(self, st_mu_grad, k = None):
# Approximates dmu_1* / dth given estimates for the derivatives of the
# short-term mean.
# These derivatives should be collected in st_mu_grad where the first
# d columns are d(st_mu) / dth and the last d columns are d(st_mu) / dmu.
# k = number of steps to approximate with. If k = None, then we use the
# approximation given by k --> \infty. Note that this is actually not valid
# when || d2 m || >= 1.
d1 = st_mu_grad[:, :self.d]
d2 = st_mu_grad[:, self.d:]
if k is None:
return torch.linalg.solve(torch.eye(self.d) - d2, d1)
else:
return torch.linalg.solve(torch.eye(self.d) - d2, (torch.eye(self.d) - torch.linalg.matrix_power(d1, k)) @ d1)
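    # Added note (sketch): for k = None the branch above returns (I - d2)^{-1} d1,
    # the limit of the Neumann series sum_{i>=0} d2^i d1; this is the long-run
    # sensitivity of the mean to theta and is only valid when ||d2|| < 1, matching
    # the caveat in the comments above.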
def SPGD(self, T, lr, k, H, pert):
theta_t = copy.deepcopy(self.init_th)
mu_tm1 = copy.deepcopy(self.init_mu)
est_mu_tm1 = mu_tm1 + self.mean_noise * torch.randn(self.d)
params_t = torch.cat([theta_t, mu_tm1])
thetas = deque()
inputs = deque(maxlen = H)
outputs = deque(maxlen = H)
warmup = self.d
with torch.no_grad():
for t in range(warmup):
thetas.append(theta_t.clone())
inputs.append(params_t.clone())
mu_t = self.m(theta_t, mu_tm1)
est_mu_t = mu_t + self.mean_noise * torch.randn(self.d)
outputs.append(est_mu_t)
theta_t = torch.clamp(theta_t + lr * torch.randn(self.d), -self.R, self.R)
mu_tm1 = mu_t
est_mu_tm1 = est_mu_t
params_t = torch.cat([theta_t, est_mu_tm1])
for t in range(T - warmup):
thetas.append(theta_t.clone())
inputs.append(params_t.clone())
mu_t = self.m(theta_t, mu_tm1)
est_mu_t = mu_t + self.mean_noise * torch.randn(self.d)
outputs.append(est_mu_t)
Delta_inputs = torch.column_stack([i - inputs[-1] for i in inputs])
Delta_outputs = torch.column_stack([o - outputs[-1] for o in outputs])
try:
grad_m = (Delta_outputs @ torch.linalg.pinv(Delta_inputs))
long_term_grad_approx = -(est_mu_t + self.approx_lt_dmu(grad_m, k).T @ theta_t + pert * torch.randn(self.d))
except RuntimeError:
# print(f'Jacobian estimation failed (lr={round(lr,4)}, H={H}, k={k}, pert={round(pert,4)})')
break
theta_t = torch.clamp(theta_t - lr * long_term_grad_approx, -self.R, self.R)
mu_tm1 = mu_t
est_mu_tm1 = est_mu_t
params_t = torch.cat([theta_t, est_mu_tm1])
while len(thetas) < T:
thetas.append(self.R * torch.ones(self.d))
return thetas
##############################################################################
# RGD
##############################################################################
def RGD(self, T, lr, k = 1):
theta_t = copy.deepcopy(self.init_th)
mu_t = copy.deepcopy(self.init_mu)
thetas = deque()
with torch.no_grad():
for t in range(T):
thetas.append(theta_t.clone().detach())
grad = -(mu_t + self.mean_noise * torch.randn(self.d))
theta_t = torch.clamp(theta_t - lr * grad, -self.R, self.R)
mu_t = self.m(theta_t, mu_t)
return thetas
##############################################################################
# Flaxman black-box DFO
##############################################################################
def DFO(self, T, lr, perturbation, k = 1):
thetas = deque()
queries = deque()
with torch.no_grad():
internal_t = copy.deepcopy(self.init_th)
mu_t = copy.deepcopy(self.init_mu)
u_t = torch.randn(self.d)
u_t /= torch.linalg.norm(u_t)
deploy_t = torch.clamp(internal_t + perturbation * u_t, -self.R, self.R).clone()
for t in range(T):
thetas.append(internal_t.clone().detach())
queries.append(deploy_t.clone().detach())
if t % k == 0: # Only updated theta every k steps
loss = -torch.dot(deploy_t, mu_t + self.mean_noise * torch.randn(self.d))
grad = (self.d * loss / perturbation) * u_t
internal_t = torch.clamp(internal_t - lr * grad, -self.R, self.R)
u_t = torch.randn(self.d)
u_t /= torch.linalg.norm(u_t)
deploy_t = torch.clamp(internal_t + perturbation * u_t, -self.R, self.R).clone()
mu_t = self.m(deploy_t, mu_t).clone()
return thetas, queries
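    # Added note (sketch): the update above is the one-point gradient estimator of
    # Flaxman et al., g_hat = (d / perturbation) * loss(theta + perturbation * u) * u
    # with u uniform on the unit sphere, which estimates the gradient of a smoothed
    # version of the loss.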
##############################################################################
# Run experiments
##############################################################################
def experiment(num_trials, T, lrs, dfo_perts, Hs, waits, ks, spgd_perts, d, R, delta0, mean_noise, A = None, b = None, seed = 0):
rgd_th = np.empty((len(lrs), num_trials), dtype=object)
dfo_i_th = np.empty((len(lrs), len(dfo_perts), len(waits), num_trials), dtype=object)
dfo_q_th = np.empty((len(lrs), len(dfo_perts), len(waits), num_trials), dtype=object)
pgd_th = np.empty((len(lrs), len(Hs), len(waits), num_trials), dtype=object)
spgd_th = np.empty((len(lrs), len(spgd_perts), len(Hs), len(ks), num_trials), dtype=object)
rgd_l = np.empty((len(lrs), num_trials), dtype=object)
dfo_i_l = np.empty((len(lrs), len(dfo_perts), len(waits), num_trials), dtype=object)
dfo_q_l = np.empty((len(lrs), len(dfo_perts), len(waits), num_trials), dtype=object)
pgd_l = np.empty((len(lrs), len(Hs), len(waits), num_trials), dtype=object)
spgd_l = np.empty((len(lrs), len(spgd_perts), len(Hs), len(ks), num_trials), dtype=object)
rgd_d = np.empty((len(lrs), num_trials), dtype=object)
dfo_i_d = np.empty((len(lrs), len(dfo_perts), len(waits), num_trials), dtype=object)
dfo_q_d = np.empty((len(lrs), len(dfo_perts), len(waits), num_trials), dtype=object)
pgd_d = np.empty((len(lrs), len(Hs), len(waits), num_trials), dtype=object)
spgd_d = np.empty((len(lrs), len(spgd_perts), len(Hs), len(ks), num_trials), dtype=object)
for r in tqdm(range(num_trials)):
expt = ToyLinear2(d, R, delta0, mean_noise, A, b, seed + r)
# RGD experiments
print('Running RGD...')
for i in range(len(lrs)):
# lr = lrs[i]
lr = 0.4
wait = 1
traj = expt.RGD(T, lr)
rgd_l[i, r] = np.array([expt.lt_loss(th) for th in traj], dtype=float)
rgd_th[i, r] = np.array(traj)
rgd_d[i, r] = np.array([torch.linalg.norm(th - expt.opt_th) for th in traj], dtype=float)
# DFO experiments
print('Running DFO...')
for i in range(len(lrs)):
for j in range(len(dfo_perts)):
for k in range(len(waits)):
lr = lrs[i]
pert = dfo_perts[j]
wait = waits[k]
# T = int(10 / lr)
i_traj, q_traj = expt.DFO(T, lr, pert, wait)
dfo_i_l[i, j, k, r] = np.array([expt.lt_loss(th) for th in i_traj], dtype=float)
dfo_q_l[i, j, k, r] = np.array([expt.lt_loss(th) for th in q_traj], dtype=float)
dfo_i_th[i, j, k, r] = np.array(i_traj)
dfo_q_th[i, j, k, r] = np.array(q_traj)
                    dfo_i_d[i, j, k, r] = np.array([torch.linalg.norm(th - expt.opt_th) for th in i_traj], dtype=float)
                    dfo_q_d[i, j, k, r] = np.array([torch.linalg.norm(th - expt.opt_th) for th in q_traj], dtype=float)
# PGD experiments
print('Running PGD...')
for i in range(len(lrs)):
for j in range(len(Hs)):
for k in range(len(waits)):
lr = lrs[i]
H = Hs[j]
wait = waits[k]
# T = int(10 / lr)
traj = expt.PGD(T, lr, wait, horizon = H)
pgd_l[i, j, k, r] = np.array([expt.lt_loss(th) for th in traj], dtype=float)
pgd_th[i, j, k, r] = np.array(traj)
pgd_d[i, j, k, r] = np.array([torch.linalg.norm(th - expt.opt_th) for th in traj], dtype=float)
# SPGD experiments
print('Running SPGD...')
for i in range(len(lrs)):
for j in range(len(spgd_perts)):
for l in range(len(Hs)):
for m in range(len(ks)):
lr = lrs[i]
pert = spgd_perts[j]
H = Hs[l]
k = ks[m]
# T = int(10 / lr)
traj = expt.SPGD(T, lr, k, H, pert)
spgd_l[i, j, l, m, r] = np.array([expt.lt_loss(th) for th in traj], dtype=float)
spgd_th[i, j, l, m, r] = np.array(traj)
spgd_d[i, j, l, m, r] = np.array([torch.linalg.norm(th - expt.opt_th) for th in traj], dtype=float)
results = {'rgd_th': rgd_th, 'rgd_l': rgd_l, 'rgd_d': rgd_d,
'dfo_i_th': dfo_i_th, 'dfo_i_l': dfo_i_l, 'dfo_i_d': dfo_i_d,
'dfo_q_th': dfo_q_th, 'dfo_q_l': dfo_q_l, 'dfo_q_d': dfo_q_d,
'pgd_th': pgd_th, 'pgd_l': pgd_l, 'pgd_d': pgd_d,
'spgd_th': spgd_th, 'spgd_l': spgd_l, 'spgd_d': spgd_d}
return results
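# Editor's note (added illustration, not in the original script): experiment() returns numpy
# object arrays indexed by hyper-parameter position and trial. A hypothetical call and lookup:
#   results = experiment(num_trials=2, T=10, lrs=[0.1], dfo_perts=[0.1], Hs=[None],
#                        waits=[1], ks=[None], spgd_perts=[0.1], d=2, R=5., delta0=0.5,
#                        mean_noise=0.)
#   results['rgd_l'][0, 0]           # length-T loss trajectory for lr index 0, trial 0
#   results['dfo_q_th'][0, 0, 0, 1]  # DFO query trajectory (lr 0, pert 0, wait 0, trial 1)
# The concrete argument values above are illustrative only.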
# d = 5
d = 2
R = 5.
lrs = [10 ** (-k/2) for k in range(1, 7)]
waits = [1, 5, 10, 20]
# ks = [1, 10, None]
ks = [None]
dfo_perts = [10 ** (-k/2) for k in range(4)]
spgd_perts = [0] + [10 ** (-k/2) for k in range(4)]
Hs = [None] + [d + k for k in range(d + 1)]
spgd_Hs = [None] + [2 * d + k for k in range(d + 1)]
def make_plots(delta0, mean_noise, T):
d = 5
R = 10.
num_trials = 5
results = experiment(num_trials, T, lrs, dfo_perts, Hs, waits, ks, spgd_perts, d, R, delta0, mean_noise)
###############################################################################
# Find best hyperparams for each method
###############################################################################
rgd_l = results['rgd_l']
dfo_i_l = results['dfo_i_l']
dfo_q_l = results['dfo_q_l']
pgd_l = results['pgd_l']
spgd_l = results['spgd_l']
rgd_l_mean = np.mean(rgd_l, axis = -1)
dfo_i_l_mean = np.mean(dfo_i_l, axis = -1)
dfo_q_l_mean = np.mean(dfo_q_l, axis = -1)
pgd_l_mean = np.mean(pgd_l, axis = -1)
spgd_l_mean = np.mean(spgd_l, axis = -1)
rgd_end = np.zeros(rgd_l_mean.shape).flatten()
dfo_i_end = np.zeros(dfo_i_l_mean.shape).flatten()
dfo_q_end = np.zeros(dfo_q_l_mean.shape).flatten()
pgd_end = np.zeros(pgd_l_mean.shape).flatten()
spgd_end = np.zeros(spgd_l_mean.shape).flatten()
for i, loss_traj in enumerate(rgd_l_mean.flatten()):
T = len(loss_traj)
rgd_end[i] = np.mean(loss_traj[-int(T/10):])
for i, loss_traj in enumerate(dfo_i_l_mean.flatten()):
T = len(loss_traj)
dfo_i_end[i] = np.mean(loss_traj[-int(T/10):])
for i, loss_traj in enumerate(pgd_l_mean.flatten()):
T = len(loss_traj)
pgd_end[i] = np.mean(loss_traj[-int(T/10):])
for i, loss_traj in enumerate(spgd_l_mean.flatten()):
T = len(loss_traj)
spgd_end[i] = np.mean(loss_traj[-int(T/10):])
rgd_best = np.unravel_index(np.nanargmin(rgd_end), rgd_l_mean.shape)
dfo_best = np.unravel_index(np.nanargmin(dfo_i_end), dfo_i_l_mean.shape)
pgd_best = np.unravel_index(np.nanargmin(pgd_end), pgd_l_mean.shape)
spgd_best = np.unravel_index(np.nanargmin(spgd_end), spgd_l_mean.shape)
print('')
print(f'RGD: lr={round(lrs[rgd_best[0]], 3)}')
print(f'DFO: lr={round(lrs[dfo_best[0]], 3)}, pert={round(dfo_perts[dfo_best[1]], 3)}, wait={waits[dfo_best[2]]}')
print(f'PGD: lr={round(lrs[pgd_best[0]], 3)}, H={Hs[pgd_best[1]]}, wait={waits[pgd_best[2]]}')
print(f'SPGD: lr={round(lrs[spgd_best[0]], 3)}, pert={round(spgd_perts[spgd_best[1]], 3)}, H={Hs[spgd_best[2]]}, k={ks[spgd_best[3]]}')
###############################################################################
# Make plots
###############################################################################
expt = ToyLinear2(d, R, delta0, mean_noise)
opt_l = expt.opt_l
stab_l = expt.stab_l
best_rgd_l = np.asfarray([x for x in rgd_l[rgd_best]])
best_dfo_l = np.asfarray([x for x in dfo_i_l[dfo_best]])
best_pgd_l = np.asfarray([x for x in pgd_l[pgd_best]])
best_spgd_l = np.asfarray([x for x in spgd_l[spgd_best]])
rgd_l_m = np.mean(best_rgd_l, axis=0)
dfo_l_m = np.mean(best_dfo_l, axis=0)
pgd_l_m = np.mean(best_pgd_l, axis=0)
spgd_l_m = np.mean(best_spgd_l, axis=0)
rgd_l_sem = sem(best_rgd_l, axis=0, nan_policy='propagate')
dfo_l_sem = sem(best_dfo_l, axis=0)
pgd_l_sem = sem(best_pgd_l, axis=0)
spgd_l_sem = sem(best_spgd_l, axis=0)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.figure()
plt.plot(opt_l * np.ones(T), label = 'OPT', color = colors[0])
plt.plot(stab_l * np.ones(T), label = 'STAB', color = colors[1])
plt.plot(rgd_l_m, label = 'RGD', color=colors[5])
plt.fill_between(range(T), rgd_l_m + rgd_l_sem, rgd_l_m - rgd_l_sem, color=colors[5], alpha=0.3)
plt.plot(dfo_l_m, label = 'DFO', color=colors[7])
plt.fill_between(range(T), dfo_l_m + dfo_l_sem, dfo_l_m - dfo_l_sem, color=colors[7], alpha=0.3)
plt.plot(pgd_l_m, label = 'PGD', color=colors[4])
plt.fill_between(range(T), pgd_l_m + pgd_l_sem, pgd_l_m - pgd_l_sem, color=colors[4], alpha=0.3)
plt.plot(spgd_l_m, label = 'SPGD', color=colors[2])
plt.fill_between(range(T), spgd_l_m + spgd_l_sem, spgd_l_m - spgd_l_sem, color=colors[2], alpha=0.3)
# plt.title(f'δ0={round(delta0, 3)}, mn={mean_noise}')
plt.xlabel('Training iteration')
plt.ylabel('Loss')
# plt.ylim((opt_l - 0.5, stab_l + 0.5))
plt.legend()
# plt.savefig(f'plots/toy_linear/{round(delta0, 3)}_{mean_noise}_{T}.pdf')
# plt.savefig(f'plots/toy_linear/final/{round(delta0, 3)}_{mean_noise}.pdf')
return rgd_best, best_rgd_l, dfo_best, best_dfo_l, pgd_best, best_pgd_l, spgd_best, best_spgd_l
# return spgd_best
# delta0s = [1 - (0.01) ** (1 / k) for k in range(1, 17)]
# delta0s = [1 - (0.01) ** (2 ** -k) for k in range(7)]
# settle_steps = [2 ** k for k in range(7)]
# settle_steps = [128, 512, 2048]
settle_steps = [32]
delta0s = [1 - (0.01) ** (1 / k) for k in settle_steps]
rgds = [None for _ in range(len(delta0s))]
rgd_ls = [None for _ in range(len(delta0s))]
dfos = [None for _ in range(len(delta0s))]
dfo_ls = [None for _ in range(len(delta0s))]
pgds = [None for _ in range(len(delta0s))]
pgd_ls = [None for _ in range(len(delta0s))]
spgds = [None for _ in range(len(delta0s))]
spgd_ls = [None for _ in range(len(delta0s))]
for i in tqdm(range(len(delta0s))):
delta0 = delta0s[i]
T = 50
mean_noise = 0.001
# mean_noise = 0.
rgds[i], rgd_ls[i], dfos[i], dfo_ls[i], pgds[i], pgd_ls[i], spgds[i], spgd_ls[i] = make_plots(delta0, mean_noise, T)
# spgds[i] = make_plots(delta0, mean_noise, T)
rgd_end_m = np.zeros(len(delta0s))
rgd_end_sem = np.zeros(len(delta0s))
dfo_end_m = np.zeros(len(delta0s))
dfo_end_sem = np.zeros(len(delta0s))
pgd_end_m = np.zeros(len(delta0s))
pgd_end_sem = np.zeros(len(delta0s))
spgd_end_m = np.zeros(len(delta0s))
spgd_end_sem = np.zeros(len(delta0s))
for i in range(len(delta0s)):
rgd_end_m[i] = np.mean(rgd_ls[i][:, -5:])
rgd_end_sem[i] = sem(rgd_ls[i][:, -5:], axis=None)
dfo_end_m[i] = np.mean(dfo_ls[i][:, -5:])
dfo_end_sem[i] = sem(dfo_ls[i][:, -5:], axis=None)
pgd_end_m[i] = np.mean(pgd_ls[i][:, -5:])
pgd_end_sem[i] = sem(pgd_ls[i][:, -5:], axis=None)
spgd_end_m[i] = np.mean(spgd_ls[i][:, -5:])
spgd_end_sem[i] = sem(spgd_ls[i][:, -5:], axis=None)
expt = ToyLinear2(d, R, delta0, mean_noise)
opt_l = expt.opt_l
stab_l = expt.stab_l
# xs = [2 ** k for k in range(7)]
xs = settle_steps
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.figure()
plt.plot(xs, [1. for _ in range(len(delta0s))], label = 'OPT', color = colors[0])
plt.plot(xs, [0. for _ in range(len(delta0s))], label = 'STAB', color = colors[1])
plt.plot(xs, rgd_end_m / opt_l, label = 'RGD', color = colors[5])
plt.fill_between(xs, (rgd_end_m + rgd_end_sem) / opt_l, (rgd_end_m - rgd_end_sem) / opt_l, color = colors[5], alpha = 0.3)
plt.plot(xs, dfo_end_m / opt_l, label = 'DFO', color = colors[7])
plt.fill_between(xs, (dfo_end_m + dfo_end_sem) / opt_l, (dfo_end_m - dfo_end_sem) / opt_l, color = colors[7], alpha = 0.3)
plt.plot(xs, pgd_end_m / opt_l, label = 'PGD', color = colors[4])
plt.fill_between(xs, (pgd_end_m + pgd_end_sem) / opt_l, (pgd_end_m - pgd_end_sem) / opt_l, color = colors[4], alpha = 0.3)
plt.plot(xs, spgd_end_m / opt_l, label = 'SPGD', color = colors[2])
plt.fill_between(xs, (spgd_end_m + spgd_end_sem) / opt_l, (spgd_end_m - spgd_end_sem) / opt_l, color = colors[2], alpha = 0.3)
plt.xlabel('Steps for mean to settle')
plt.ylabel('% of optimal revenue')
plt.xscale('log')
# plt.xticks([2**k for k in range(7)], [2**k for k in range(7)])
plt.xticks(xs, xs)
leg = plt.legend()
for legobj in leg.legendHandles:
legobj.set_linewidth(linewidth)
leg.set_draggable(state=True)
mus = np.zeros((32, 2))
mus[0] = np.random.randn(2)
# mus[0] = -2 * np.ones(2)
# th = np.zeros(2)
th = np.random.randn(2)
for t in range(1, 32):
mus[t] = expt.m(th, mus[t-1])
lt = expt.lt_mu(th)
plt.figure()
plt.scatter(mus[0,0], mus[0,1], c='c', marker='x', label='Initial mean')
plt.scatter(mus[1:, 0], mus[1:, 1], c = range(31), cmap = 'viridis', label = 'Mean updates')
plt.scatter(lt[0], lt[1], c = 'r', marker='*', label = 'Long-term mean')
plt.legend()
| 40.640203 | 139 | 0.510744 |
ffe46067db10706dbc78a7cb11eafb3aecd868cc
| 1,534 |
py
|
Python
|
testbuilder/core/management/load_engine.py
|
AsafSilman/testbuilder
|
931dd09483f86346575f22031413f47d2b22aa8e
|
[
"Apache-2.0"
] | 2 |
2018-08-05T18:26:11.000Z
|
2018-08-08T10:05:36.000Z
|
testbuilder/core/management/load_engine.py
|
AsafSilman/testbuilder
|
931dd09483f86346575f22031413f47d2b22aa8e
|
[
"Apache-2.0"
] | 14 |
2018-08-03T07:03:20.000Z
|
2018-08-10T09:54:21.000Z
|
testbuilder/core/management/load_engine.py
|
AsafSilman/testbuilder
|
931dd09483f86346575f22031413f47d2b22aa8e
|
[
"Apache-2.0"
] | null | null | null |
"""Loads instance of TBBaseEngine"""
from testbuilder.conf import settings
from testbuilder.core.engine import TBEngine
def load_engine():
"""
Create a TBEngine instance, load all settings from testbuilder settings
Returns:
TBEngine -- The loaded engine instance
"""
    engine = TBEngine()
## Step 1. Load Interfaces
for interface_name in settings["INSTALLED_INTERFACES"]:
interface_module = settings["INSTALLED_INTERFACES"][interface_name]
engine.load_interface(interface_name, interface_module)
    ## Step 2. Load Middlewares
for middleware_name in settings["INSTALLED_MIDDLEWARES"]:
middleware_module = settings["INSTALLED_MIDDLEWARES"][middleware_name]
engine.load_middleware(middleware_name, middleware_module)
## Step 3. Load ObjectMap Parsers
for objmap_parser_name in settings["INSTALLED_OBJECTMAPS_PARSERS"]:
objmap_parser_module = settings["INSTALLED_OBJECTMAPS_PARSERS"][objmap_parser_name]
engine.load_objectmap_parser(objmap_parser_name, objmap_parser_module)
## Step 4. Load TestLoaders
for testloader_name in settings["INSTALLED_TESTLOADERS"]:
testloader_module = settings["INSTALLED_TESTLOADERS"][testloader_name]
engine.load_testloader(testloader_name, testloader_module)
## Step 5. Load profiles
for profile_name in settings["INSTALLED_PROFILES"]:
profile = settings["INSTALLED_PROFILES"][profile_name]
engine.load_profile(profile_name, profile)
return engine
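# Editor's note (added illustration, not part of the original module): a minimal usage sketch,
# assuming the testbuilder settings above define the five INSTALLED_* sections.
#
#   from testbuilder.core.management.load_engine import load_engine
#
#   engine = load_engine()  # TBEngine with interfaces, middlewares, object-map parsers,
#                           # test loaders and profiles registered, ready for use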
| 34.088889 | 91 | 0.744459 |
17b27040a233b04dd637e53daaa3a92472d1e94e
| 10,526 |
py
|
Python
|
python_cowbull_game/v2_deprecated/GameController.py
|
dsandersAzure/python_cowbull_game
|
82a0d8ee127869123d4fad51a8cd1707879e368f
|
[
"Apache-2.0"
] | 1 |
2017-05-01T20:13:40.000Z
|
2017-05-01T20:13:40.000Z
|
python_cowbull_game/v2_deprecated/GameController.py
|
dsandersAzure/python_cowbull_game
|
82a0d8ee127869123d4fad51a8cd1707879e368f
|
[
"Apache-2.0"
] | null | null | null |
python_cowbull_game/v2_deprecated/GameController.py
|
dsandersAzure/python_cowbull_game
|
82a0d8ee127869123d4fad51a8cd1707879e368f
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
from python_cowbull_game.GameObject import GameObject
from python_digits import DigitWord
from python_cowbull_game.v2_deprecated.GameMode import GameMode
class GameController(object):
schema = {
"type": "object",
"properties":
{
"key": {"type": "string"},
"status": {"type": "string"},
"ttl": {"type": "integer"},
"answer": {
"type": "array",
"items": { "digit": {"type": "string"}}
},
"mode": {"type": "string"},
"guesses_remaining": {"type": "integer"},
"guesses_made": {"type": "integer"}
}
}
def __init__(self, game):
"""Initialize the Game."""
super(GameController, self).__init__()
self._game_modes = [
GameMode(
mode="normal", priority=2, digits=4, digit_type=DigitWord.DIGIT, guesses_allowed=10
),
GameMode(
mode="easy", priority=1, digits=3, digit_type=DigitWord.DIGIT, guesses_allowed=6
),
GameMode(
mode="hard", priority=3, digits=6, digit_type=DigitWord.DIGIT, guesses_allowed=6
),
GameMode(
mode="hex", priority=4, digits=4, digit_type=DigitWord.HEXDIGIT, guesses_allowed=10
)
]
if game:
self.load(game=game)
else:
self._game = None
self._mode = None
#
# Properties
#
@property
def digits_required(self):
self._validate()
_mode = self._load_mode(self._mode)
return _mode.digits
@property
def digits_type(self):
self._validate()
_mode = self._load_mode(self._mode)
return _mode.digit_type
@property
def guesses_allowed(self):
self._validate()
_mode = self._load_mode(self._mode)
return _mode.guesses_allowed
@property
def key(self):
self._validate()
return self._game.key
@property
def game_modes(self):
return sorted(self._game_modes, key=lambda x: x.priority)
@property
def game_mode_names(self):
return [game_mode.mode for game_mode in sorted(self._game_modes, key=lambda x: x.priority)]
@property
def game(self):
return self._game
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if value not in self.game_mode_names:
raise ValueError("Unsupported game mode {}. Valid choices are {}".format(
value, ', '.join(self.game_mode_names))
)
self._mode = value
#
# 'public' methods
#
def add_mode(self, game_mode=None):
if game_mode is None:
raise ValueError("game_mode must be provided!")
if isinstance(game_mode, GameMode):
print("Adding {}".format(game_mode.mode))
self._game_modes.append(game_mode)
elif isinstance(game_mode, list):
for i in game_mode:
self.add_mode(game_mode=i)
else:
raise TypeError("game_mode must be of type {}".format(GameMode))
def remove_mode(self, index=None):
if len(self._game_modes) < 1:
raise IndexError("There are no game modes to remove!")
if index is None or not isinstance(index, int) or index > len(self._game_modes):
raise ValueError("Index (int) must be specified within valid range")
del(self._game_modes[index])
    def replace_mode(self, index=None, game_mode=None):
        if game_mode is None or not isinstance(game_mode, GameMode):
            raise ValueError("game_mode must be provided as a {} instance!".format(GameMode))
        if index is None or not isinstance(index, int) or index >= len(self._game_modes):
            raise ValueError("Index (int) must be specified within valid range")
        self._game_modes[index] = game_mode
def replace_modes(self, game_modes=None):
ex_msg = "game_modes must be a list of {}".format(GameMode)
if game_modes is None:
raise ValueError(ex_msg)
if not isinstance(game_modes, list):
raise TypeError(ex_msg)
self._game_modes = []
for i in game_modes:
self.add_mode(game_mode=i)
def new(self, mode=None):
if mode is None:
_mode = self._game_modes[0]
else:
self.mode = mode
_mode = self._load_mode(mode)
self._game = GameObject(game_mode=_mode)
#return self._game
def save(self):
self._validate()
return json.dumps(self._game.dump())
def load(self, game=None):
ex_msg = "Game must be passed as a serialized JSON string."
if not game:
raise ValueError(ex_msg)
if not isinstance(game, str):
raise TypeError(ex_msg)
game_dict = json.loads(game)
if not 'mode' in game_dict:
raise ValueError("Mode is not provided in JSON; game cannot be loaded!")
_mode = self._load_mode(game_dict['mode'])
self._game = GameObject(game_mode=_mode, source_dict=game_dict)
    def guess(self, *args):
        """
guess() allows a guess to be made. Before the guess is made, the method
checks to see if the game has been won, lost, or there are no tries
remaining. It then creates a return object stating the number of bulls
(direct matches), cows (indirect matches), an analysis of the guess (a
list of analysis objects), and a status.
:param args: any number of integers (or string representations of integers)
to the number of Digits in the answer; i.e. in normal mode, there would be
a DigitWord to guess of 4 digits, so guess would expect guess(1, 2, 3, 4)
and a shorter (guess(1, 2)) or longer (guess(1, 2, 3, 4, 5)) sequence will
raise an exception.
:return: a JSON object containing the analysis of the guess:
{
"cows": {"type": "integer"},
"bulls": {"type": "integer"},
"analysis": {"type": "array of DigitWordAnalysis"},
"status": {"type": "string"}
}
"""
logging.debug("guess called.")
logging.debug("Validating game object")
self._validate(op="guess")
logging.debug("Building return object")
_return_results = {
"cows": None,
"bulls": None,
"analysis": [],
"status": ""
}
logging.debug("Check if game already won, lost, or too many tries.")
if self._game.status == GameObject.GAME_WON:
_return_results["message"] = self._start_again("You already won!")
elif self._game.status == GameObject.GAME_LOST:
_return_results["message"] = self._start_again("You have made too many guesses, you lost!")
elif self._game.guesses_remaining < 1:
_return_results["message"] = self._start_again("You have run out of tries, sorry!")
else:
logging.debug("Creating a DigitWord for the guess.")
_mode = self._load_mode(self._game.mode)
guess = DigitWord(*args, wordtype=_mode.digit_type)
logging.debug("Validating guess.")
self._game.guesses_remaining -= 1
self._game.guesses_made += 1
logging.debug("Initializing return object.")
_return_results["analysis"] = []
_return_results["cows"] = 0
_return_results["bulls"] = 0
logging.debug("Asking the underlying GameObject to compare itself to the guess.")
for i in self._game.answer.compare(guess):
logging.debug("Iteration of guesses. Processing guess {}".format(i.index))
if i.match is True:
logging.debug("Bull found. +1")
_return_results["bulls"] += 1
elif i.in_word is True:
logging.debug("Cow found. +1")
_return_results["cows"] += 1
logging.debug("Add analysis to return object")
_return_results["analysis"].append(i.get_object())
logging.debug("Checking if game won or lost.")
if _return_results["bulls"] == len(self._game.answer.word):
logging.debug("Game was won.")
self._game.status = GameObject.GAME_WON
self._game.guesses_remaining = 0
_return_results["message"] = "Well done! You won the game with your " \
"answers {}".format(self._game.answer_str)
elif self._game.guesses_remaining < 1:
logging.debug("Game was lost.")
self._game.status = GameObject.GAME_LOST
_return_results["message"] = "Sorry, you lost! The correct answer was " \
"{}".format(self._game.answer_str)
_return_results["status"] = self._game.status
logging.debug("Returning results.")
return _return_results
#
# 'private' methods
#
def _start_again(self, message=None):
"""Simple method to form a start again message and give the answer in readable form."""
logging.debug("Start again message delivered: {}".format(message))
the_answer = self._game.answer_str
return "{0} The correct answer was {1}. Please start a new game.".format(
message,
the_answer
)
def _load_mode(self, mode):
_mode = [game_mode for game_mode in self._game_modes if game_mode.mode == mode]
if len(_mode) < 1:
raise ValueError("No mode was found for {}".format(mode))
_mode = _mode[0]
if not _mode:
raise ValueError("For some reason, the mode is defined but unavailable!")
return _mode
def _validate(self, op="unknown"):
"""
A helper method to provide validation of the game object (_g). If the
game object does not exist or if (for any reason) the object is not a GameObject,
then an exception will be raised.
:param op: A string describing the operation (e.g. guess, save, etc.) taking place
:return: Nothing
"""
if self._game is None:
raise ValueError(
"GameController:{}: ".format(op) +
"Game must be instantiated before using - call new() to start a new game, "
"or load() to load from JSON."
)
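# Editor's note (added illustration, not part of the original file): a minimal usage sketch,
# assuming the python_digits package and the GameObject/GameMode modules imported above are
# available; the guessed digits are placeholders.
if __name__ == "__main__":
    controller = GameController(None)       # no serialized game yet
    controller.new(mode="normal")           # "normal" mode uses a 4-digit DigitWord answer
    result = controller.guess(1, 2, 3, 4)   # dict with bulls, cows, analysis, status
    print(result["bulls"], result["cows"], result["status"])
    saved = controller.save()               # JSON string that load() can restore later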
| 34.854305 | 103 | 0.569352 |
66c818238bb587a61a34f1d4a23d7b201e980dcc
| 1,018 |
py
|
Python
|
checkov/kubernetes/checks/resource/k8s/KubeletAnonymousAuth.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
checkov/kubernetes/checks/resource/k8s/KubeletAnonymousAuth.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
checkov/kubernetes/checks/resource/k8s/KubeletAnonymousAuth.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check
class KubeletAnonymousAuth(BaseK8Check):
def __init__(self):
# CIS-1.6 4.2.1
id = "CKV_K8S_138"
name = "Ensure that the --anonymous-auth argument is set to false"
categories = [CheckCategories.KUBERNETES]
supported_entities = ['containers']
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}' if conf.get('name') else conf["parent"]
def scan_spec_conf(self, conf):
if "command" in conf:
if "kubelet" in conf["command"]:
if "--anonymous-auth=true" in conf["command"] or "--anonymous-auth=false" not in conf["command"]:
return CheckResult.FAILED
return CheckResult.PASSED
check = KubeletAnonymousAuth()
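# Editor's note (added illustration, not part of the original check): a container spec dict of
# the shape consumed by scan_spec_conf above; the names and command are made-up examples.
if __name__ == "__main__":
    sample_conf = {
        "name": "kubelet",
        "parent": "DaemonSet.kube-system.kubelet",
        "command": ["kubelet", "--anonymous-auth=false"],
    }
    print(check.scan_spec_conf(sample_conf))  # CheckResult.PASSED: anonymous auth is disabled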
| 37.703704 | 113 | 0.660118 |
f0c75f29d46a1d9e99ff79e496780dae59d4d9be
| 608 |
py
|
Python
|
factory-method/python/DictionaryFactoryMethod.py
|
koonchen/awesome-design-patterns
|
53891e5331b211bce7c2b3f8ea6fdd1a9f4086d1
|
[
"MIT"
] | 5 |
2018-10-19T15:30:00.000Z
|
2018-11-30T06:02:12.000Z
|
factory-method/python/DictionaryFactoryMethod.py
|
koonchen/awesome-design-patterns
|
53891e5331b211bce7c2b3f8ea6fdd1a9f4086d1
|
[
"MIT"
] | null | null | null |
factory-method/python/DictionaryFactoryMethod.py
|
koonchen/awesome-design-patterns
|
53891e5331b211bce7c2b3f8ea6fdd1a9f4086d1
|
[
"MIT"
] | 2 |
2018-10-19T15:30:08.000Z
|
2019-02-20T15:28:38.000Z
|
#!/usr/bin/env python
# coding:utf8
class ChineseGetter(object):
def __init__(self):
self.trans = dict(dog="狗", cat="猫")
def get(self, item):
return self.trans.get(item, str(item))
class EnglishGetter(object):
def get(self, item):
return str(item)
def get_localizer(language="English"):
languages = dict(English=EnglishGetter, Chinese=ChineseGetter)
return languages[language]()
if __name__ == "__main__":
e, c = get_localizer(), get_localizer(language="Chinese")
for item in "dog parrot cat bear".split():
print(e.get(item), c.get(item))
| 22.518519 | 66 | 0.654605 |
9cd43654f9e8c7d8513293e21ce0daccc7240295
| 8,308 |
py
|
Python
|
kubernetes_asyncio/client/models/v2beta1_metric_status.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | 1 |
2020-03-25T01:24:27.000Z
|
2020-03-25T01:24:27.000Z
|
kubernetes_asyncio/client/models/v2beta1_metric_status.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v2beta1_metric_status.py
|
olitheolix/kubernetes_asyncio
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V2beta1MetricStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'external': 'V2beta1ExternalMetricStatus',
'object': 'V2beta1ObjectMetricStatus',
'pods': 'V2beta1PodsMetricStatus',
'resource': 'V2beta1ResourceMetricStatus',
'type': 'str'
}
attribute_map = {
'external': 'external',
'object': 'object',
'pods': 'pods',
'resource': 'resource',
'type': 'type'
}
def __init__(self, external=None, object=None, pods=None, resource=None, type=None): # noqa: E501
"""V2beta1MetricStatus - a model defined in Swagger""" # noqa: E501
self._external = None
self._object = None
self._pods = None
self._resource = None
self._type = None
self.discriminator = None
if external is not None:
self.external = external
if object is not None:
self.object = object
if pods is not None:
self.pods = pods
if resource is not None:
self.resource = resource
self.type = type
@property
def external(self):
"""Gets the external of this V2beta1MetricStatus. # noqa: E501
external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). # noqa: E501
:return: The external of this V2beta1MetricStatus. # noqa: E501
:rtype: V2beta1ExternalMetricStatus
"""
return self._external
@external.setter
def external(self, external):
"""Sets the external of this V2beta1MetricStatus.
external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). # noqa: E501
:param external: The external of this V2beta1MetricStatus. # noqa: E501
:type: V2beta1ExternalMetricStatus
"""
self._external = external
@property
def object(self):
"""Gets the object of this V2beta1MetricStatus. # noqa: E501
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). # noqa: E501
:return: The object of this V2beta1MetricStatus. # noqa: E501
:rtype: V2beta1ObjectMetricStatus
"""
return self._object
@object.setter
def object(self, object):
"""Sets the object of this V2beta1MetricStatus.
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). # noqa: E501
:param object: The object of this V2beta1MetricStatus. # noqa: E501
:type: V2beta1ObjectMetricStatus
"""
self._object = object
@property
def pods(self):
"""Gets the pods of this V2beta1MetricStatus. # noqa: E501
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. # noqa: E501
:return: The pods of this V2beta1MetricStatus. # noqa: E501
:rtype: V2beta1PodsMetricStatus
"""
return self._pods
@pods.setter
def pods(self, pods):
"""Sets the pods of this V2beta1MetricStatus.
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. # noqa: E501
:param pods: The pods of this V2beta1MetricStatus. # noqa: E501
:type: V2beta1PodsMetricStatus
"""
self._pods = pods
@property
def resource(self):
"""Gets the resource of this V2beta1MetricStatus. # noqa: E501
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. # noqa: E501
:return: The resource of this V2beta1MetricStatus. # noqa: E501
:rtype: V2beta1ResourceMetricStatus
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V2beta1MetricStatus.
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. # noqa: E501
:param resource: The resource of this V2beta1MetricStatus. # noqa: E501
:type: V2beta1ResourceMetricStatus
"""
self._resource = resource
@property
def type(self):
"""Gets the type of this V2beta1MetricStatus. # noqa: E501
type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. # noqa: E501
:return: The type of this V2beta1MetricStatus. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V2beta1MetricStatus.
type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. # noqa: E501
:param type: The type of this V2beta1MetricStatus. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2beta1MetricStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 36.279476 | 350 | 0.633245 |
aed5c55aa5c5bd81fa5c98126f5c976b6799fea9
| 2,950 |
py
|
Python
|
tests/test_extra_verifications.py
|
jekel/sanic-jwt
|
4f6ab07376ad90011d40e205feacae8219359d6f
|
[
"MIT"
] | 226 |
2017-09-05T08:23:58.000Z
|
2022-03-28T09:23:47.000Z
|
tests/test_extra_verifications.py
|
jekel/sanic-jwt
|
4f6ab07376ad90011d40e205feacae8219359d6f
|
[
"MIT"
] | 179 |
2017-09-27T08:33:16.000Z
|
2022-01-28T20:35:23.000Z
|
tests/test_extra_verifications.py
|
jekel/sanic-jwt
|
4f6ab07376ad90011d40e205feacae8219359d6f
|
[
"MIT"
] | 45 |
2017-10-14T10:26:46.000Z
|
2022-02-04T15:01:20.000Z
|
from sanic import Sanic
from sanic.response import json
from sanic_jwt import Initialize, protected
def test_extra_verification_passing(app_with_extra_verification):
sanic_app, sanic_jwt = app_with_extra_verification
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(
sanic_jwt.config.access_token_name(), None
)
_, response = sanic_app.test_client.get(
"/protected",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 401
assert "Verifications were not met." in response.json.get("reasons")
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user2", "password": "abcxyz"}
)
access_token = response.json.get(
sanic_jwt.config.access_token_name(), None
)
_, response = sanic_app.test_client.get(
"/protected",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 200
def test_extra_verification_non_boolean_return(authenticate):
def bad_return(payload):
return 123
extra_verifications = [bad_return]
sanic_app = Sanic("sanic-jwt-test")
sanic_jwt = Initialize(
sanic_app,
debug=True,
authenticate=authenticate,
extra_verifications=extra_verifications,
)
@sanic_app.route("/protected")
@protected()
async def protected_request(request):
return json({"protected": True})
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(
sanic_jwt.config.access_token_name(), None
)
_, response = sanic_app.test_client.get(
"/protected",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 500
assert "Verifications must be a callable object "
def test_extra_verification_non_callable(authenticate):
extra_verifications = [123]
sanic_app = Sanic("sanic-jwt-test")
sanic_jwt = Initialize(
sanic_app,
debug=True,
authenticate=authenticate,
extra_verifications=extra_verifications,
)
@sanic_app.route("/protected")
@protected()
async def protected_request(request):
return json({"protected": True})
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(
sanic_jwt.config.access_token_name(), None
)
_, response = sanic_app.test_client.get(
"/protected",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 500
assert "Verifications must be a callable object "
"returning a boolean value." in response.json.get("reasons")
| 27.314815 | 72 | 0.656949 |
f7ae2650f53a2d646cf7d166dc5546419f8d3b22
| 16,557 |
py
|
Python
|
References/Geovana Neves/TCC_Geovana_Neves_GitHub/Turboprop/_baseline/Optimize.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
References/Geovana Neves/TCC_Geovana_Neves_GitHub/Turboprop/_baseline/Optimize.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
References/Geovana Neves/TCC_Geovana_Neves_GitHub/Turboprop/_baseline/Optimize.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
# Optimize.py
# Created: Feb 2016, M. Vegh
# Modified: Aug 2017, E. Botero
# Aug 2018, T. MacDonald
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units, Data
import numpy as np
import Vehicles
import Analyses
import Missions
import Procedure
import Plot_Mission
import matplotlib.pyplot as plt
import matplotlib
from SUAVE.Optimization import Nexus, carpet_plot
import SUAVE.Optimization.Package_Setups.scipy_setup as scipy_setup
from copy import deepcopy
from timeit import time
# ----------------------------------------------------------------------
# Run the whole thing
# ----------------------------------------------------------------------
def main():
t0 = time.time()
problem = setup()
## Base Input Values
output = problem.objective()
print output
Plot_Mission.plot_mission(problem, -1)
# # Uncomment to view contours of the design space
# variable_sweep(problem)
print ' '
print ' Initial Guess Results '
print ' '
print 'Fuel Burn = ', float(problem.summary.base_mission_fuelburn)
print 'Cruise Fuel = ', float(problem.summary.cruise_fuel)
print 'Block Fuel = ', float(problem.summary.block_fuel)
print 'MTOW = ', float(problem.summary.MTOW)
print 'BOW = ', float(problem.summary.BOW)
print 'TOFL = ', float(problem.summary.takeoff_field_length)
print 'GRAD = ', float(problem.summary.second_segment_climb_gradient_takeoff)
print 'Cruise Alt = ', float(problem.summary.cruise_altitude)
print 'Design Ran = ', float(problem.summary.design_range)
print 'Cruise Ran = ', float(problem.summary.cruise_range)
print 'Total Ran = ', float(problem.summary.total_range)
print 'Time To Cli = ', float(problem.summary.time_to_climb_value)
print 'TOW HH = ', float(problem.summary.TOW_HH)
print 'Fuel HH = ', float(problem.summary.FUEL_HH)
print ' '
print 'Constraints = ', problem.all_constraints()
print ' '
# ------------------------------------------------------------------
# Pareto
fuelburn = []
allconstraints = []
MTOW = []
finalvalue = []
grad = []
tofl = []
variations = []
fid = open('DoE_results.txt', 'w') # Open output file
fid.write(' DoE results \n')
fid.close()
deltas = [1.01]
original_values = deepcopy(problem.optimization_problem.inputs[:, 1])
original_bounds = deepcopy(problem.optimization_problem.inputs[:, 2])
for k, delta in enumerate(deltas):
for i in range(len(problem.optimization_problem.inputs)-1):
fid = open('DoE_results.txt', 'ab') # Open output file
fid.write('DoE - parameter study. Run number: '+str(i+(len(problem.optimization_problem.inputs)-1)*k)+' \n')
print ('DoE - parameter study. Run number: '+str(i+(len(problem.optimization_problem.inputs)-1)*k))
inputs = [1., 1., 1., 1., 1., 1.]
inputs[i] = delta
variations.append(inputs)
print ('Scaling Inputs: '+str(inputs))
# reset values
problem.optimization_problem.inputs[:, 1] = deepcopy(original_values)
problem.optimization_problem.inputs[:, 2] = deepcopy(original_bounds)
# changing parameters values
scaling = problem.optimization_problem.inputs[:, 1]
scaled_inputs = np.multiply(inputs, scaling)
problem.optimization_problem.inputs[:, 1] = scaled_inputs
# changing parameters bounds
bounds = problem.optimization_problem.inputs[:, 2]
bounds[i] = list(bounds[i])
bounds[i][0] = bounds[i][0] * inputs[i]
bounds[i][1] = bounds[i][1] * inputs[i]
bounds[i] = tuple(bounds[i])
problem.optimization_problem.inputs[:, 2] = bounds
output = scipy_setup.SciPy_Solve(problem, solver='SLSQP')
print output
print ' '
print ' Final Results '
print ' '
print 'Fuel Burn = ', float(problem.summary.base_mission_fuelburn)
print 'Cruise Fuel = ', float(problem.summary.cruise_fuel)
print 'Block Fuel = ', float(problem.summary.block_fuel)
print 'MTOW = ', float(problem.summary.MTOW)
print 'BOW = ', float(problem.summary.BOW)
print 'TOFL = ', float(problem.summary.takeoff_field_length)
print 'GRAD = ', float(problem.summary.second_segment_climb_gradient_takeoff)
print 'Cruise Alt = ', float(problem.summary.cruise_altitude)
print 'Design Ran = ', float(problem.summary.design_range)
print 'Cruise Ran = ', float(problem.summary.cruise_range)
print 'Total Ran = ', float(problem.summary.total_range)
print 'Time To Cli = ', float(problem.summary.time_to_climb_value)
print 'TOW HH = ', float(problem.summary.TOW_HH)
print 'Fuel HH = ', float(problem.summary.FUEL_HH)
print ' '
print 'Constraints = ', problem.all_constraints()
Plot_Mission.plot_mission(problem, i)
finalvalue.append(output)
fuelburn.append(problem.summary.base_mission_fuelburn)
allconstraints.append(problem.all_constraints())
grad.append(problem.summary.second_segment_climb_gradient_takeoff)
tofl.append(problem.summary.takeoff_field_length)
MTOW.append(problem.summary.MTOW)
fid.write(str(fuelburn[-1])+' \n')
fid.write(str(grad[-1]) + ' \n')
fid.write(str(tofl[-1]) + ' \n')
fid.write(str(MTOW[-1]) + ' \n')
fid.write(str(allconstraints[-1]) + ' \n')
fid.write(str(variations[-1]) + ' \n')
fid.write(str(finalvalue[-1]) + ' \n')
fid.write('\n \n')
fid.close()
fid = open('DoE_results.txt', 'ab') # Open output file
elapsed = time.time() - t0
fid.write('Total run time: ' + str(elapsed))
print('Total run time: ' + str(elapsed))
fid.close()
return
# ----------------------------------------------------------------------
# Inputs, Objective, & Constraints
# ----------------------------------------------------------------------
def setup():
nexus = Nexus()
problem = Data()
nexus.optimization_problem = problem
# -------------------------------------------------------------------
# Inputs
# -------------------------------------------------------------------
# [ tag , initial, (lb,ub) , scaling , units ]
problem.inputs = np.array([
['wing_area', 61., (61.0, 61.0), 100., Units.meter**2],
['aspect_ratio', 12., (12., 12.), 100.0, Units.less],
['t_c_ratio', 0.15, (0.15, 0.15), 1., Units.less],
['sweep_angle', 3., (3., 3.0), 100.0, Units.deg],
['taper_ratio', 0.53, (0.53, 0.53), 1., Units.less],
['cruise_range', 336.9, (250., 450.), 1000.0, Units.nautical_miles],
# ['beta', 1., (1., 1.), 1., Units.less],
])
# -------------------------------------------------------------------
# Objective
# -------------------------------------------------------------------
# throw an error if the user isn't specific about wildcards
# [ tag, scaling, units ]
problem.objective = np.array([
['Nothing', 1., Units.kg],
])
# -------------------------------------------------------------------
# Constraints
# -------------------------------------------------------------------
# [ tag, sense, edge, scaling, units ]
# CONSTRAINTS ARE SET TO BE BIGGER THAN ZERO, SEE PROCEDURE (SciPy's SLSQP optimization algorithm assumes this form)
problem.constraints = np.array([
# ['design_range_margin', '=', 0., 100., Units.nautical_miles], # Range consistency
# ['fuel_margin', '>', 0., 1000., Units.kg], #fuel margin defined here as fuel
['Throttle_min', '>', 0., 1., Units.less],
['Throttle_max', '>', 0., 1., Units.less],
# ['tofl_mtow_margin', '>', 0., 100., Units.m], # take-off field length
['mzfw_consistency', '>', 0., 1000., Units.kg], # MZFW consistency
['design_range_ub', '>', 0., 1., Units.nautical_miles], # Range consistency
['design_range_lb', '>', 0., 1., Units.nautical_miles], # Range consistency
# ['time_to_climb', '>', 0., 10., Units.min], # Time to climb consistency
# ['climb_gradient', '>', 0., 1., Units.less], # second segment climb gradient
# ['lfl_mlw_margin', '>', 0., 100., Units.m], # landing field length
# ['max_fuel_margin', '>', 0., 1000., Units.kg], # max fuel margin
# ['range_HH_margin', '>', 0., 1000., Units.nautical_miles], # Range for Hot and High
# ['TOW_HH_margin', '>', 0., 1000., Units.kg], # TOW for Hot and High
# ['MTOW', '>', 0., 100000., Units.kg], # TOW for Hot and High
# ['BOW', '>', 0., 1., Units.kg], # TOW for Hot and High
])
# -------------------------------------------------------------------
# Aliases
# -------------------------------------------------------------------
# [ 'alias' , ['data.path1.name','data.path2.name'] ]
problem.aliases = [
['wing_area', ['vehicle_configurations.*.wings.main_wing.areas.reference',
'vehicle_configurations.*.reference_area' ]],
['aspect_ratio', 'vehicle_configurations.*.wings.main_wing.aspect_ratio' ],
['taper_ratio', 'vehicle_configurations.*.wings.main_wing.taper' ],
['t_c_ratio', 'vehicle_configurations.*.wings.main_wing.thickness_to_chord' ],
['sweep_angle', 'vehicle_configurations.*.wings.main_wing.sweeps.quarter_chord'],
['cruise_range', 'missions.base.segments.cruise.distance' ],
['Nothing', 'summary.nothing' ],
# ['fuel_burn', 'summary.base_mission_fuelburn' ],
['fuel_margin', 'summary.fuel_margin' ],
['Throttle_min', 'summary.throttle_min' ],
['Throttle_max', 'summary.throttle_max' ],
['tofl_mtow_margin', 'summary.takeoff_field_length_margin' ],
['mzfw_consistency', 'summary.mzfw_consistency' ],
# ['design_range_margin', 'summary.design_range_margin'],
['design_range_ub', 'summary.design_range_ub' ],
['design_range_lb', 'summary.design_range_lb' ],
['time_to_climb', 'summary.time_to_climb' ],
['climb_gradient', 'summary.climb_gradient' ],
['lfl_mlw_margin', 'summary.lfl_mlw_margin' ],
['max_fuel_margin', 'summary.max_fuel_margin' ],
# ['range_HH_margin', 'summary.range_HH_margin'],
['TOW_HH_margin', 'summary.TOW_HH_margin'],
# ['MTOW', 'summary.MTOW'],
# ['BOW', 'summary.BOW'],
['beta', 'vehicle_configurations.base.wings.main_wing.beta'],
['objective', 'summary.objective'],
]
# -------------------------------------------------------------------
# Vehicles
# -------------------------------------------------------------------
nexus.vehicle_configurations = Vehicles.setup()
# -------------------------------------------------------------------
# Analyses
# -------------------------------------------------------------------
nexus.analyses = Analyses.setup(nexus.vehicle_configurations)
nexus.analyses.vehicle = Data()
nexus.analyses.vehicle = nexus.vehicle_configurations.base
# -------------------------------------------------------------------
# Missions
# -------------------------------------------------------------------
nexus.missions = Missions.setup(nexus.analyses)
# -------------------------------------------------------------------
# Procedure
# -------------------------------------------------------------------
nexus.procedure = Procedure.setup()
# -------------------------------------------------------------------
# Summary
# -------------------------------------------------------------------
nexus.summary = Data()
nexus.total_number_of_iterations = 0
return nexus
def variable_sweep(problem):
from matplotlib import rcParams
rcParams['font.family'] = 'times new roman'
# rcParams['font.times-new-roman'] = ['times new roman']
number_of_points = 5
outputs = carpet_plot(problem, number_of_points, 0, 0) #run carpet plot, suppressing default plots
inputs = outputs.inputs
objective = outputs.objective
constraints = outputs.constraint_val
plt.figure(0)
CS = plt.contourf(inputs[0,:],inputs[1,:], objective, 20, linewidths=2)
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('Fuel Burn (kg)')
CS_const = plt.contour(inputs[0,:],inputs[1,:], constraints[-1,:,:],cmap=plt.get_cmap('hot'))
plt.clabel(CS_const, inline=1, fontsize=12, family='times new roman')
cbar = plt.colorbar(CS_const)
# plt.FontProperties(family='times new roman', style='italic', size=12)
cbar.ax.set_ylabel('BOW (kg)')
# font = matplotlib.font_manager.FontProperties(family='times new roman', style='italic', size=12)
# CS_const.font_manager.FontProperties.set_family(family='times new roman')
plt.xlabel('Wing Area (m^2)')
plt.ylabel('Aspect Ratio (-)')
plt.legend(loc='upper left')
# plt.show(block=True)
plt.show()
number_of_points = 5
outputs = carpet_plot(problem, number_of_points, 0, 0, sweep_index_0=1, sweep_index_1=3) # run carpet plot, suppressing default plots
inputs = outputs.inputs
objective = outputs.objective
constraints = outputs.constraint_val
plt.figure(0)
CS = plt.contourf(inputs[0, :], inputs[1, :], objective, 20, linewidths=2)
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('Fuel Burn (kg)')
CS_const = plt.contour(inputs[0, :], inputs[1, :], constraints[-1, :, :], cmap=plt.get_cmap('hot'))
plt.clabel(CS_const, inline=1, fontsize=10)
cbar = plt.colorbar(CS_const)
cbar.ax.set_ylabel('BOW (kg)')
plt.xlabel('AR (-)')
plt.ylabel('Sweep Angle (Deg)')
plt.legend(loc='upper left')
plt.show()
number_of_points = 5
outputs = carpet_plot(problem, number_of_points, 0, 0, sweep_index_0=2,
sweep_index_1=3) # run carpet plot, suppressing default plots
inputs = outputs.inputs
objective = outputs.objective
constraints = outputs.constraint_val
plt.figure(0)
CS = plt.contourf(inputs[0, :], inputs[1, :], objective, 20, linewidths=2)
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('Fuel Burn (kg)')
CS_const = plt.contour(inputs[0, :], inputs[1, :], constraints[-1, :, :], cmap=plt.get_cmap('hot'))
plt.clabel(CS_const, inline=1, fontsize=10)
cbar = plt.colorbar(CS_const)
cbar.ax.set_ylabel('BOW (kg)')
plt.xlabel('t/c (-)')
plt.ylabel('Sweep Angle (Deg)')
plt.legend(loc='upper left')
plt.show(block=True)
return
if __name__ == '__main__':
main()
| 46.119777 | 138 | 0.497614 |
d49bbdd9b59fc7fc5f7c61bb7c10993ec6f65a69
| 862 |
py
|
Python
|
Python/leetcode/Maximum Length of Pair Chain/solution.py
|
pterodragon/programming
|
1e9c10490c3aaebe51bc06f8e0c6566e9f8b461c
|
[
"MIT"
] | 2 |
2018-10-06T14:43:13.000Z
|
2019-02-03T13:07:14.000Z
|
Python/leetcode/Maximum Length of Pair Chain/solution.py
|
pterodragon/programming
|
1e9c10490c3aaebe51bc06f8e0c6566e9f8b461c
|
[
"MIT"
] | null | null | null |
Python/leetcode/Maximum Length of Pair Chain/solution.py
|
pterodragon/programming
|
1e9c10490c3aaebe51bc06f8e0c6566e9f8b461c
|
[
"MIT"
] | null | null | null |
class Solution:
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
"""
sort pairs first
only choose the smallest pairs if they have the same first element
i.e. [1,2] and [1,10], delete the [1, 10]
dp[x] = longest chain that uses pair on index x
solution = dp[n - 1]
dp[0] = 1
        dp[a + 1] = 1 + max(dp[x] for x in range(a + 1)
                            if pairs[x][1] < pairs[a + 1][0]), or 1 if no such x exists
"""
if not pairs:
return 0
pairs.sort()
n = len(pairs)
dp = [1 for _ in range(n)]
dp[0] = 1
for a in range(1, n):
for b in reversed(range(a)):
if pairs[b][1] < pairs[a][0]:
dp[a] = dp[b] + 1
break
return dp[n - 1]
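# Editor's note (added illustration, not part of the original solution): a small worked example.
if __name__ == "__main__":
    # [1,2] -> [3,4] is the longest chain, so the answer is 2.
    print(Solution().findLongestChain([[1, 2], [2, 3], [3, 4]]))  # 2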
| 26.9375 | 74 | 0.436195 |
f4b6b076e71902917a23c4cdb3e78ed0803dd053
| 2,978 |
py
|
Python
|
app/api/users.py
|
cuongbm/microblog
|
16b47b11b1f2b2877462c86873eb435beb10b545
|
[
"MIT"
] | null | null | null |
app/api/users.py
|
cuongbm/microblog
|
16b47b11b1f2b2877462c86873eb435beb10b545
|
[
"MIT"
] | null | null | null |
app/api/users.py
|
cuongbm/microblog
|
16b47b11b1f2b2877462c86873eb435beb10b545
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request, url_for
from app import db
from app.api import bp
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.api.pagination import PaginationAPI
from app.models import User
@bp.route('/users/<int:id>', methods=['GET'])
@token_auth.login_required
def get_user(id):
return jsonify(User.query.get_or_404(id).to_dict())
@bp.route('/users', methods=['GET'])
@token_auth.login_required
def get_users():
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = PaginationAPI.to_collection_dict(User.query, page, per_page, 'api.get_users')
return jsonify(data)
@bp.route('/users/<int:id>/followers', methods=['GET'])
@token_auth.login_required
def get_followers(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = PaginationAPI.to_collection_dict(user.followers, page, per_page,
'api.get_followers', id=id)
return jsonify(data)
@bp.route('/users/<int:id>/followed', methods=['GET'])
@token_auth.login_required
def get_followed(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', 10, type=int), 100)
data = PaginationAPI.to_collection_dict(user.followed, page, per_page,
'api.get_followed', id=id)
return jsonify(data)
@bp.route('/users', methods=['POST'])
@token_auth.login_required
def create_user():
data = request.get_json() or {}
if 'username' not in data or 'email' not in data or 'password' not in data:
return bad_request('must include username, email and password fields')
if User.query.filter_by(username=data['username']).first():
return bad_request('please use a different username')
if User.query.filter_by(email=data['email']).first():
return bad_request('please use a different email address')
user = User()
user.from_dict(data, new_user=True)
db.session.add(user)
db.session.commit()
response = jsonify(user.to_dict())
response.headers['Location'] = url_for('api.get_user', id=user.id)
return response
@bp.route('/users/<int:id>', methods=['PUT'])
@token_auth.login_required
def update_user(id):
user = User.query.get_or_404(id)
data = request.get_json() or {}
if 'username' in data and data['username'] != user.username and \
User.query.filter_by(username=data['username']).first():
return bad_request('please use a different username')
if 'email' in data and data['email'] != user.email and \
User.query.filter_by(email=data['email']).first():
return bad_request('please use a different email address')
user.from_dict(data, new_user=False)
db.session.commit()
return jsonify(user.to_dict())
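# Editor's note (added illustration, not part of the original module): a hedged sketch of
# calling the create-user endpoint, assuming the blueprint is registered under an /api prefix
# and a token has already been obtained; host, prefix and token values are placeholders.
#
#   import requests
#   requests.post(
#       "http://localhost:5000/api/users",
#       json={"username": "susan", "email": "[email protected]", "password": "secret"},
#       headers={"Authorization": "Bearer <token>"},
#   )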
| 35.879518 | 88 | 0.680658 |
bbfc9eb4069c568a732c65077cb5a8442cdb433e
| 909 |
py
|
Python
|
salt/modules/win_shadow.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | 2 |
2019-03-30T02:12:56.000Z
|
2021-03-08T18:59:46.000Z
|
salt/modules/win_shadow.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_shadow.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | 1 |
2020-12-04T11:28:06.000Z
|
2020-12-04T11:28:06.000Z
|
'''
Manage the shadow file
'''
import salt.utils
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows():
return 'shadow'
return False
def info(name):
'''
Return information for the specified user
    This just returns dummy data so that salt states can work.
CLI Example::
salt '*' shadow.info root
'''
ret = {
'name': name,
'pwd': '',
'lstchg': '',
'min': '',
'max': '',
'warn': '',
'inact': '',
'expire': ''}
return ret
def set_password(name, password):
'''
Set the password for a named user.
CLI Example::
salt '*' shadow.set_password root mysecretpassword
'''
cmd = 'net user {0} {1}'.format(name, password)
ret = __salt__['cmd.run_all'](cmd)
return not ret['retcode']
| 18.18 | 65 | 0.517052 |
196c539a80470d230e17bc9645e1bb1c3150f1f2
| 2,299 |
py
|
Python
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 7 |
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 6 |
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 4 |
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchReadFeatureValues
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_async]
from google.cloud import aiplatform_v1
async def sample_batch_read_feature_values():
"""Snippet for batch_read_feature_values"""
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
csv_read_instances = aiplatform_v1.CsvSource()
csv_read_instances.gcs_source.uris = ['uris_value']
destination = aiplatform_v1.FeatureValueDestination()
destination.bigquery_destination.output_uri = "output_uri_value"
entity_type_specs = aiplatform_v1.EntityTypeSpec()
entity_type_specs.entity_type_id = "entity_type_id_value"
entity_type_specs.feature_selector.id_matcher.ids = ['ids_value']
request = aiplatform_v1.BatchReadFeatureValuesRequest(
csv_read_instances=csv_read_instances,
featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}",
destination=destination,
entity_type_specs=entity_type_specs,
)
# Make the request
operation = client.batch_read_feature_values(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_async]
| 36.492063 | 93 | 0.775555 |
597880ad121cc788cd55ef901b9fe28bf47a73b5
| 26,141 |
py
|
Python
|
trac/db/api.py
|
davigo/tracsoftpi
|
2a325bdee77903bd5ee2c13df28732bceaf37470
|
[
"BSD-3-Clause"
] | null | null | null |
trac/db/api.py
|
davigo/tracsoftpi
|
2a325bdee77903bd5ee2c13df28732bceaf37470
|
[
"BSD-3-Clause"
] | null | null | null |
trac/db/api.py
|
davigo/tracsoftpi
|
2a325bdee77903bd5ee2c13df28732bceaf37470
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <[email protected]>
import importlib
import os
import time
import urllib
from abc import ABCMeta, abstractmethod
from trac import db_default
from trac.api import IEnvironmentSetupParticipant, ISystemInfoProvider
from trac.config import BoolOption, ConfigurationError, IntOption, Option
from trac.core import *
from trac.db.pool import ConnectionPool
from trac.db.schema import Table
from trac.db.util import ConnectionWrapper
from trac.util.concurrency import ThreadLocal
from trac.util.html import tag
from trac.util.text import unicode_passwd
from trac.util.translation import _, tag_
class DbContextManager(object):
"""Database Context Manager
The outermost `DbContextManager` will close the connection.
"""
db = None
def __init__(self, env):
self.dbmgr = DatabaseManager(env)
def execute(self, query, params=None):
"""Shortcut for directly executing a query."""
with self as db:
return db.execute(query, params)
__call__ = execute
def executemany(self, query, params=None):
"""Shortcut for directly calling "executemany" on a query."""
with self as db:
return db.executemany(query, params)
class TransactionContextManager(DbContextManager):
"""Transactioned Database Context Manager for retrieving a
`~trac.db.util.ConnectionWrapper`.
The outermost such context manager will perform a commit upon
normal exit or a rollback after an exception.
"""
def __enter__(self):
db = self.dbmgr._transaction_local.wdb # outermost writable db
if not db:
db = self.dbmgr._transaction_local.rdb # reuse wrapped connection
if db:
db = ConnectionWrapper(db.cnx, db.log)
else:
db = self.dbmgr.get_connection()
self.dbmgr._transaction_local.wdb = self.db = db
return db
def __exit__(self, et, ev, tb):
if self.db:
self.dbmgr._transaction_local.wdb = None
if et is None:
self.db.commit()
else:
self.db.rollback()
if not self.dbmgr._transaction_local.rdb:
self.db.close()
class QueryContextManager(DbContextManager):
"""Database Context Manager for retrieving a read-only
`~trac.db.util.ConnectionWrapper`.
"""
def __enter__(self):
db = self.dbmgr._transaction_local.rdb # outermost readonly db
if not db:
db = self.dbmgr._transaction_local.wdb # reuse wrapped connection
if db:
db = ConnectionWrapper(db.cnx, db.log, readonly=True)
else:
db = self.dbmgr.get_connection(readonly=True)
self.dbmgr._transaction_local.rdb = self.db = db
return db
def __exit__(self, et, ev, tb):
if self.db:
self.dbmgr._transaction_local.rdb = None
if not self.dbmgr._transaction_local.wdb:
self.db.close()
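# Illustrative sketch (not part of the original module): the two context
# managers above are normally reached through the `env.db_transaction` and
# `env.db_query` environment properties; `_example_db_context_usage` and the
# table/column names are made up for the example.
def _example_db_context_usage(env):
    with env.db_transaction as db:
        db("INSERT INTO example_table (name, value) VALUES (%s, %s)",
           ('foo', 'bar'))
    with env.db_query as db:
        return [(name, value) for name, value in
                db("SELECT name, value FROM example_table")]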
class ConnectionBase(object):
"""Abstract base class for database connection classes."""
__metaclass__ = ABCMeta
@abstractmethod
def cast(self, column, type):
"""Returns a clause casting `column` as `type`."""
pass
@abstractmethod
def concat(self, *args):
"""Returns a clause concatenating the sequence `args`."""
pass
@abstractmethod
def drop_column(self, table, column):
"""Drops the `column` from `table`."""
pass
@abstractmethod
def drop_table(self, table):
"""Drops the `table`."""
pass
@abstractmethod
def get_column_names(self, table):
"""Returns the list of the column names in `table`."""
pass
@abstractmethod
def get_last_id(self, cursor, table, column='id'):
"""Returns the current value of the primary key sequence for `table`.
The `column` of the primary key may be specified, which defaults
to `id`."""
pass
@abstractmethod
def get_sequence_names(self):
"""Returns a list of the sequence names."""
pass
@abstractmethod
def get_table_names(self):
"""Returns a list of the table names."""
pass
@abstractmethod
def has_table(self, table):
"""Returns whether the table exists."""
pass
@abstractmethod
def like(self):
"""Returns a case-insensitive `LIKE` clause."""
pass
@abstractmethod
def like_escape(self, text):
"""Returns `text` escaped for use in a `LIKE` clause."""
pass
@abstractmethod
def prefix_match(self):
"""Return a case sensitive prefix-matching operator."""
pass
@abstractmethod
def prefix_match_value(self, prefix):
"""Return a value for case sensitive prefix-matching operator."""
pass
@abstractmethod
def quote(self, identifier):
"""Returns the quoted `identifier`."""
pass
@abstractmethod
def reset_tables(self):
"""Deletes all data from the tables and resets autoincrement indexes.
:return: list of names of the tables that were reset.
"""
pass
@abstractmethod
def update_sequence(self, cursor, table, column='id'):
"""Updates the current value of the primary key sequence for `table`.
The `column` of the primary key may be specified, which defaults
to `id`."""
pass
class IDatabaseConnector(Interface):
"""Extension point interface for components that support the
connection to relational databases.
"""
def get_supported_schemes():
"""Return the connection URL schemes supported by the
connector, and their relative priorities as an iterable of
`(scheme, priority)` tuples.
If `priority` is a negative number, this is indicative of an
error condition with the connector. An error message should be
attached to the `error` attribute of the connector.
"""
def get_connection(path, log=None, **kwargs):
"""Create a new connection to the database."""
def get_exceptions():
"""Return an object (typically a module) containing all the
backend-specific exception types as attributes, named
according to the Python Database API
(http://www.python.org/dev/peps/pep-0249/).
"""
def init_db(path, schema=None, log=None, **kwargs):
"""Initialize the database."""
def destroy_db(self, path, log=None, **kwargs):
"""Destroy the database."""
def db_exists(self, path, log=None, **kwargs):
"""Return `True` if the database exists."""
def to_sql(table):
"""Return the DDL statements necessary to create the specified
table, including indices."""
def backup(dest):
"""Backup the database to a location defined by
trac.backup_dir"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by the
connector.
"""
class DatabaseManager(Component):
"""Component used to manage the `IDatabaseConnector` implementations."""
implements(IEnvironmentSetupParticipant, ISystemInfoProvider)
connectors = ExtensionPoint(IDatabaseConnector)
connection_uri = Option('trac', 'database', 'sqlite:db/trac.db',
"""Database connection
[wiki:TracEnvironment#DatabaseConnectionStrings string] for this
project""")
backup_dir = Option('trac', 'backup_dir', 'db',
"""Database backup location""")
timeout = IntOption('trac', 'timeout', '20',
"""Timeout value for database connection, in seconds.
Use '0' to specify ''no timeout''.""")
debug_sql = BoolOption('trac', 'debug_sql', False,
"""Show the SQL queries in the Trac log, at DEBUG level.
""")
def __init__(self):
self._cnx_pool = None
self._transaction_local = ThreadLocal(wdb=None, rdb=None)
def init_db(self):
connector, args = self.get_connector()
args['schema'] = db_default.schema
connector.init_db(**args)
def destroy_db(self):
connector, args = self.get_connector()
# Connections to on-disk db must be closed before deleting it.
self.shutdown()
connector.destroy_db(**args)
def db_exists(self):
connector, args = self.get_connector()
return connector.db_exists(**args)
def create_tables(self, schema):
"""Create the specified tables.
:param schema: an iterable of table objects.
:since: version 1.0.2
"""
connector = self.get_connector()[0]
with self.env.db_transaction as db:
for table in schema:
for sql in connector.to_sql(table):
db(sql)
def drop_columns(self, table, columns):
"""Drops the specified columns from table.
:since: version 1.2
"""
table_name = table.name if isinstance(table, Table) else table
with self.env.db_transaction as db:
if not db.has_table(table_name):
raise self.env.db_exc.OperationalError('Table %s not found' %
db.quote(table_name))
for col in columns:
db.drop_column(table_name, col)
def drop_tables(self, schema):
"""Drop the specified tables.
:param schema: an iterable of `Table` objects or table names.
:since: version 1.0.2
"""
with self.env.db_transaction as db:
for table in schema:
table_name = table.name if isinstance(table, Table) else table
db.drop_table(table_name)
def insert_into_tables(self, data_or_callable):
"""Insert data into existing tables.
:param data_or_callable: Nested tuples of table names, column names
and row data::
(table1,
(column1, column2),
((row1col1, row1col2),
(row2col1, row2col2)),
table2, ...)
or a callable that takes a single parameter
`db` and returns the aforementioned nested
tuple.
:since: version 1.1.3
"""
with self.env.db_transaction as db:
data = data_or_callable(db) if callable(data_or_callable) \
else data_or_callable
for table, cols, vals in data:
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (db.quote(table), ','.join(cols),
','.join(['%s'] * len(cols))), vals)
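    # Illustrative sketch (not part of the original class): the nested-tuple
    # format expected by `insert_into_tables`, using a made-up table name.
    #
    #     DatabaseManager(env).insert_into_tables([
    #         ('example_table',
    #          ('name', 'value'),
    #          (('foo', '1'),
    #           ('bar', '2'))),
    #     ])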
def reset_tables(self):
"""Deletes all data from the tables and resets autoincrement indexes.
:return: list of names of the tables that were reset.
:since: version 1.1.3
"""
with self.env.db_transaction as db:
return db.reset_tables()
def upgrade_tables(self, new_schema):
"""Upgrade table schema to `new_schema`, preserving data in
columns that exist in the current schema and `new_schema`.
:param new_schema: tuple or list of `Table` objects
:since: version 1.2
"""
with self.env.db_transaction as db:
cursor = db.cursor()
for new_table in new_schema:
temp_table_name = new_table.name + '_old'
has_table = self.has_table(new_table)
if has_table:
old_column_names = set(self.get_column_names(new_table))
new_column_names = {col.name for col in new_table.columns}
column_names = old_column_names & new_column_names
if column_names:
cols_to_copy = ','.join(db.quote(name)
for name in column_names)
cursor.execute("""
CREATE TEMPORARY TABLE %s AS SELECT * FROM %s
""" % (db.quote(temp_table_name),
db.quote(new_table.name)))
self.drop_tables((new_table,))
self.create_tables((new_table,))
if has_table and column_names:
cursor.execute("""
INSERT INTO %s (%s) SELECT %s FROM %s
""" % (db.quote(new_table.name), cols_to_copy,
cols_to_copy, db.quote(temp_table_name)))
for col in new_table.columns:
if col.auto_increment:
db.update_sequence(cursor, new_table.name,
col.name)
self.drop_tables((temp_table_name,))
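    # Illustrative sketch (not part of the original class): upgrading a table
    # in place while preserving shared columns, assuming the usual `Column`
    # helper from trac.db.schema and a made-up table.
    #
    #     from trac.db.schema import Column, Table
    #     new_schema = [Table('example_table', key='id')[
    #                       Column('id', auto_increment=True),
    #                       Column('name'),
    #                       Column('notes')]]
    #     DatabaseManager(env).upgrade_tables(new_schema)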
def get_connection(self, readonly=False):
"""Get a database connection from the pool.
If `readonly` is `True`, the returned connection will purposely
lack the `rollback` and `commit` methods.
"""
if not self._cnx_pool:
connector, args = self.get_connector()
self._cnx_pool = ConnectionPool(5, connector, **args)
db = self._cnx_pool.get_cnx(self.timeout or None)
if readonly:
db = ConnectionWrapper(db, readonly=True)
return db
def get_database_version(self, name='database_version'):
"""Returns the database version from the SYSTEM table as an int,
or `False` if the entry is not found.
:param name: The name of the entry that contains the database version
in the SYSTEM table. Defaults to `database_version`,
which contains the database version for Trac.
"""
with self.env.db_query as db:
for value, in db("""
SELECT value FROM {0} WHERE name=%s
""".format(db.quote('system')), (name,)):
return int(value)
            else:
return False
def get_exceptions(self):
return self.get_connector()[0].get_exceptions()
def get_sequence_names(self):
"""Returns a list of the sequence names.
:since: 1.3.2
"""
with self.env.db_query as db:
return db.get_sequence_names()
def get_table_names(self):
"""Returns a list of the table names.
:since: 1.1.6
"""
with self.env.db_query as db:
return db.get_table_names()
def get_column_names(self, table):
"""Returns a list of the column names for `table`.
:param table: a `Table` object or table name.
:since: 1.2
"""
table_name = table.name if isinstance(table, Table) else table
with self.env.db_query as db:
if not db.has_table(table_name):
raise self.env.db_exc.OperationalError('Table %s not found' %
db.quote(table_name))
return db.get_column_names(table_name)
def has_table(self, table):
"""Returns whether the table exists."""
table_name = table.name if isinstance(table, Table) else table
with self.env.db_query as db:
return db.has_table(table_name)
def set_database_version(self, version, name='database_version'):
"""Sets the database version in the SYSTEM table.
:param version: an integer database version.
:param name: The name of the entry that contains the database version
in the SYSTEM table. Defaults to `database_version`,
which contains the database version for Trac.
"""
current_database_version = self.get_database_version(name)
if current_database_version is False:
with self.env.db_transaction as db:
db("""
INSERT INTO {0} (name, value) VALUES (%s, %s)
""".format(db.quote('system')), (name, version))
else:
with self.env.db_transaction as db:
db("""
UPDATE {0} SET value=%s WHERE name=%s
""".format(db.quote('system')), (version, name))
self.log.info("Upgraded %s from %d to %d",
name, current_database_version, version)
def needs_upgrade(self, version, name='database_version'):
"""Checks the database version to determine if an upgrade is needed.
:param version: the expected integer database version.
:param name: the name of the entry in the SYSTEM table that contains
the database version. Defaults to `database_version`,
which contains the database version for Trac.
:return: `True` if the stored version is less than the expected
version, `False` if it is equal to the expected version.
:raises TracError: if the stored version is greater than the expected
version.
"""
dbver = self.get_database_version(name)
if dbver == version:
return False
elif dbver > version:
raise TracError(_("Need to downgrade %(name)s.", name=name))
self.log.info("Need to upgrade %s from %d to %d",
name, dbver, version)
return True
def upgrade(self, version, name='database_version', pkg='trac.upgrades'):
"""Invokes `do_upgrade(env, version, cursor)` in module
`"%s/db%i.py" % (pkg, version)`, for each required version upgrade.
:param version: the expected integer database version.
:param name: the name of the entry in the SYSTEM table that contains
the database version. Defaults to `database_version`,
which contains the database version for Trac.
:param pkg: the package containing the upgrade modules.
:raises TracError: if the package or module doesn't exist.
"""
dbver = self.get_database_version(name)
for i in xrange(dbver + 1, version + 1):
module = '%s.db%i' % (pkg, i)
try:
upgrader = importlib.import_module(module)
except ImportError:
raise TracError(_("No upgrade module %(module)s.py",
module=module))
with self.env.db_transaction as db:
cursor = db.cursor()
upgrader.do_upgrade(self.env, i, cursor)
self.set_database_version(i, name)
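    # Illustrative sketch (not part of the original class): an upgrade module
    # such as `myplugin/upgrades/db2.py` (a hypothetical path) only needs to
    # define the `do_upgrade` hook invoked above, e.g.:
    #
    #     def do_upgrade(env, version, cursor):
    #         cursor.execute("ALTER TABLE example_table ADD COLUMN notes text")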
def shutdown(self, tid=None):
if self._cnx_pool:
self._cnx_pool.shutdown(tid)
if not tid:
self._cnx_pool = None
def backup(self, dest=None):
"""Save a backup of the database.
:param dest: base filename to write to.
Returns the file actually written.
"""
connector, args = self.get_connector()
if not dest:
backup_dir = self.backup_dir
if not os.path.isabs(backup_dir):
backup_dir = os.path.join(self.env.path, backup_dir)
db_str = self.config.get('trac', 'database')
db_name, db_path = db_str.split(":", 1)
dest_name = '%s.%i.%d.bak' % (db_name, self.env.database_version,
int(time.time()))
dest = os.path.join(backup_dir, dest_name)
else:
backup_dir = os.path.dirname(dest)
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
return connector.backup(dest)
def get_connector(self):
scheme, args = parse_connection_uri(self.connection_uri)
candidates = [
(priority, connector)
for connector in self.connectors
for scheme_, priority in connector.get_supported_schemes()
if scheme_ == scheme
]
if not candidates:
raise TracError(_('Unsupported database type "%(scheme)s"',
scheme=scheme))
priority, connector = max(candidates)
if priority < 0:
raise TracError(connector.error)
if scheme == 'sqlite':
if args['path'] == ':memory:':
# Special case for SQLite in-memory database, always get
# the /same/ connection over
pass
elif not os.path.isabs(args['path']):
# Special case for SQLite to support a path relative to the
# environment directory
args['path'] = os.path.join(self.env.path,
args['path'].lstrip('/'))
if self.debug_sql:
args['log'] = self.log
return connector, args
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""Insert default data into the database."""
self.insert_into_tables(db_default.get_data)
def environment_needs_upgrade(self):
return self.needs_upgrade(db_default.db_version)
def upgrade_environment(self):
self.upgrade(db_default.db_version)
# ISystemInfoProvider methods
def get_system_info(self):
connector = self.get_connector()[0]
for info in connector.get_system_info():
yield info
def get_column_names(cursor):
"""Retrieve column names from a cursor, if possible."""
return [unicode(d[0], 'utf-8') if isinstance(d[0], str) else d[0]
for d in cursor.description] if cursor.description else []
def parse_connection_uri(db_str):
"""Parse the database connection string.
The database connection string for an environment is specified through
the `database` option in the `[trac]` section of trac.ini.
:return: a tuple containing the scheme and a dictionary of attributes:
`user`, `password`, `host`, `port`, `path`, `params`.
:since: 1.1.3
"""
if not db_str:
section = tag.a("[trac]",
title=_("TracIni documentation"),
class_='trac-target-new',
href='https://trac.edgewall.org/wiki/TracIni'
'#trac-section')
raise ConfigurationError(
tag_("Database connection string is empty. Set the %(option)s "
"configuration option in the %(section)s section of "
"trac.ini. Please refer to the %(doc)s for help.",
option=tag.code("database"), section=section,
doc=_doc_db_str()))
try:
scheme, rest = db_str.split(':', 1)
except ValueError:
raise _invalid_db_str(db_str)
if not rest.startswith('/'):
if scheme == 'sqlite' and rest:
# Support for relative and in-memory SQLite connection strings
host = None
path = rest
else:
raise _invalid_db_str(db_str)
else:
if not rest.startswith('//'):
host = None
rest = rest[1:]
elif rest.startswith('///'):
host = None
rest = rest[3:]
else:
rest = rest[2:]
if '/' in rest:
host, rest = rest.split('/', 1)
else:
host = rest
rest = ''
path = None
if host and '@' in host:
user, host = host.split('@', 1)
if ':' in user:
user, password = user.split(':', 1)
else:
password = None
if user:
user = urllib.unquote(user)
if password:
password = unicode_passwd(urllib.unquote(password))
else:
user = password = None
if host and ':' in host:
host, port = host.split(':', 1)
try:
port = int(port)
except ValueError:
raise _invalid_db_str(db_str)
else:
port = None
if not path:
path = '/' + rest
if os.name == 'nt':
# Support local paths containing drive letters on Win32
if len(rest) > 1 and rest[1] == '|':
path = "%s:%s" % (rest[0], rest[2:])
params = {}
if '?' in path:
path, qs = path.split('?', 1)
qs = qs.split('&')
for param in qs:
try:
name, value = param.split('=', 1)
except ValueError:
raise _invalid_db_str(db_str)
value = urllib.unquote(value)
params[name] = value
args = zip(('user', 'password', 'host', 'port', 'path', 'params'),
(user, password, host, port, path, params))
return scheme, {key: value for key, value in args if value}
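# Illustrative sketch (not part of the original module): what the parser above
# returns for a typical PostgreSQL connection string.
#
#     >>> parse_connection_uri('postgres://tracuser@localhost:5432/trac?schema=tracenv')
#     ('postgres', {'user': 'tracuser', 'host': 'localhost', 'port': 5432,
#                   'path': '/trac', 'params': {'schema': 'tracenv'}})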
def _invalid_db_str(db_str):
return ConfigurationError(
tag_("Invalid format %(db_str)s for the database connection string. "
"Please refer to the %(doc)s for help.",
db_str=tag.code(db_str), doc=_doc_db_str()))
def _doc_db_str():
return tag.a(_("documentation"),
title=_("Database Connection Strings documentation"),
class_='trac-target-new',
href='https://trac.edgewall.org/wiki/'
'TracIni#DatabaseConnectionStrings')
| 35.325676 | 78 | 0.577101 |
69a4ea1d91d0b1fc4a1fffa32b24e51725dfe555
| 10,887 |
py
|
Python
|
examples/widgets/forms/askusingform.py
|
offlineJ/idapython
|
30c721083d580d81dd95ea8a3d79306f94c7ba3f
|
[
"BSD-3-Clause"
] | null | null | null |
examples/widgets/forms/askusingform.py
|
offlineJ/idapython
|
30c721083d580d81dd95ea8a3d79306f94c7ba3f
|
[
"BSD-3-Clause"
] | null | null | null |
examples/widgets/forms/askusingform.py
|
offlineJ/idapython
|
30c721083d580d81dd95ea8a3d79306f94c7ba3f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
# -----------------------------------------------------------------------
# This is an example illustrating how to use the Form class
# (c) Hex-Rays
#
import os
import ida_kernwin
# --------------------------------------------------------------------------
class busy_form_t(ida_kernwin.Form):
class test_chooser_t(ida_kernwin.Choose):
"""
A simple chooser to be used as an embedded chooser
"""
def __init__(self, title, nb=5, flags=ida_kernwin.Choose.CH_MULTI):
ida_kernwin.Choose.__init__(
self,
title,
[
["Address", 10],
["Name", 30]
],
flags=flags,
embedded=True,
width=30,
height=6)
self.items = [ [str(x), "func_%04d" % x] for x in range(nb + 1) ]
self.icon = 5
def OnGetLine(self, n):
print("getline %d" % n)
return self.items[n]
def OnGetSize(self):
n = len(self.items)
print("getsize -> %d" % n)
return n
def __init__(self):
self.invert = False
F = ida_kernwin.Form
F.__init__(
self,
r"""STARTITEM {id:rNormal}
BUTTON YES* Yeah
BUTTON NO Nope
BUTTON CANCEL Nevermind
Form Test
{FormChangeCb}
This is a string: |+{cStr1}+
This is an address:|+{cAddr1}+
This is some HTML: |+{cHtml1}+
This is a number: |+{cVal1}+
<#Hint1#Enter text :{iStr1}>
<#Hint2#Select color:{iColor1}>
Browse test
<#Select a file to open#Browse to open:{iFileOpen}>
<#Select a file to save#Browse to save:{iFileSave}>
<#Select dir#Browse for dir:{iDir}>
Misc
<##Enter a selector value:{iSegment}>
<##Enter a raw hex :{iRawHex}>
<##Enter a character :{iChar}>
<##Enter an address :{iAddr}>
<##Write a type name :{iType}>
Button test: <##Button1:{iButton1}> <##Button2:{iButton2}>
<##Check boxes##Error output:{rError}> | <##Radio boxes##Green:{rGreen}>
<Normal output:{rNormal}> | <Red:{rRed}>
<Warnings:{rWarnings}>{cGroup1}> | <Blue:{rBlue}>{cGroup2}>
<Embedded chooser:{cEChooser}>
The end!
""", {
'cStr1': F.StringLabel("Hello"),
'cHtml1': F.StringLabel("<span style='color: red'>Is this red?<span>", tp=F.FT_HTML_LABEL),
'cAddr1': F.NumericLabel(0x401000, F.FT_ADDR),
'cVal1' : F.NumericLabel(99, F.FT_HEX),
'iStr1': F.StringInput(),
'iColor1': F.ColorInput(),
'iFileOpen': F.FileInput(open=True),
'iFileSave': F.FileInput(save=True),
'iDir': F.DirInput(),
'iType': F.StringInput(tp=F.FT_TYPE),
'iSegment': F.NumericInput(tp=F.FT_SEG),
'iRawHex': F.NumericInput(tp=F.FT_RAWHEX),
'iAddr': F.NumericInput(tp=F.FT_ADDR),
'iChar': F.NumericInput(tp=F.FT_CHAR),
'iButton1': F.ButtonInput(self.OnButton1),
'iButton2': F.ButtonInput(self.OnButton2),
'cGroup1': F.ChkGroupControl(("rNormal", "rError", "rWarnings")),
'cGroup2': F.RadGroupControl(("rRed", "rGreen", "rBlue")),
'FormChangeCb': F.FormChangeCb(self.OnFormChange),
'cEChooser' : F.EmbeddedChooserControl(busy_form_t.test_chooser_t("E1"))
})
def OnButton1(self, code=0):
print("Button1 pressed")
def OnButton2(self, code=0):
print("Button2 pressed")
def OnFormChange(self, fid):
if fid == self.iButton1.id:
print("Button1 fchg;inv=%s" % self.invert)
self.SetFocusedField(self.rNormal)
self.EnableField(self.rError, self.invert)
self.invert = not self.invert
elif fid == self.iButton2.id:
g1 = self.GetControlValue(self.cGroup1)
g2 = self.GetControlValue(self.cGroup2)
d = self.GetControlValue(self.iDir)
f = self.GetControlValue(self.iFileOpen)
print("cGroup2:%x;Dir=%s;fopen=%s;cGroup1:%x" % (g1, d, f, g2))
elif fid == self.cEChooser.id:
l = self.GetControlValue(self.cEChooser)
print("Chooser: %s" % l)
elif fid in [self.rGreen.id, self.rRed.id, self.rBlue.id]:
color = {
self.rGreen.id : 0x00FF00,
self.rRed.id : 0x0000FF,
self.rBlue.id : 0xFF0000,
}
self.SetControlValue(self.iColor1, color[fid])
elif fid == self.iColor1.id:
print("Color changed: %06x" % self.GetControlValue(self.iColor1))
else:
print(">>fid:%d" % fid)
return 1
@staticmethod
def compile_and_fiddle_with_fields():
f = busy_form_t()
f, args = f.Compile()
print(args[0])
print(args[1:])
f.rNormal.checked = True
f.rWarnings.checked = True
print(hex(f.cGroup1.value))
f.rGreen.selected = True
print(f.cGroup2.value)
print("Title: '%s'" % f.title)
f.Free()
@staticmethod
def test():
f = busy_form_t()
# Compile (in order to populate the controls)
f.Compile()
f.iColor1.value = 0x5bffff
f.iDir.value = os.getcwd()
f.iChar.value = ord("a")
f.rNormal.checked = True
f.rWarnings.checked = True
f.rGreen.selected = True
f.iStr1.value = "Hello"
f.iFileSave.value = "*.*"
f.iFileOpen.value = "*.*"
# Execute the form
ok = f.Execute()
print("r=%d" % ok)
if ok == 1:
print("f.str1=%s" % f.iStr1.value)
print("f.color1=%x" % f.iColor1.value)
print("f.openfile=%s" % f.iFileOpen.value)
print("f.savefile=%s" % f.iFileSave.value)
print("f.dir=%s" % f.iDir.value)
print("f.type=%s" % f.iType.value)
print("f.seg=%s" % f.iSegment.value)
print("f.rawhex=%x" % f.iRawHex.value)
print("f.char=%x" % f.iChar.value)
print("f.addr=%x" % f.iAddr.value)
print("f.cGroup1=%x" % f.cGroup1.value)
print("f.cGroup2=%x" % f.cGroup2.value)
sel = f.cEChooser.selection
if sel is None:
print("No selection")
else:
print("Selection: %s" % sel)
# Dispose the form
f.Free()
# --------------------------------------------------------------------------
class multiline_text_t(ida_kernwin.Form):
"""Simple Form to test multilinetext"""
def __init__(self):
F = ida_kernwin.Form
F.__init__(self, r"""STARTITEM 0
BUTTON YES* Yeah
BUTTON NO Nope
BUTTON CANCEL NONE
Form Test
{FormChangeCb}
<Multilinetext:{txtMultiLineText}>
""", {
'txtMultiLineText': F.MultiLineTextControl(text="Hello"),
'FormChangeCb': F.FormChangeCb(self.OnFormChange),
})
def OnFormChange(self, fid):
if fid == self.txtMultiLineText.id:
pass
elif fid == -2:
ti = self.GetControlValue(self.txtMultiLineText)
print("ti.text = %s" % ti.text)
else:
print(">>fid:%d" % fid)
return 1
@staticmethod
def test(execute=True):
f = multiline_text_t()
f, args = f.Compile()
if execute:
ok = f.Execute()
else:
print(args[0])
print(args[1:])
ok = 0
if ok == 1:
assert f.txtMultiLineText.text == f.txtMultiLineText.value
print(f.txtMultiLineText.text)
f.Free()
# --------------------------------------------------------------------------
class multiline_text_and_dropdowns_t(ida_kernwin.Form):
"""Simple Form to test multilinetext and combo box controls"""
def __init__(self):
self.__n = 0
F = ida_kernwin.Form
F.__init__(self,
r"""BUTTON YES* Yeah
BUTTON NO Nope
BUTTON CANCEL NONE
Dropdown list test
{FormChangeCb}
<Dropdown list (readonly):{cbReadonly}> <Add element:{iButtonAddelement}> <Set index:{iButtonSetIndex}>
<Dropdown list (editable):{cbEditable}> <Set string:{iButtonSetString}>
""", {
'FormChangeCb': F.FormChangeCb(self.OnFormChange),
'cbReadonly': F.DropdownListControl(
items=["red", "green", "blue"],
readonly=True,
selval=1),
'cbEditable': F.DropdownListControl(
items=["1MB", "2MB", "3MB", "4MB"],
readonly=False,
selval="4MB"),
'iButtonAddelement': F.ButtonInput(self.OnButtonNop),
'iButtonSetIndex': F.ButtonInput(self.OnButtonNop),
'iButtonSetString': F.ButtonInput(self.OnButtonNop),
})
def OnButtonNop(self, code=0):
"""Do nothing, we will handle events in the form callback"""
pass
def OnFormChange(self, fid):
if fid == self.iButtonSetString.id:
s = ida_kernwin.ask_str("none", 0, "Enter value")
if s:
self.SetControlValue(self.cbEditable, s)
elif fid == self.iButtonSetIndex.id:
s = ida_kernwin.ask_str("1", 0, "Enter index value:")
if s:
try:
i = int(s)
except:
i = 0
self.SetControlValue(self.cbReadonly, i)
elif fid == self.iButtonAddelement.id:
# add a value to the string list
self.__n += 1
self.cbReadonly.add("some text #%d" % self.__n)
# Refresh the control
self.RefreshField(self.cbReadonly)
elif fid == -2:
s = self.GetControlValue(self.cbEditable)
print("user entered: %s" % s)
sel_idx = self.GetControlValue(self.cbReadonly)
return 1
@staticmethod
def test(execute=True):
f = multiline_text_and_dropdowns_t()
f, args = f.Compile()
if execute:
ok = f.Execute()
else:
print(args[0])
print(args[1:])
ok = 0
if ok == 1:
print("Editable: %s" % f.cbEditable.value)
print("Readonly: %s" % f.cbReadonly.value)
f.Free()
NON_MODAL_INSTANCE = None
@staticmethod
def test_non_modal():
if multiline_text_and_dropdowns_t.NON_MODAL_INSTANCE is None:
f = multiline_text_and_dropdowns_t()
f.modal = False
f.openform_flags = ida_kernwin.PluginForm.FORM_TAB
f, _ = f.Compile()
multiline_text_and_dropdowns_t.NON_MODAL_INSTANCE = f
multiline_text_and_dropdowns_t.NON_MODAL_INSTANCE.Open()
# --------------------------------------------------------------------------
busy_form_t.test()
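# Illustrative addition (not part of the original example): the other demo
# forms defined above can be exercised the same way, e.g.
#
#     multiline_text_t.test()
#     multiline_text_and_dropdowns_t.test_non_modal()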
| 33.601852 | 103 | 0.528704 |
730faabbb1abb6ea8e502046ced8db4f57d19f01
| 3,525 |
py
|
Python
|
mars/learn/glm/tests/test_logistic.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | 1 |
2022-02-02T03:03:48.000Z
|
2022-02-02T03:03:48.000Z
|
mars/learn/glm/tests/test_logistic.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | null | null | null |
mars/learn/glm/tests/test_logistic.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import pytest
from sklearn.datasets import load_iris
from .._logistic import _check_solver, _check_multi_class, LogisticRegression
# general data load
X, y = load_iris(return_X_y=True)
def test_check_solver(setup):
all_solvers = ["SGD"]
for solver in all_solvers:
checked_solver = _check_solver(solver)
assert checked_solver == solver
invalid_solver = "Newton"
error_msg = re.escape(
"Logistic Regression supports only solvers in %s, "
"got %s." % (all_solvers, invalid_solver)
)
with pytest.raises(ValueError, match=error_msg):
_check_solver(invalid_solver)
def test_check_multi_class(setup):
all_multi_class = ["auto", "multinomial", "ovr"]
solver = "SGD"
for multi_class in all_multi_class:
checked_multi_class = _check_multi_class(multi_class, solver, 2)
assert checked_multi_class == "multinomial"
error_msg = re.escape(
"Solver %s does not support "
"an ovr backend with number of classes "
"larger than 2." % solver
)
with pytest.raises(ValueError, match=error_msg):
_check_multi_class("ovr", solver, 3)
invalid_multi_class = "multiovr"
error_msg = re.escape(
"multi_class should be 'multinomial', "
"'ovr' or 'auto'. Got %s." % invalid_multi_class
)
with pytest.raises(ValueError, match=error_msg):
_check_multi_class(invalid_multi_class, solver, 3)
def test_invalid_penalty(setup):
error_msg = re.escape("Only support L2 penalty.")
with pytest.raises(NotImplementedError, match=error_msg):
model = LogisticRegression(penalty="l1")
model.fit(X, y)
def test_invalid_C(setup):
invalid_C = -1
error_msg = re.escape("Penalty term must be positive; got (C=%r)" % invalid_C)
with pytest.raises(ValueError, match=error_msg):
model = LogisticRegression(C=invalid_C)
model.fit(X, y)
def test_invalid_max_iter(setup):
invalid_max_iter = -1
error_msg = re.escape(
"Maximum number of iteration must be positive;"
" got (max_iter=%r)" % invalid_max_iter
)
with pytest.raises(ValueError, match=error_msg):
model = LogisticRegression(max_iter=invalid_max_iter)
model.fit(X, y)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_logistic_regression_no_converge(setup, fit_intercept):
# quite slow in local tests, so set max_iter=1
# suggested max_iter >= 10
model = LogisticRegression(fit_intercept=fit_intercept, max_iter=1)
model.fit(X, y)
model.predict(X)
model.score(X, y)
model.predict_proba(X)
model.predict_log_proba(X)
error_msg = re.escape(
"X has %d features per sample; expecting %d"
% (X.shape[1], model.coef_.shape[1] - 1)
)
model.coef_ = model.coef_[:, :-1]
with pytest.raises(ValueError, match=error_msg):
model.predict(X)
| 30.652174 | 82 | 0.691348 |
d01663890ba31661007b2c6b26d6b88d9acc62cc
| 4,592 |
py
|
Python
|
PythonAPI/pycocotoolse/mask.py
|
linhandev/cocoapie
|
f26e17a1e1d91e04f0d1a2b55d11248147f634fa
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
PythonAPI/pycocotoolse/mask.py
|
linhandev/cocoapie
|
f26e17a1e1d91e04f0d1a2b55d11248147f634fa
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
PythonAPI/pycocotoolse/mask.py
|
linhandev/cocoapie
|
f26e17a1e1d91e04f0d1a2b55d11248147f634fa
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
__author__ = 'tsungyi'
import pycocotoolse._mask as _mask
# Interface for manipulating masks stored in RLE format.
#
# RLE is a simple yet efficient format for storing binary masks. RLE
# first divides a vector (or vectorized image) into a series of piecewise
# constant regions and then for each piece simply stores the length of
# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
# (note that the odd counts are always the numbers of zeros). Instead of
# storing the counts directly, additional compression is achieved with a
# variable bitrate representation based on a common scheme called LEB128.
#
# Compression is greatest given large piecewise constant regions.
# Specifically, the size of the RLE is proportional to the number of
# *boundaries* in M (or for an image the number of boundaries in the y
# direction). Assuming fairly simple shapes, the RLE representation is
# O(sqrt(n)) where n is the number of pixels in the object. Hence space usage
# is substantially lower, especially for large simple objects (large n).
#
# Many common operations on masks can be computed directly using the RLE
# (without need for decoding). This includes computations such as area,
# union, intersection, etc. All of these operations are linear in the
# size of the RLE, in other words they are O(sqrt(n)) where n is the area
# of the object. Computing these operations on the original mask is O(n).
# Thus, using the RLE can result in substantial computational savings.
#
# The following API functions are defined:
# encode - Encode binary masks using RLE.
# decode - Decode binary masks encoded via RLE.
# merge - Compute union or intersection of encoded masks.
# iou - Compute intersection over union between masks.
# area - Compute area of encoded masks.
# toBbox - Get bounding boxes surrounding encoded masks.
# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
#
# Usage:
# Rs = encode( masks )
# masks = decode( Rs )
# R = merge( Rs, intersect=false )
# o = iou( dt, gt, iscrowd )
# a = area( Rs )
# bbs = toBbox( Rs )
# Rs = frPyObjects( [pyObjects], h, w )
#
# In the API the following formats are used:
# Rs - [dict] Run-length encoding of binary masks
# R - dict Run-length encoding of binary mask
# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
# bbs - [nx4] Bounding box(es) stored as [x y w h]
# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
# dt,gt - May be either bounding boxes or encoded masks
# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
#
# Finally, a note about the intersection over union (iou) computation.
# The standard iou of a ground truth (gt) and detected (dt) object is
# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
# For "crowd" regions, we use a modified criteria. If a gt object is
# marked as "iscrowd", we allow a dt to match any subregion of the gt.
# Choosing gt' in the crowd gt that best matches the dt can be done using
# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
# For crowd gt regions we use this modified criteria above for the iou.
#
# To compile run "python setup.py build_ext --inplace"
# Please do not contact us for help with compiling.
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
iou = _mask.iou
merge = _mask.merge
frPyObjects = _mask.frPyObjects
def encode(bimask):
if len(bimask.shape) == 3:
return _mask.encode(bimask)
elif len(bimask.shape) == 2:
h, w = bimask.shape
return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
def decode(rleObjs):
if type(rleObjs) == list:
return _mask.decode(rleObjs)
else:
return _mask.decode([rleObjs])[:,:,0]
def area(rleObjs):
if type(rleObjs) == list:
return _mask.area(rleObjs)
else:
return _mask.area([rleObjs])[0]
def toBbox(rleObjs):
if type(rleObjs) == list:
return _mask.toBbox(rleObjs)
else:
return _mask.toBbox([rleObjs])[0]
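# Illustrative sketch (not part of the original module): encode a tiny binary
# mask with the helpers above, then recover its area, bounding box and the
# mask itself; `_example_rle_roundtrip` is a made-up name.
def _example_rle_roundtrip():
    import numpy as np
    # Masks must be uint8 and column-major (Fortran) ordered, as noted above.
    mask = np.zeros((5, 5), dtype=np.uint8)
    mask[1:4, 2:4] = 1  # a 3x2 block of foreground pixels
    rle = encode(np.asfortranarray(mask))
    assert area(rle) == 6
    assert (decode(rle) == mask).all()
    return toBbox(rle)  # [x, y, w, h] == [2., 1., 2., 3.]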
| 44.582524 | 100 | 0.690113 |
7db5cff998aadf7bc38969ae57c8c8dc633e5b72
| 2,303 |
py
|
Python
|
examples/torch/trpo_pendulum_ray_sampler.py
|
bainro/garage
|
c5afbb19524792d9bbad9b9741f45e1d48ddca3d
|
[
"MIT"
] | null | null | null |
examples/torch/trpo_pendulum_ray_sampler.py
|
bainro/garage
|
c5afbb19524792d9bbad9b9741f45e1d48ddca3d
|
[
"MIT"
] | null | null | null |
examples/torch/trpo_pendulum_ray_sampler.py
|
bainro/garage
|
c5afbb19524792d9bbad9b9741f45e1d48ddca3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm (PyTorch).
Uses Ray sampler instead of OnPolicyVectorizedSampler.
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import numpy as np
import ray
import torch
from garage import wrap_experiment
from garage.experiment import deterministic, LocalRunner
from garage.sampler import RaySampler
from garage.tf.envs import TfEnv
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment(snapshot_mode='none')
def trpo_ray_pendulum(ctxt=None, seed=1):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this line out in order to run ray at full capacity
ray.init(memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_webui=False)
deterministic.set_seed(seed)
env = TfEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env, sampler_cls=RaySampler)
runner.train(n_epochs=100, batch_size=1024)
s = np.random.randint(0, 1000)
trpo_ray_pendulum(seed=s)
| 34.893939 | 77 | 0.651324 |
d20f142eb22c87e18e134e1f39a3e1cfc7f42fef
| 5,672 |
py
|
Python
|
tests/sentry/tasks/test_beacon.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 4 |
2019-05-27T13:55:07.000Z
|
2021-03-30T07:05:09.000Z
|
tests/sentry/tasks/test_beacon.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 196 |
2019-06-10T08:34:10.000Z
|
2022-02-22T01:26:13.000Z
|
tests/sentry/tasks/test_beacon.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | 1 |
2020-08-10T07:55:40.000Z
|
2020-08-10T07:55:40.000Z
|
from __future__ import absolute_import, print_function
import json
import responses
import sentry
from mock import patch
from uuid import uuid4
from sentry import options
from sentry.models import Broadcast
from sentry.testutils import TestCase
from sentry.tasks.beacon import BEACON_URL, send_beacon
class SendBeaconTest(TestCase):
@patch('sentry.tasks.beacon.get_all_package_versions')
@patch('sentry.tasks.beacon.safe_urlopen')
@patch('sentry.tasks.beacon.safe_urlread')
@responses.activate
def test_simple(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
mock_get_all_package_versions.return_value = {'foo': '1.0'}
safe_urlread.return_value = json.dumps({
'notices': [],
'version': {
'stable': '1.0.0'
},
})
assert options.set('system.admin-email', '[email protected]')
assert options.set('beacon.anonymous', False)
send_beacon()
install_id = options.get('sentry:install-id')
assert install_id and len(install_id) == 40
safe_urlopen.assert_called_once_with(
BEACON_URL,
json={
'install_id': install_id,
'version': sentry.get_version(),
'docker': sentry.is_docker(),
'data': {
'organizations': 1,
'users': 0,
'projects': 1,
'teams': 1,
'events.24h': 0,
},
'anonymous': False,
'admin_email': '[email protected]',
'packages': mock_get_all_package_versions.return_value,
},
timeout=5
)
safe_urlread.assert_called_once_with(safe_urlopen.return_value)
assert options.get('sentry:latest_version') == '1.0.0'
@patch('sentry.tasks.beacon.get_all_package_versions')
@patch('sentry.tasks.beacon.safe_urlopen')
@patch('sentry.tasks.beacon.safe_urlread')
@responses.activate
def test_anonymous(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
mock_get_all_package_versions.return_value = {'foo': '1.0'}
safe_urlread.return_value = json.dumps({
'notices': [],
'version': {
'stable': '1.0.0'
},
})
assert options.set('system.admin-email', '[email protected]')
assert options.set('beacon.anonymous', True)
send_beacon()
install_id = options.get('sentry:install-id')
assert install_id and len(install_id) == 40
safe_urlopen.assert_called_once_with(
BEACON_URL,
json={
'install_id': install_id,
'version': sentry.get_version(),
'docker': sentry.is_docker(),
'data': {
'organizations': 1,
'users': 0,
'projects': 1,
'teams': 1,
'events.24h': 0,
},
'anonymous': True,
'packages': mock_get_all_package_versions.return_value,
},
timeout=5
)
safe_urlread.assert_called_once_with(safe_urlopen.return_value)
assert options.get('sentry:latest_version') == '1.0.0'
@patch('sentry.tasks.beacon.get_all_package_versions')
@patch('sentry.tasks.beacon.safe_urlopen')
@patch('sentry.tasks.beacon.safe_urlread')
@responses.activate
def test_with_broadcasts(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
broadcast_id = uuid4().hex
mock_get_all_package_versions.return_value = {}
safe_urlread.return_value = json.dumps(
{
'notices': [
{
'id': broadcast_id,
'title': 'Hello!',
'message': 'Hello world',
'active': True,
}
],
'version': {
'stable': '1.0.0'
},
}
)
with self.settings():
send_beacon()
broadcast = Broadcast.objects.get(upstream_id=broadcast_id)
assert broadcast.title == 'Hello!'
assert broadcast.message == 'Hello world'
assert broadcast.is_active
safe_urlread.return_value = json.dumps({
'notices': [],
'version': {
'stable': '1.0.0'
},
})
with self.settings():
send_beacon()
# test explicit disable
broadcast = Broadcast.objects.get(upstream_id=broadcast_id)
assert not broadcast.is_active
@patch('sentry.tasks.beacon.get_all_package_versions')
@patch('sentry.tasks.beacon.safe_urlopen')
@patch('sentry.tasks.beacon.safe_urlread')
@responses.activate
def test_disabled(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
mock_get_all_package_versions.return_value = {'foo': '1.0'}
with self.settings(SENTRY_BEACON=False):
send_beacon()
assert not safe_urlopen.mock_calls
@patch('sentry.tasks.beacon.get_all_package_versions')
@patch('sentry.tasks.beacon.safe_urlopen')
@patch('sentry.tasks.beacon.safe_urlread')
@responses.activate
def test_debug(self, safe_urlread, safe_urlopen, mock_get_all_package_versions):
mock_get_all_package_versions.return_value = {'foo': '1.0'}
with self.settings(DEBUG=True):
send_beacon()
assert not safe_urlopen.mock_calls
| 32.976744 | 94 | 0.575282 |
62c3eca07defd025aeaca970e522c039d445fe05
| 1,699 |
py
|
Python
|
test/functional/feature_abortnode.py
|
XziimP/bitcoinV
|
38980aff8a8be63b338bbe83ea9896107104fc60
|
[
"MIT"
] | 128 |
2015-01-20T22:21:27.000Z
|
2021-09-17T04:40:56.000Z
|
test/functional/feature_abortnode.py
|
XSWLO/bitcoin
|
b931f61b9ab098ea4ea8fbe4cbf0b03c566c3f63
|
[
"MIT"
] | 162 |
2015-02-23T00:45:54.000Z
|
2021-11-10T09:51:47.000Z
|
test/functional/feature_abortnode.py
|
XSWLO/bitcoin
|
b931f61b9ab098ea4ea8fbe4cbf0b03c566c3f63
|
[
"MIT"
] | 168 |
2015-01-13T13:54:38.000Z
|
2022-01-24T23:04:06.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind aborts if can't disconnect a block.
- Start a single node and generate 3 blocks.
- Delete the undo data.
- Mine a fork that requires disconnecting the tip.
- Verify that bitcoind AbortNode's.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, get_datadir_path, connect_nodes
import os
class AbortNodeTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.setup_nodes()
# We'll connect the nodes later
def run_test(self):
self.nodes[0].generate(3)
datadir = get_datadir_path(self.options.tmpdir, 0)
# Deleting the undo file will result in reorg failure
os.unlink(os.path.join(datadir, 'regtest', 'blocks', 'rev00000.dat'))
# Connecting to a node with a more work chain will trigger a reorg
# attempt.
self.nodes[1].generate(3)
with self.nodes[0].assert_debug_log(["Failed to disconnect block"]):
connect_nodes(self.nodes[0], 1)
self.nodes[1].generate(1)
# Check that node0 aborted
self.log.info("Waiting for crash")
wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=60)
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
if __name__ == '__main__':
AbortNodeTest().main()
| 34.673469 | 77 | 0.689818 |
4546f9e19e527219b4a088509c1a8b00ae9278c4
| 1,425 |
py
|
Python
|
pineboolib/qt3_widgets/qtextedit.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 2 |
2015-09-19T16:54:49.000Z
|
2016-09-12T08:06:29.000Z
|
pineboolib/qt3_widgets/qtextedit.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 1 |
2017-08-14T17:07:14.000Z
|
2017-08-15T00:22:47.000Z
|
pineboolib/qt3_widgets/qtextedit.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 9 |
2015-01-15T18:15:42.000Z
|
2019-05-05T18:53:00.000Z
|
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets # type: ignore
from pineboolib.core import decorators
from typing import Any
class QTextEdit(QtWidgets.QTextEdit):
LogText = 0
RichText = 1
def __init__(self, parent=None) -> None:
super(QTextEdit, self).__init__(parent)
self.LogText = 0
def setText(self, text) -> None:
super(QTextEdit, self).setText(text)
# if not project.DGI.localDesktop():
# project.DGI._par.addQueque("%s_setText" % self._parent.objectName(), text)
def getText(self) -> Any:
return super(QTextEdit, self).toPlainText()
@decorators.NotImplementedWarn
def textFormat(self):
return
@decorators.Incomplete
def setTextFormat(self, value):
if value == 0: # LogText
self.setReadOnly(True)
self.setAcceptRichText(False)
elif value == 1:
self.setReadOnly(False)
self.setAcceptRichText(True)
def setShown(self, value) -> None:
if value:
super().show()
else:
super().hide()
def getPlainText(self) -> Any:
return super(QTextEdit, self).toPlainText()
def setAutoFormatting(self, value) -> None:
value = QtWidgets.QTextEdit.AutoAll
super(QTextEdit, self).setAutoFormatting(value)
text = property(getText, setText)
PlainText = property(getPlainText, setText)
| 27.941176 | 87 | 0.625965 |
81c391bbd200800c2bc8385abcc9be999202b28d
| 7,126 |
py
|
Python
|
ugali/analysis/imf.py
|
mcnanna/ugali
|
2572915b82af5b25e8762013e6d5baabdaa24b21
|
[
"MIT"
] | null | null | null |
ugali/analysis/imf.py
|
mcnanna/ugali
|
2572915b82af5b25e8762013e6d5baabdaa24b21
|
[
"MIT"
] | null | null | null |
ugali/analysis/imf.py
|
mcnanna/ugali
|
2572915b82af5b25e8762013e6d5baabdaa24b21
|
[
"MIT"
] | 1 |
2019-07-18T16:42:27.000Z
|
2019-07-18T16:42:27.000Z
|
"""
Classes to handle initial mass functions (IMFs).
https://github.com/keflavich/imf
"""
from abc import abstractmethod
import numpy as np
import scipy.interpolate
from ugali.utils.logger import logger
############################################################
class IMF(object):
"""
Base class for initial mass functions (IMFs).
"""
def __call__(self, mass, **kwargs):
""" Call the pdf of the mass function """
return self.pdf(mass,**kwargs)
def integrate(self, mass_min, mass_max, log_mode=True, weight=False, steps=1e4):
""" Numerical Riemannn integral of the IMF (stupid simple).
Parameters:
-----------
mass_min: minimum mass bound for integration (solar masses)
mass_max: maximum mass bound for integration (solar masses)
log_mode[True]: use logarithmic steps in stellar mass as oppose to linear
weight[False]: weight the integral by stellar mass
steps: number of numerical integration steps
Returns:
--------
result of integral
"""
if log_mode:
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass
if weight:
return np.sum(mass * d_log_mass * self.pdf(mass, log_mode=True))
else:
return np.sum(d_log_mass * self.pdf(mass, log_mode=True))
else:
d_mass = (mass_max - mass_min) / float(steps)
mass = np.linspace(mass_min, mass_max, steps)
if weight:
return np.sum(mass * d_mass * self.pdf(mass, log_mode=False))
else:
return np.sum(d_mass * self.pdf(mass, log_mode=False))
def sample(self, n, mass_min=0.1, mass_max=10., steps=10000, seed=None):
"""
Sample initial mass values between mass_min and mass_max,
following the IMF distribution.
ADW: Should this be `sample` or `simulate`?
Parameters:
-----------
n : number of samples to draw
mass_min : minimum mass to sample from
mass_max : maximum mass to sample from
steps : number of steps for isochrone sampling
seed : random seed (passed to np.random.seed)
Returns:
--------
mass : array of randomly sampled mass values
"""
if seed is not None: np.random.seed(seed)
d_mass = (mass_max - mass_min) / float(steps)
mass = np.linspace(mass_min, mass_max, steps)
cdf = np.insert(np.cumsum(d_mass * self.pdf(mass[1:], log_mode=False)), 0, 0.)
cdf = cdf / cdf[-1]
f = scipy.interpolate.interp1d(cdf, mass)
return f(np.random.uniform(size=n))
@abstractmethod
def pdf(cls, mass, **kwargs): pass
class Chabrier2003(IMF):
""" Initial mass function from Chabrier (2003):
"Galactic Stellar and Substellar Initial Mass Function"
Chabrier PASP 115:763-796 (2003)
https://arxiv.org/abs/astro-ph/0304382
"""
@classmethod
def pdf(cls, mass, log_mode=True):
""" PDF for the Chabrier IMF.
The functional form and coefficients are described in Eq 17
and Tab 1 of Chabrier (2003):
m <= 1 Msun: E(log m) = A1*exp(-(log m - log m_c)^2 / 2 sigma^2)
m > 1 Msun: E(log m) = A2 * m^-x
A1 = 1.58 : normalization [ log(Msun)^-1 pc^-3]
m_c = 0.079 [Msun]
sigma = 0.69
A2 = 4.43e-2
x = 1.3
We redefine a = A1, A2 = a * b;
The normalization is set so that the IMF integrates to 1 over
the mass range from 0.1 Msun to 100 Msun
Parameters:
-----------
mass: stellar mass (solar masses)
log_mode[True]: return number per logarithmic mass range, i.e., dN/dlog(M)
Returns:
--------
number per (linear or log) mass bin, i.e., dN/dM or dN/dlog(M) where mass unit is solar masses
"""
log_mass = np.log10(mass)
# Constants from Chabrier 2003
m_c = 0.079
sigma = 0.69
x = 1.3
# This value is set to normalize integral from 0.1 to 100 Msun
a=1.31357499301
# This value is required so that the two components match at 1 Msun
b = 0.279087531047
dn_dlogm = ((log_mass <= 0) * a * np.exp(-(log_mass - np.log10(m_c))**2 / (2 * (sigma**2))))
dn_dlogm += ((log_mass > 0) * a * b * mass**(-x))
if log_mode:
# Number per logarithmic mass range, i.e., dN/dlog(M)
return dn_dlogm
else:
# Number per linear mass range, i.e., dN/dM
return dn_dlogm / (mass * np.log(10))
class Kroupa2001(IMF):
""" IMF from Kroupa (2001):
"On the variation of the initial mass function"
MNRAS 322:231 (2001)
https://arxiv.org/abs/astro-ph/0009005
"""
@classmethod
def pdf(cls, mass, log_mode=True):
""" PDF for the Kroupa IMF.
Normalization is set over the mass range from 0.1 Msun to 100 Msun
"""
log_mass = np.log10(mass)
# From Eq 2
mb = mbreak = [0.08, 0.5] # Msun
a = alpha = [0.3, 1.3, 2.3] # alpha
# Normalization set from 0.1 -- 100 Msun
norm = 0.27947743949440446
b = 1./norm
c = b * mbreak[0]**(alpha[1]-alpha[0])
d = c * mbreak[1]**(alpha[2]-alpha[1])
dn_dm = b * (mass < 0.08) * mass**(-alpha[0])
dn_dm += c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1])
dn_dm += d * (0.5 <= mass) * mass**(-alpha[2])
if log_mode:
# Number per logarithmic mass range, i.e., dN/dlog(M)
return dn_dm * (mass * np.log(10))
else:
# Number per linear mass range, i.e., dN/dM
return dn_dm
class Salpeter1955(IMF):
""" IMF from Salpeter (1955):
"The Luminosity Function and Stellar Evolution"
ApJ 121, 161S (1955)
http://adsabs.harvard.edu/abs/1955ApJ...121..161S
"""
@classmethod
def pdf(cls, mass, log_mode=True):
""" PDF for the Salpeter IMF.
Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun
"""
alpha = 2.35
a = 0.060285569480482866
dn_dm = a * mass**(-alpha)
if log_mode:
# Number per logarithmic mass range, i.e., dN/dlog(M)
return dn_dm * (mass * np.log(10))
else:
# Number per linear mass range, i.e., dN/dM
return dn_dm
def factory(name, **kwargs):
from ugali.utils.factory import factory
return factory(name, module=__name__, **kwargs)
imfFactory = factory
############################################################
def chabrierIMF(mass, log_mode=True):
""" Backward compatible wrapper around Chabrier2003.pdf """
return Chabrier2003.pdf(mass,log_mode=log_mode)
############################################################
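# Illustrative sketch (not part of the original module): build an IMF through
# the factory above, check its normalization over 0.1--100 Msun, and draw a
# random sample of initial masses; `_example_imf_usage` is a made-up name.
def _example_imf_usage(n_stars=1000):
    imf = factory('Chabrier2003')
    norm = imf.integrate(0.1, 100.)  # ~1 by construction of the normalization
    masses = imf.sample(n_stars, mass_min=0.1, mass_max=10., seed=0)
    return norm, masses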
| 32.244344 | 102 | 0.550379 |
0a6e0859c7ea4d562e9809928c2a4726c91187fe
| 8,900 |
py
|
Python
|
cnn/train_search.py
|
yuezuegu/darts
|
21af791837060b9e3372301c23cb94f74f56dbf1
|
[
"Apache-2.0"
] | null | null | null |
cnn/train_search.py
|
yuezuegu/darts
|
21af791837060b9e3372301c23cb94f74f56dbf1
|
[
"Apache-2.0"
] | null | null | null |
cnn/train_search.py
|
yuezuegu/darts
|
21af791837060b9e3372301c23cb94f74f56dbf1
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model_search import Network
from architect import Architect
CIFAR_CLASSES = 10
def main():
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.9, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=True, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--init_tau', type=float, default=5., help='Initial temperature value for Gumbel softmax')
parser.add_argument('--tau_anneal_rate', type=float, default=0.956, help='Exponential anneal rate for temperature value for Gumbel softmax')
args = parser.parse_args()
os.system("mkdir -p experiments")
args.save = 'experiments/search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
if torch.cuda.is_available():
logging.info('gpu device available')
logging.info('gpu device = %d' % args.gpu)
device = torch.device('cuda:0')
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
use_cuda = True
else:
device = torch.device('cpu')
logging.info('using cpu')
use_cuda = False
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
logging.info("args = %s", args)
criterion = nn.CrossEntropyLoss()
if use_cuda:
criterion = criterion.cuda()
model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion, args.init_tau, use_cuda)
if use_cuda:
model = model.cuda()
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
weight_optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
alpha_optimizer = torch.optim.Adam(
model.arch_parameters(),
lr=args.arch_learning_rate,
betas=(0.5, 0.999),
weight_decay=args.arch_weight_decay)
train_transform, valid_transform = utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(args.train_portion * num_train))
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=2)
valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
weight_optimizer, float(args.epochs), eta_min=args.learning_rate_min)
tau = args.init_tau
architect = Architect(model, args, use_cuda)
for epoch in range(args.epochs):
print(F.softmax(model.alphas_normal, dim=-1))
print(F.softmax(model.alphas_channels, dim=-1))
lr = scheduler.get_last_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
# training
    train_acc, train_obj = train_weights(train_queue, model, criterion, weight_optimizer, epoch_early_stop=0.2, args=args, use_cuda=use_cuda)
logging.info('train_acc %f', train_acc)
    train_acc, train_obj = train_alphas(train_queue, model, architect, criterion, alpha_optimizer, tau, epoch_early_stop=0.2, args=args, use_cuda=use_cuda)
logging.info('train_acc %f', train_acc)
# validation
    valid_acc, valid_obj = infer(valid_queue, model, criterion, args, use_cuda=use_cuda)
logging.info('valid_acc %f', valid_acc)
# update lr
scheduler.step()
model.update_tau(args.tau_anneal_rate)
utils.save(model, os.path.join(args.save, 'weights.pt'))
def train_weights(train_queue, model, criterion, weight_optimizer, epoch_early_stop, args, use_cuda=False):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
no_iterations = len(train_queue)
for step, (input, target) in enumerate(train_queue):
model.train()
n = input.size(0)
input = Variable(input, requires_grad=False)
if use_cuda:
input = input.cuda()
target = Variable(target, requires_grad=False)
if use_cuda:
target = target.cuda(non_blocking=True)
weight_optimizer.zero_grad()
logits = model(input)
loss = criterion(logits, target)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
weight_optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('train weights: %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if step > no_iterations*epoch_early_stop:
break
return top1.avg, objs.avg
def train_alphas(train_queue, model, architect, criterion, alpha_optimizer, tau, epoch_early_stop, args, use_cuda=False):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
no_iterations = len(train_queue)
for step, (input, target) in enumerate(train_queue):
model.train()
n = input.size(0)
input = Variable(input, requires_grad=False)
if use_cuda:
input = input.cuda()
target = Variable(target, requires_grad=False)
if use_cuda:
target = target.cuda(non_blocking=True)
architect.step(input, target, alpha_optimizer)
logits = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('training alphas: %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if step > no_iterations*epoch_early_stop:
break
return top1.avg, objs.avg
def infer(valid_queue, model, criterion, args, use_cuda=False):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
for step, (input, target) in enumerate(valid_queue):
input = Variable(input, requires_grad=False)
if use_cuda:
input = input.cuda()
target = Variable(target, requires_grad=False)
if use_cuda:
target = target.cuda(non_blocking=True)
logits = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
if __name__ == '__main__':
main()
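# Illustrative sketch (hypothetical helper, not used by the script above): the
# --init_tau / --tau_anneal_rate options describe an exponential Gumbel-softmax
# temperature schedule; assuming model.update_tau multiplies tau by the anneal
# rate once per epoch, the temperature at epoch e is init_tau * rate**e.
def _tau_schedule(init_tau=5.0, anneal_rate=0.956, epochs=50):
  """Return the assumed per-epoch Gumbel-softmax temperatures."""
  return [init_tau * anneal_rate ** e for e in range(epochs)]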
| 34.765625 | 141 | 0.735169 |
7daea682ad52aafe005d98e262a185b13641e860
| 14,792 |
py
|
Python
|
dymos/transcriptions/common/control_group.py
|
cashmesh/dymos
|
809f8e2cb2f82826a425286b9410b5c65df28949
|
[
"Apache-2.0"
] | 1 |
2021-07-19T17:03:49.000Z
|
2021-07-19T17:03:49.000Z
|
dymos/transcriptions/common/control_group.py
|
cashmesh/dymos
|
809f8e2cb2f82826a425286b9410b5c65df28949
|
[
"Apache-2.0"
] | null | null | null |
dymos/transcriptions/common/control_group.py
|
cashmesh/dymos
|
809f8e2cb2f82826a425286b9410b5c65df28949
|
[
"Apache-2.0"
] | null | null | null |
from collections.abc import Iterable
import numpy as np
import scipy.sparse as sp
import openmdao.api as om
from ..grid_data import GridData
from ...utils.misc import get_rate_units, CoerceDesvar, reshape_val
from ...utils.constants import INF_BOUND
from ...options import options as dymos_options
class ControlInterpComp(om.ExplicitComponent):
"""
Class definition for the ControlInterpComp.
    Compute the approximated control values and rates at all nodes, given the
    values of the control at the control discretization nodes.
Parameters
----------
**kwargs : dict
Dictionary of optional arguments.
Notes
-----
.. math::
u = \\left[ L \\right] u_d
\\dot{u} = \\frac{d\\tau_s}{dt} \\left[ D \\right] u_d
\\ddot{u} = \\left( \\frac{d\\tau_s}{dt} \\right)^2 \\left[ D_2 \\right] u_d
where
:math:`u_d` are the values of the control at the control discretization nodes,
:math:`u` are the values of the control at all nodes,
:math:`\\dot{u}` are the time-derivatives of the control at all nodes,
:math:`\\ddot{u}` are the second time-derivatives of the control at all nodes,
:math:`L` is the Lagrange interpolation matrix,
:math:`D` is the Lagrange differentiation matrix,
and :math:`\\frac{d\\tau_s}{dt}` is the ratio of segment duration in segment tau space
[-1 1] to segment duration in time.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._no_check_partials = not dymos_options['include_check_partials']
def initialize(self):
"""
Declare component options.
"""
self.options.declare(
'control_options', types=dict,
desc='Dictionary of options for the dynamic controls')
self.options.declare(
'time_units', default=None, allow_none=True, types=str,
desc='Units of time')
self.options.declare(
'grid_data', types=GridData,
desc='Container object for grid info')
# Save the names of the dynamic controls/parameters
# self._dynamic_names = []
self._input_names = {}
self._output_val_names = {}
self._output_rate_names = {}
self._output_rate2_names = {}
def _configure_controls(self):
control_options = self.options['control_options']
num_nodes = self.options['grid_data'].num_nodes
num_control_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']
time_units = self.options['time_units']
for name, options in control_options.items():
self._input_names[name] = 'controls:{0}'.format(name)
self._output_val_names[name] = 'control_values:{0}'.format(name)
self._output_rate_names[name] = 'control_rates:{0}_rate'.format(name)
self._output_rate2_names[name] = 'control_rates:{0}_rate2'.format(name)
shape = options['shape']
input_shape = (num_control_input_nodes,) + shape
output_shape = (num_nodes,) + shape
units = options['units']
rate_units = get_rate_units(units, time_units)
rate2_units = get_rate_units(units, time_units, deriv=2)
# self._dynamic_names.append(name)
self.add_input(self._input_names[name], val=np.ones(input_shape), units=units)
self.add_output(self._output_val_names[name], shape=output_shape, units=units)
self.add_output(self._output_rate_names[name], shape=output_shape, units=rate_units)
self.add_output(self._output_rate2_names[name], shape=output_shape,
units=rate2_units)
size = np.prod(shape)
self.sizes[name] = size
# The partial of interpolated value wrt the control input values is linear
# and can be computed as the kronecker product of the interpolation matrix (L)
# and eye(size).
J_val = sp.kron(self.L, sp.eye(size), format='csr')
rs, cs, data = sp.find(J_val)
self.declare_partials(of=self._output_val_names[name],
wrt=self._input_names[name],
rows=rs, cols=cs, val=data)
# The partials of the output rate and second derivative wrt dt_dstau
rs = np.arange(num_nodes * size, dtype=int)
cs = np.repeat(np.arange(num_nodes, dtype=int), size)
self.declare_partials(of=self._output_rate_names[name],
wrt='dt_dstau',
rows=rs, cols=cs)
self.declare_partials(of=self._output_rate2_names[name],
wrt='dt_dstau',
rows=rs, cols=cs)
# The partials of the rates and second derivatives are nonlinear but the sparsity
# pattern is obtained from the kronecker product of the 1st and 2nd differentiation
# matrices (D and D2) and eye(size).
self.rate_jacs[name] = sp.kron(sp.csr_matrix(self.D), sp.eye(size), format='csr')
rs, cs = self.rate_jacs[name].nonzero()
self.declare_partials(of=self._output_rate_names[name],
wrt=self._input_names[name],
rows=rs, cols=cs)
self.rate2_jacs[name] = sp.kron(sp.csr_matrix(self.D2), sp.eye(size), format='csr')
rs, cs = self.rate2_jacs[name].nonzero()
self.declare_partials(of=self._output_rate2_names[name],
wrt=self._input_names[name],
rows=rs, cols=cs)
def configure_io(self):
"""
I/O creation is delayed until configure so we can determine shape and units for the states.
"""
num_nodes = self.options['grid_data'].num_nodes
time_units = self.options['time_units']
gd = self.options['grid_data']
self.add_input('dt_dstau', shape=num_nodes, units=time_units)
self.rate_jacs = {}
self.rate2_jacs = {}
self.sizes = {}
num_disc_nodes = gd.subset_num_nodes['control_disc']
num_input_nodes = gd.subset_num_nodes['control_input']
# Find the indexing matrix that, multiplied by the values at the input nodes,
# gives the values at the discretization nodes
L_id = np.zeros((num_disc_nodes, num_input_nodes), dtype=float)
L_id[np.arange(num_disc_nodes, dtype=int),
gd.input_maps['dynamic_control_input_to_disc']] = 1.0
L_id = sp.csr_matrix(L_id)
# Matrices L_da and D_da interpolate values and rates (respectively) at all nodes from
# values specified at control discretization nodes.
L_da, D_da = gd.phase_lagrange_matrices('control_disc', 'all', sparse=True)
self.L = L_da.dot(L_id)
self.D = D_da.dot(L_id)
# Matrix D_dd interpolates rates at discretization nodes from values given at control
# discretization nodes.
_, D_dd = gd.phase_lagrange_matrices('control_disc', 'control_disc', sparse=True)
# Matrix D2 provides second derivatives at all nodes given values at input nodes.
self.D2 = D_da.dot(D_dd.dot(L_id))
self._configure_controls()
self.set_check_partial_options('*', method='cs')
def compute(self, inputs, outputs):
"""
Compute interpolated control values and rates.
Parameters
----------
inputs : `Vector`
`Vector` containing inputs.
outputs : `Vector`
`Vector` containing outputs.
"""
control_options = self.options['control_options']
num_nodes = self.options['grid_data'].num_nodes
num_control_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']
for name, options in control_options.items():
size = np.prod(options['shape'])
u_flat = np.reshape(inputs[self._input_names[name]],
newshape=(num_control_input_nodes, size))
a = self.D.dot(u_flat)
b = self.D2.dot(u_flat)
val = np.reshape(self.L.dot(u_flat), (num_nodes,) + options['shape'])
rate = a / inputs['dt_dstau'][:, np.newaxis]
rate = np.reshape(rate, (num_nodes,) + options['shape'])
rate2 = b / inputs['dt_dstau'][:, np.newaxis] ** 2
rate2 = np.reshape(rate2, (num_nodes,) + options['shape'])
outputs[self._output_val_names[name]] = val
outputs[self._output_rate_names[name]] = rate
outputs[self._output_rate2_names[name]] = rate2
def compute_partials(self, inputs, partials):
"""
Compute sub-jacobian parts. The model is assumed to be in an unscaled state.
Parameters
----------
inputs : Vector
Unscaled, dimensional input variables read via inputs[key].
partials : Jacobian
Subjac components written to partials[output_name, input_name].
"""
control_options = self.options['control_options']
num_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']
dstau_dt = np.reciprocal(inputs['dt_dstau'])
dstau_dt2 = (dstau_dt ** 2)[:, np.newaxis]
dstau_dt3 = (dstau_dt ** 3)[:, np.newaxis]
for name, options in control_options.items():
control_name = self._input_names[name]
size = self.sizes[name]
rate_name = self._output_rate_names[name]
rate2_name = self._output_rate2_names[name]
# Unroll shaped controls into an array at each node
u_flat = np.reshape(inputs[control_name], (num_input_nodes, size))
partials[rate_name, 'dt_dstau'] = (-self.D.dot(u_flat) * dstau_dt2).ravel()
partials[rate2_name, 'dt_dstau'] = (-2.0 * self.D2.dot(u_flat) * dstau_dt3).ravel()
dstau_dt_x_size = np.repeat(dstau_dt, size)[:, np.newaxis]
dstau_dt2_x_size = np.repeat(dstau_dt2, size)[:, np.newaxis]
partials[rate_name, control_name] = self.rate_jacs[name].multiply(dstau_dt_x_size).data
partials[rate2_name, control_name] = self.rate2_jacs[name].multiply(dstau_dt2_x_size).data
class ControlGroup(om.Group):
"""
Class definition for the ControlGroup.
Parameters
----------
**kwargs : dict
Dictionary of optional arguments.
"""
def initialize(self):
"""
Declare group options.
"""
self.options.declare('control_options', types=dict,
desc='Dictionary of options for the dynamic controls')
self.options.declare('time_units', default=None, allow_none=True, types=str,
desc='Units of time')
self.options.declare('grid_data', types=GridData, desc='Container object for grid info')
def setup(self):
"""
Define the structure of the control group.
"""
gd = self.options['grid_data']
control_options = self.options['control_options']
time_units = self.options['time_units']
if len(control_options) < 1:
return
opt_controls = [name for (name, opts) in control_options.items() if opts['opt']]
if len(opt_controls) > 0:
self.add_subsystem('indep_controls', subsys=om.IndepVarComp(), promotes_outputs=['*'])
self.add_subsystem(
'control_interp_comp',
subsys=ControlInterpComp(time_units=time_units, grid_data=gd,
control_options=control_options),
promotes_inputs=['*'],
promotes_outputs=['*'])
def configure_io(self):
"""
I/O creation is delayed until configure so we can determine shape and units for the states.
"""
control_options = self.options['control_options']
gd = self.options['grid_data']
self.control_interp_comp.configure_io()
for name, options in control_options.items():
shape = options['shape']
size = np.prod(shape)
if options['opt']:
num_input_nodes = gd.subset_num_nodes['control_input']
desvar_indices = list(range(size * num_input_nodes))
if options['fix_initial']:
if isinstance(options['fix_initial'], Iterable):
idxs_to_fix = np.where(np.asarray(options['fix_initial']))[0]
for idx_to_fix in reversed(sorted(idxs_to_fix)):
del desvar_indices[idx_to_fix]
else:
del desvar_indices[:size]
if options['fix_final']:
if isinstance(options['fix_final'], Iterable):
idxs_to_fix = np.where(np.asarray(options['fix_final']))[0]
for idx_to_fix in reversed(sorted(idxs_to_fix)):
del desvar_indices[-size + idx_to_fix]
else:
del desvar_indices[-size:]
if len(desvar_indices) > 0:
coerce_desvar_option = CoerceDesvar(num_input_nodes, desvar_indices,
options)
lb = np.zeros_like(desvar_indices, dtype=float)
lb[:] = -INF_BOUND if coerce_desvar_option('lower') is None else \
coerce_desvar_option('lower')
ub = np.zeros_like(desvar_indices, dtype=float)
ub[:] = INF_BOUND if coerce_desvar_option('upper') is None else \
coerce_desvar_option('upper')
self.add_design_var(name='controls:{0}'.format(name),
lower=lb,
upper=ub,
scaler=coerce_desvar_option('scaler'),
adder=coerce_desvar_option('adder'),
ref0=coerce_desvar_option('ref0'),
ref=coerce_desvar_option('ref'),
indices=desvar_indices)
default_val = reshape_val(options['val'], shape, num_input_nodes)
self.indep_controls.add_output(name=f'controls:{name}',
val=default_val,
shape=(num_input_nodes,) + shape,
units=options['units'])
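# Illustrative sketch (standalone, hypothetical values; not part of dymos): the
# relations documented on ControlInterpComp, u = L u_d and du/dt = D u_d / dt_dstau,
# shown for a single quadratic segment in tau on [-1, 1] using numpy only. A real
# transcription builds L and D from GridData; here L is formed directly from the
# Lagrange basis and the rate is approximated with np.gradient.
def _demo_control_interp():
    tau_d = np.array([-1.0, 0.0, 1.0])            # control discretization nodes
    u_d = np.array([0.0, 1.0, 4.0])               # control values at those nodes
    tau_all = np.linspace(-1.0, 1.0, 9)           # "all" nodes of the segment
    # Lagrange interpolation matrix L: (num all nodes) x (num disc nodes)
    L = np.array([[np.prod([(t - tau_d[m]) / (tau_d[j] - tau_d[m])
                            for m in range(3) if m != j])
                   for j in range(3)] for t in tau_all])
    u = L.dot(u_d)                                 # interpolated control values
    dt_dstau = 2.0                                 # e.g. a 4-second segment in time
    du_dt = np.gradient(u, tau_all) / dt_dstau     # approximate rate in time
    return u, du_dt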
| 41.318436 | 102 | 0.583423 |
afe690113b48cacd55d0b5f1e8f1492bae11b8c2
| 35 |
py
|
Python
|
pygraylog/__init__.py
|
zmallen/pygraylog
|
cda2c6b583e8c7de47e98458b3faeae7d05a94d3
|
[
"Apache-2.0"
] | 14 |
2016-08-29T16:31:14.000Z
|
2021-11-30T10:39:29.000Z
|
pygraylog/__init__.py
|
zmallen/pygraylog
|
cda2c6b583e8c7de47e98458b3faeae7d05a94d3
|
[
"Apache-2.0"
] | 9 |
2016-08-28T15:23:47.000Z
|
2018-02-07T20:11:18.000Z
|
pygraylog/__init__.py
|
zmallen/pygraylog
|
cda2c6b583e8c7de47e98458b3faeae7d05a94d3
|
[
"Apache-2.0"
] | 16 |
2016-10-04T17:37:42.000Z
|
2021-07-08T15:43:50.000Z
|
import endpoints
import graylogapi
| 11.666667 | 17 | 0.885714 |
731de0a6d0aeeff06fe3f22999e564d574c63d39
| 109 |
py
|
Python
|
srim/executor/__init__.py
|
agoose77/pysrim-executor
|
e251c6fc270ce8dfd500edd944c015d1911a20b9
|
[
"MIT"
] | null | null | null |
srim/executor/__init__.py
|
agoose77/pysrim-executor
|
e251c6fc270ce8dfd500edd944c015d1911a20b9
|
[
"MIT"
] | null | null | null |
srim/executor/__init__.py
|
agoose77/pysrim-executor
|
e251c6fc270ce8dfd500edd944c015d1911a20b9
|
[
"MIT"
] | null | null | null |
from .native import NativeExecutor
from .docker import DockerExecutor
from .executor import SRIMExecutorBase
| 27.25 | 38 | 0.862385 |
e857b4d8823b0a1e77d74fd02e69e06dae2091e2
| 1,470 |
py
|
Python
|
offer/39_MoreThanHalfNumber.py
|
DevRoss/python-offer-code
|
580b2d7b265b3fa8a598287f42a4ca8d9f834eb1
|
[
"MIT"
] | 1 |
2019-09-02T07:14:26.000Z
|
2019-09-02T07:14:26.000Z
|
offer/39_MoreThanHalfNumber.py
|
DevRoss/python-offer-code
|
580b2d7b265b3fa8a598287f42a4ca8d9f834eb1
|
[
"MIT"
] | 1 |
2019-09-02T07:14:48.000Z
|
2019-10-21T14:30:37.000Z
|
offer/39_MoreThanHalfNumber.py
|
DevRoss/python-offer-code
|
580b2d7b265b3fa8a598287f42a4ca8d9f834eb1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-19
def check(array: list, middle):
    # The value at the partition point is only a majority candidate; count its
    # occurrences over the whole array, since equal values are not guaranteed to
    # be contiguous after partitioning.
    c = array.count(array[middle])
    return c > (len(array) >> 1)
def solve(array: list):
if not array:
return None
middle = (len(array) - 1) >> 1
def partition(array, left, right):
first = left
key = array[left]
while left != right:
while key <= array[right] and left < right:
right -= 1
while array[left] <= key and left < right:
left += 1
if left < right:
array[left], array[right] = array[right], array[left]
        # move the pivot key back into its final position
array[first] = array[left]
array[left] = key
return left
left = 0
right = len(array) - 1
index = partition(array, left, right)
while index != middle:
if index > middle:
right = index - 1
index = partition(array, left, right)
else:
left = index + 1
index = partition(array, left, right)
if check(array, middle):
return array[middle]
return None
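# Illustrative alternative (hypothetical, not part of the original solution): the
# Boyer-Moore majority vote finds the same "appears more than half the time"
# element in O(n) time and O(1) extra space, and unlike solve() it does not
# reorder the input array.
def majority_vote(array: list):
    if not array:
        return None
    candidate, count = None, 0
    for x in array:
        if count == 0:
            candidate, count = x, 1
        elif x == candidate:
            count += 1
        else:
            count -= 1
    # A second pass confirms the candidate really is a majority element.
    return candidate if array.count(candidate) > (len(array) >> 1) else None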
if __name__ == '__main__':
print(solve([1, 2, 7, 2, 8, 2, 2, 5, 2]))
print(solve([1, 2, 7, 2, 8, 7, 2, 5, 2]))
print(solve([3]))
| 23.709677 | 69 | 0.493197 |
785180ec9a0b5d5ebefe4b3b654dc21c1a974f3e
| 4,379 |
py
|
Python
|
benchmark/startQiskit3155.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit3155.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit3155.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=47
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
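# Illustrative check (hypothetical helper, not used by the benchmark): enumerate
# the classical Boolean function f(x) = (a . x) xor b that build_oracle encodes,
# using only the bit-string helpers defined above, before it is turned into a gate.
def _print_truth_table(a="111", b="0"):
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    for i in range(2 ** len(a)):
        rep = np.binary_repr(i, len(a))
        print(rep, "->", f(rep))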
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.cx(input_qubit[3],input_qubit[0]) # number=38
prog.z(input_qubit[3]) # number=39
prog.cx(input_qubit[3],input_qubit[0]) # number=40
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[2],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.y(input_qubit[3]) # number=37
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[1]) # number=30
prog.cz(input_qubit[2],input_qubit[0]) # number=15
prog.h(input_qubit[0]) # number=16
prog.cx(input_qubit[0],input_qubit[2]) # number=20
prog.cx(input_qubit[0],input_qubit[2]) # number=44
prog.x(input_qubit[2]) # number=45
prog.cx(input_qubit[0],input_qubit[2]) # number=46
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.cx(input_qubit[0],input_qubit[2]) # number=17
prog.cx(input_qubit[0],input_qubit[2]) # number=23
prog.x(input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.cx(input_qubit[0],input_qubit[2]) # number=19
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3155.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 35.314516 | 140 | 0.652432 |
04443d86cd7305f39a9edacf707b7f58cc42700e
| 2,041 |
py
|
Python
|
unittests/test_structures.py
|
argupta98/MotionPlanning
|
1b8454ff2b6fe797773727d8de57999a9fc68c0f
|
[
"MIT"
] | null | null | null |
unittests/test_structures.py
|
argupta98/MotionPlanning
|
1b8454ff2b6fe797773727d8de57999a9fc68c0f
|
[
"MIT"
] | null | null | null |
unittests/test_structures.py
|
argupta98/MotionPlanning
|
1b8454ff2b6fe797773727d8de57999a9fc68c0f
|
[
"MIT"
] | null | null | null |
from src.structures import *
import unittest
import numpy as np
class TestPolygonCounter(unittest.TestCase):
def test_specific_1(self):
vehicle= np.array([[-183.49516, -220.11375],
[-225.02881, -184.53629 ],
[-53.735645, -109.01296 ],
[-45.640778, -155.59946 ]])
vehicle = Polygon(vehicle)
np.testing.assert_equal(vehicle.edges[0], np.array([[-183.49516, -220.11375],
[-225.02881, -184.53629 ]]))
np.testing.assert_equal(vehicle.edges[1], np.array(
[[-225.02881, -184.53629 ],
[-53.735645, -109.01296 ]]))
np.testing.assert_equal(vehicle.edges[2], np.array(
[[-53.735645, -109.01296 ],
[-45.640778, -155.59946 ]]))
self.assertTrue(vehicle.is_counterclockwise())
vehicle.counterclockwise()
self.assertTrue(vehicle.is_counterclockwise())
angles, _ = vehicle.edge_angles()
def test_specific_2(self):
square = np.array([[400, 50],
[800, 50],
[800, 200],
[400, 200]])
vehicle = Polygon(square)
self.assertFalse(vehicle.is_counterclockwise())
vehicle.counterclockwise()
self.assertTrue(vehicle.is_counterclockwise())
angles, _ = vehicle.edge_angles()
self.assertEqual(angles[0], 0)
self.assertEqual(angles[1], np.pi /2)
self.assertEqual(angles[2], np.pi)
self.assertEqual(angles[3], 3 * np.pi / 2)
# def test_edges(self):
# square = np.array([[400, 50],
# [800, 50],
# [800, 200],
# [400, 200]])
# vehicle = Polygon(square)
# np.assertEqual(edges[0], np.array([edges]))
| 39.25 | 85 | 0.476237 |
766102e3be6635ee2e02343a660e3148e3119aa0
| 854 |
py
|
Python
|
LPPy/Abstract/tableau.py
|
VijayS02/LPPy
|
7643e3cd78f4aac523611cfbfb2392d6e94d2a5c
|
[
"MIT"
] | null | null | null |
LPPy/Abstract/tableau.py
|
VijayS02/LPPy
|
7643e3cd78f4aac523611cfbfb2392d6e94d2a5c
|
[
"MIT"
] | null | null | null |
LPPy/Abstract/tableau.py
|
VijayS02/LPPy
|
7643e3cd78f4aac523611cfbfb2392d6e94d2a5c
|
[
"MIT"
] | null | null | null |
import numpy as np
import LPPy.Abstract.lpp
from LPPy.Abstract.lpp import LPP
from LPPy.Abstract.outputHandler import OutputHandler
class Tableau(LPP):
"""
An abstract class representing a tableau form of an LPP used in the Simplex Method.
"""
# An output handler to send the output to.
outputter: OutputHandler
def get_form(self) -> str:
"""
        Return the form of this LPP. It is always canonical, because every simplex tableau represents a canonical problem (APM236).
:return: lpp.CANONICAL
"""
return LPPy.Abstract.lpp.CANONICAL
def get_is_max(self):
"""
        Every tableau used in the simplex method represents a maximization problem, so this is always True.
:return: True
"""
return True
def set_is_max(self, new_max: bool):
return False
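# Illustrative sketch (standalone and hypothetical, not part of LPPy): why a
# simplex tableau always represents a canonical maximization problem. Any
# "max c^T x s.t. Ax <= b, x >= 0" instance is brought to canonical form by
# appending one slack variable per constraint, giving the [A | I | b] layout
# (plus an objective row) that a concrete Tableau subclass would store. Reuses
# the numpy import at the top of this module.
def _initial_tableau(A, b, c):
    A = np.asarray(A, dtype=float)
    b = np.asarray(b, dtype=float).reshape(-1, 1)
    c = np.asarray(c, dtype=float)
    m, _ = A.shape
    body = np.hstack([A, np.eye(m), b])            # constraint rows [A | I | b]
    obj = np.hstack([-c, np.zeros(m + 1)])         # objective row [-c | 0 | 0]
    return np.vstack([body, obj])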
| 27.548387 | 116 | 0.663934 |
e93031e396da536f4b35390797447d0eb5e55a0c
| 1,118 |
py
|
Python
|
analysis/scripts/mx_select.py
|
sbooeshaghi/azucar
|
0ced041aa9cfa52593109f79794ac6009adf909a
|
[
"BSD-2-Clause"
] | null | null | null |
analysis/scripts/mx_select.py
|
sbooeshaghi/azucar
|
0ced041aa9cfa52593109f79794ac6009adf909a
|
[
"BSD-2-Clause"
] | null | null | null |
analysis/scripts/mx_select.py
|
sbooeshaghi/azucar
|
0ced041aa9cfa52593109f79794ac6009adf909a
|
[
"BSD-2-Clause"
] | null | null | null |
from collections import defaultdict
from .utils import read_markers, write_list
def read_genes(genes_fname, genes=defaultdict()):
with open(genes_fname) as f:
for idx, line in enumerate(f.readlines()):
gene = line.strip()
genes[gene] = idx
def sel_genes(genes, marker_genes, sel=[]):
mg_inv = {v: k for k, v in marker_genes.items()}
for idx in range(len(mg_inv)):
# this maps the marker gene name index to the gene index
# in order of the marker_genes file
sel.append(genes[mg_inv[idx]])
def mx_select(markers_fname, genes_fname, out_select_fn):
# select should be extensible to axis and genes -> md (metadata)
markers_ec = defaultdict(list)
celltypes = defaultdict()
marker_genes = defaultdict()
# this is duplicated from index, not ideal but w/e maybe ok
# ideally would want to give it markers.ec
read_markers(markers_fname, markers_ec, celltypes, marker_genes)
genes = defaultdict()
read_genes(genes_fname, genes)
sel = []
sel_genes(genes, marker_genes, sel)
write_list(out_select_fn, sel)
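# Illustrative toy run (hypothetical gene names, not part of the original
# script): with `genes` mapping gene name -> row index in the genes file and
# `marker_genes` mapping marker name -> row index in the markers file,
# sel_genes emits the gene-file index of each marker in marker-file order.
def _demo_sel_genes():
    genes = {"ACTB": 0, "CD3D": 1, "MS4A1": 2}
    marker_genes = {"MS4A1": 0, "CD3D": 1}
    sel = []
    sel_genes(genes, marker_genes, sel)
    return sel  # -> [2, 1]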
| 31.942857 | 68 | 0.686047 |
06a56e4a40a041822e6879768a8022731d67db83
| 8,266 |
py
|
Python
|
open_facebook/tests.py
|
devhub/Django-facebook
|
fec1b5dbd37f7840ee325f372ecf8754ce265c2a
|
[
"BSD-3-Clause"
] | null | null | null |
open_facebook/tests.py
|
devhub/Django-facebook
|
fec1b5dbd37f7840ee325f372ecf8754ce265c2a
|
[
"BSD-3-Clause"
] | null | null | null |
open_facebook/tests.py
|
devhub/Django-facebook
|
fec1b5dbd37f7840ee325f372ecf8754ce265c2a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'facebook_example.settings'
from open_facebook.api import *
import unittest
import logging
logger = logging.getLogger()
from open_facebook.utils import json
class TestErrorMapping(unittest.TestCase):
def test_oauth_errors(self):
expires_response = '''{
"error": {
"type": "OAuthException",
"message": "Session has expired at unix time SOME_TIME. The current unix time is SOME_TIME."
}
} '''
changed_password_response = '''
{
"error": {
"type": "OAuthException",
"message": "The session has been invalidated because the user has changed the password."
}
}
'''
deauthorized_response = '''
{
"error": {
"type": "OAuthException",
"message": "Error validating access token: USER_ID has not authorized application APP_ID"
}
}
'''
loggedout_response = '''
{
"error": {
"type": "OAuthException",
"message": "Error validating access token: The session is invalid because the user logged out."
}
}
'''
responses = [expires_response, changed_password_response,
deauthorized_response, loggedout_response]
response_objects = []
for response_string in responses:
response = json.loads(response_string)
response_objects.append(response)
from open_facebook import exceptions as open_facebook_exceptions
for response in response_objects:
oauth = False
try:
FacebookConnection.raise_error(response['error']['type'],
response['error']['message'])
except open_facebook_exceptions.OAuthException, e:
oauth = True
assert oauth, 'response %s didnt raise oauth error' % response
class TestOpenFacebook(unittest.TestCase):
def test_thijs_profile(self):
token = FacebookAuthorization.get_app_access_token()
FacebookAuthorization.create_test_user(token)
test_user = FacebookAuthorization.get_or_create_test_user(token)
return
message = "Hi! I'm on Fashiolista, a worldwide community for " \
"fashion inspiration. Click to see my style profile and " \
"discover great new shops and fashion items!"
image_urls = [
'http://e.fashiocdn.com/images/entities/0/0/x/l/W/0.365x365.jpg',
'http://d.fashiocdn.com/images/entities/0/9/9/j/j/0.365x365.jpg',
'http://e.fashiocdn.com/images/entities/0/8/0/7/4/0.365x365.jpg',
]
token = None
access_token = '215464901804004|fc589819a12431167c3bd571.0-100002862180253|by58p1KHqf_XiqA4ux390XBGBIo'
# login
# https://www.facebook.com/platform/test_account_login.php?user_id=100002898600225&n=I4x8lGXREnEhea7
# fill in a real token for this to work
# {u'access_token': u'215464901804004|fc589819a12431167c3bd571.0-100002862180253|by58p1KHqf_XiqA4ux390XBGBIo', u'password': u'1439799010', u'login_url': u'https://www.facebook.com/platform/test_account_login.php?user_id=100002862180253&n=4xdwSTQbstgOzUt', u'id': u'100002862180253', u'email': u'[email protected]'}
fb = OpenFacebook(access_token)
print fb.get('me/accounts')
return
permissions = [p for p, v in fb.get(
'me/permissions')['data'][0].items() if v]
print permissions
return
print fb.fql("SELECT uid, name, sex FROM user WHERE uid IN " \
"(SELECT uid2 FROM friend WHERE uid1 = me())")
return
actions = [dict(name='Follow', link='http://www.fashiolista.com/')]
# print fb.get('thijsgoos', metadata='1')['metadata']
types = ['link', 'photo']
for type in types:
for image_url in image_urls:
fb.set('me/feed', picture=image_url,
actions=actions, type=type, message=type)
# print fb.set('696010430_10150752137065431/likes')
def test_app_access_token(self):
token = FacebookAuthorization.get_app_access_token()
test_user = FacebookAuthorization.create_test_user(token)
token_available = 'access_token' in test_user
assert token_available, 'App authentication failed %s' % test_user
def test_cookie_parsing(self):
cookie = 'F7cndfQuSIkcVHWIgg_SHQ4LIDJXeeHhiXUNjesOw5g.eyJhbGdvcml0aG0iOiJITUFDLVNIQTI1NiIsImNvZGUiOiJVMTZuMFNoWVUxSTJ5VEFJMVZ0RmlvZTdhRVRaaEZ4cGV5d1hwYnZvOUprLmV5SnBkaUk2SW1OcmFGVXlWR053ZDA1VlMwSTRlUzFzZDA1WmFtY2lmUS5rZl9RTUhCMnVFTVh5YW83UU5UcnFGMlJzOGxxQUxrM1AxYm8zazBLMm5YUXpOZW5LSVlfczBVV3ZNbE1jTXAzcE04TXNLNVVDQUpjWlQ1N1ZaZXFkS3ZPeXRFbmdoODFxTmczTXVDeTBHNjB6WjFBOWZGZlpHenVDejdKSEVSSCIsImlzc3VlZF9hdCI6MTMxMTYwMDEyNywidXNlcl9pZCI6Nzg0Nzg1NDMwfQ'
parsed_cookie = FacebookAuthorization.parse_signed_data(cookie)
assert 'code' in parsed_cookie
def test_code_conversion(self):
from open_facebook import exceptions as open_facebook_exceptions
# before testing update this with a valid code, hope facebook comes with a way to automate this
code = 'AQDByzD95HCaQLIY3PyQFvCJ67bkYx5f692TylEXARQ0p6_XK0mXGRVBU3G759qOIa_A966Wmm-kxxw1GbXkXQiJj0A3b_XNFewFhT8GSro4i9F8b_7q1RSnKzfq327XYno-Qw4NGxm0ordSl0gJ0YTjhwY8TwSMy2b2whD5ZhHvaYkEaC1J-GcBhkF7o4F2-W8'
#the redirect uri needs to be connected
try:
user_token = FacebookAuthorization.convert_code(
code, redirect_uri='http://local.mellowmorning.com:8080')
facebook = OpenFacebook(user_token['access_token'])
facebook.me()
except open_facebook_exceptions.OAuthException, e:
pass
def test_fql(self):
token = self.get_access_token()
facebook = OpenFacebook(token)
result = facebook.fql('SELECT name FROM user WHERE uid = me()')
assert 'name' in result[0]
def get_access_token(self):
token = FacebookAuthorization.get_app_access_token()
test_user = FacebookAuthorization.create_test_user(token)
return test_user['access_token']
def test_open_api(self):
token = self.get_access_token()
facebook = OpenFacebook(token)
assert 'name' in facebook.me()
assert facebook.get('fashiolista')
def test_album_upload(self):
token = self.get_access_token()
facebook = OpenFacebook(token)
photo_urls = [
'http://d.fashiocdn.com/images/entities/0/6/t/p/d/0.365x365.jpg',
'http://e.fashiocdn.com/images/entities/0/5/E/b/Q/0.365x365.jpg',
]
#feed method
for photo in photo_urls:
facebook.set(
'me/feed', message='Fashiolista is awesome - part one',
picture=photo)
#app album method
#gives an unknown error for some reason
# for photo in photo_urls:
# uploaded = facebook.set('me/photos', url=photo, message='Fashiolista 2 is awesome - part two', name='FashiolistaTest2')
albums = facebook.get('me/albums')
album_names = [album['name'] for album in albums['data']]
album_name = 'FashiolistaSuperAlbum'
album_response = facebook.set('me/albums', params=dict(
name=album_name, message='Your latest fashion finds'))
albums = facebook.get('me/albums')
album_names = [album['name'] for album in albums['data']]
assert album_name in album_names
album_id = album_response['id']
for photo in photo_urls:
facebook.set(
'%s/photos' % album_id, url=photo,
message='the writing is one the wall tw',
name='FashiolistaTestt')
if __name__ == '__main__':
import logging
handler = logging.StreamHandler()
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
unittest.main(defaultTest='TestOpenFacebook.test_thijs_profile')
| 43.277487 | 457 | 0.650859 |
f7fa184eba534135c8de9fdf6f7eebf36d1427f5
| 4,647 |
py
|
Python
|
4-Informer/chainInformer/data/merge_2018_2020_data.py
|
wang-yuhao/On-the-topological-propertyof-dynamic-transaction-graph
|
8dc8c3870befb82581099e3a6edc9f9734c23f31
|
[
"MIT"
] | 1 |
2021-01-13T20:54:18.000Z
|
2021-01-13T20:54:18.000Z
|
4-Informer/chainInformer/data/merge_2018_2020_data.py
|
wang-yuhao/On-the-topological-propertyof-dynamic-transaction-graph
|
8dc8c3870befb82581099e3a6edc9f9734c23f31
|
[
"MIT"
] | null | null | null |
4-Informer/chainInformer/data/merge_2018_2020_data.py
|
wang-yuhao/On-the-topological-propertyof-dynamic-transaction-graph
|
8dc8c3870befb82581099e3a6edc9f9734c23f31
|
[
"MIT"
] | 1 |
2020-12-03T10:30:53.000Z
|
2020-12-03T10:30:53.000Z
|
# Merge 2018-2020 data for Informer
# This process will generate merged base, betti, betti_deri, and fl files in PRICESSED_DIR.
import pandas as pd
import os
from sklearn.decomposition import PCA
import datetime
import math
import pandas as pd
import numpy as np
import torch
BETTI_NUMBER_DIR = "/content/drive/MyDrive/aliyun/betti_number/"
AMOMAT_DIR = "/content/drive/MyDrive/aliyun/amoMat/"
OCCMAT_DIR = "/content/drive/MyDrive/aliyun/occMat/"
PRICE_PATH = "/content/drive/MyDrive/aliyun/bitcoin_2018_2020.csv"
PROCESSED_DIR = "/content/drive/MyDrive/aliyun/processed_data/2018_2020/"
TOTALTX_DIR = "/content/drive/MyDrive/aliyun/bitcoin_totaltx_2018_2020.csv"
PERIOD = [2018, 2019, 2020]
def getBetweenDay(begin_date, end_date):
date_list = []
date_arr = []
date_unix_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
print("begin_date:",begin_date)
# end_date = datetime.datetime.strptime(time.strftime('%Y-%m-%d', time.localtime(time.time())), "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
print("end_date:",end_date)
while begin_date <= end_date:
date_unix = math.trunc(begin_date.replace(tzinfo=datetime.timezone.utc).timestamp()*1000)
date_unix_list.append(date_unix)
date_str = begin_date.strftime("%Y-%m-%d")
date_list.append(date_str)
date_arr.append([date_str, date_unix])
begin_date += datetime.timedelta(days=1)
return np.asarray(date_arr)
def combine_features_with_data(dataset_model):
data_price = pd.read_csv(PRICE_PATH)
btc_price_2018_2020 = data_price.Open.str.replace(",","")
total_tx = pd.read_csv(TOTALTX_DIR, index_col=0)
date_arr = pd.DataFrame(getBetweenDay("2018-01-01", "2020-12-31"))[0]
btc_2018_2020 = pd.concat([total_tx, btc_price_2018_2020, date_arr], axis = 1)
btc_2018_2020.columns = ["totaltx", "price", "date"]
print("btc_2018_2020:",btc_2018_2020)
data_feature = pd.DataFrame([])
if dataset_model == "betti":
for YEAR in PERIOD:
#for file_name in os.listdir(BETTI_NUMBER_DIR):
feature_betti_0 = pd.read_csv(BETTI_NUMBER_DIR + str(YEAR) + "_betti_0.csv", index_col=0).loc[:, "0":"49"]
feature_betti_1 = pd.read_csv(BETTI_NUMBER_DIR + str(YEAR) + "_betti_1.csv", index_col=0).loc[:, "0":"49"]
feature_betti_number = pd.concat([feature_betti_0,feature_betti_1], axis = 1)
data_feature = pd.concat([data_feature,feature_betti_number]).reset_index(drop=True)
data_feature.to_csv("data_feature.csv")
print("data_feature:",data_feature)
elif dataset_model == "betti_der":
for YEAR in PERIOD:
feature_betti_0 = pd.read_csv(BETTI_NUMBER_DIR + str(YEAR) + "_betti_0.csv", index_col=0).loc[:, "0":"49"]
feature_betti_1 = pd.read_csv(BETTI_NUMBER_DIR + str(YEAR) + "_betti_1.csv", index_col=0).loc[:, "0":"49"]
feature_betti_0_der = pd.read_csv(BETTI_NUMBER_DIR + str(YEAR) + "_betti_0.csv", index_col=0).diff(axis=1)
feature_betti_1_der = pd.read_csv(BETTI_NUMBER_DIR + str(YEAR) + "_betti_1.csv", index_col=0).diff(axis=1)
feature_betti_0_der_50 = feature_betti_0_der.loc[:, "1":"50"]
feature_betti_1_der_50 = feature_betti_1_der.loc[:, "1":"50"]
feature_betti_total = pd.concat([feature_betti_0, feature_betti_1, feature_betti_0_der_50, feature_betti_1_der_50], axis=1)
data_feature = pd.concat([data_feature,feature_betti_total]).reset_index(drop=True)
elif dataset_model == "fl":
for year in PERIOD:
for day in getBetweenDay(str(year) + "-01-01", str(year) + "-12-31"):
feature = pd.read_csv(OCCMAT_DIR + str(year) + "/occ" + day[0] + '.csv', index_col=0).to_numpy()
feature = pd.DataFrame(feature.flatten()).T
data_feature = pd.concat([data_feature,feature], axis = 0)
data_feature.to_csv(PROCESSED_DIR + dataset_model+"_orig.csv")
print("data_feature:",data_feature)
if len(data_feature) > 0:
pca = PCA(n_components = 20)
pca.fit(data_feature)
data_feature = pd.DataFrame(pca.transform(data_feature))
print("pca data_feature:",data_feature)
data_combined = pd.concat([btc_2018_2020,data_feature], axis=1)
cols = data_combined.columns.tolist()
cols = cols[2:] + cols[:2]
data_combined = data_combined[cols]
data_combined.to_csv(PROCESSED_DIR + dataset_model+".csv", index=False)
print(data_combined)
for dataset_model in ["base", "betti","betti_der", "fl"]:
combine_features_with_data(dataset_model)
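# Illustrative check (hypothetical helper, not used above): getBetweenDay returns
# one row per calendar day, [YYYY-MM-DD, milliseconds since the epoch at UTC
# midnight], as strings in a numpy array; e.g. the first three days of 2018 map
# to 1514764800000, 1514851200000 and 1514937600000 ms.
def _demo_get_between_day():
    return getBetweenDay("2018-01-01", "2018-01-03")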
| 49.43617 | 135 | 0.688832 |
aaf2dcdc8b197e8b6ba98e8c986a198f99a36231
| 5,771 |
py
|
Python
|
events/desucon2016/forms.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 13 |
2015-11-29T12:19:12.000Z
|
2021-02-21T15:42:11.000Z
|
events/desucon2016/forms.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 23 |
2015-04-29T19:43:34.000Z
|
2021-02-10T05:50:17.000Z
|
events/desucon2016/forms.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 11 |
2015-09-20T18:59:00.000Z
|
2020-02-07T08:47:34.000Z
|
from django import forms
from django.db.models import Q
from crispy_forms.layout import Layout, Fieldset
from core.utils import horizontal_form_helper, indented_without_label
from labour.forms import AlternativeFormMixin, SignupForm
from labour.models import Signup, JobCategory, WorkPeriod
from .models import SignupExtraV2
class SignupExtraForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(SignupExtraForm, self).__init__(*args, **kwargs)
self.helper = horizontal_form_helper()
self.helper.form_tag = False
self.helper.layout = Layout(
'shift_type',
indented_without_label('night_work'),
Fieldset('Lisätiedot',
'shirt_size',
'special_diet',
'special_diet_other',
'desu_amount',
'prior_experience',
'free_text',
)
)
class Meta:
model = SignupExtraV2
fields = (
'shift_type',
'shirt_size',
'special_diet',
'special_diet_other',
'desu_amount',
'night_work',
'prior_experience',
'free_text',
)
widgets = dict(
special_diet=forms.CheckboxSelectMultiple,
)
class OrganizerSignupForm(forms.ModelForm, AlternativeFormMixin):
def __init__(self, *args, **kwargs):
event = kwargs.pop('event')
admin = kwargs.pop('admin')
assert not admin
super(OrganizerSignupForm, self).__init__(*args, **kwargs)
self.helper = horizontal_form_helper()
self.helper.form_tag = False
self.helper.layout = Layout(
Fieldset('Tehtävän tiedot',
'job_title',
),
)
self.fields['job_title'].help_text = "Mikä on tehtäväsi vastaavana? Printataan badgeen."
# self.fields['job_title'].required = True
class Meta:
model = Signup
fields = ('job_title',)
widgets = dict(
job_categories=forms.CheckboxSelectMultiple,
special_diet=forms.CheckboxSelectMultiple,
)
def get_excluded_m2m_field_defaults(self):
return dict(
job_categories=JobCategory.objects.filter(event__slug='desucon2016', name='Vastaava')
)
class OrganizerSignupExtraForm(forms.ModelForm, AlternativeFormMixin):
def __init__(self, *args, **kwargs):
super(OrganizerSignupExtraForm, self).__init__(*args, **kwargs)
self.helper = horizontal_form_helper()
self.helper.form_tag = False
self.helper.layout = Layout(
Fieldset('Lisätiedot',
# 'shirt_size',
'special_diet',
'special_diet_other',
),
)
class Meta:
model = SignupExtraV2
fields = (
# 'shirt_size',
'special_diet',
'special_diet_other',
)
widgets = dict(
special_diet=forms.CheckboxSelectMultiple,
)
def get_excluded_field_defaults(self):
return dict(
shift_type='none',
desu_amount=666,
free_text='Syötetty käyttäen vastaavan ilmoittautumislomaketta',
)
class ProgrammeSignupExtraForm(forms.ModelForm, AlternativeFormMixin):
def __init__(self, *args, **kwargs):
super(ProgrammeSignupExtraForm, self).__init__(*args, **kwargs)
self.helper = horizontal_form_helper()
self.helper.form_tag = False
self.helper.layout = Layout(
# 'shirt_size',
'special_diet',
'special_diet_other',
)
class Meta:
model = SignupExtraV2
fields = (
# 'shirt_size',
'special_diet',
'special_diet_other',
)
widgets = dict(
special_diet=forms.CheckboxSelectMultiple,
)
def get_excluded_field_defaults(self):
return dict(
shift_type='none',
desu_amount=666,
free_text='Syötetty käyttäen ohjelmanjärjestäjän ilmoittautumislomaketta',
)
class SpecialistSignupForm(SignupForm, AlternativeFormMixin):
def get_job_categories_query(self, event, admin=False):
assert not admin
return Q(event__slug='desucon2016', slug__in=[
'pelisali',
'kahvila',
'sidosryhmat',
'av-tekniikka',
'logistiikka',
'desutv',
'tulkki',
'valokuvaaja',
])
def get_excluded_field_defaults(self):
return dict(
notes='Syötetty käyttäen jälki-ilmoittautumislomaketta',
)
class SpecialistSignupExtraForm(SignupExtraForm, AlternativeFormMixin):
def __init__(self, *args, **kwargs):
super(SpecialistSignupExtraForm, self).__init__(*args, **kwargs)
self.helper = horizontal_form_helper()
self.helper.form_tag = False
self.helper.layout = Layout(
'shift_type',
indented_without_label('night_work'),
Fieldset('Lisätiedot',
# 'shirt_size',
'special_diet',
'special_diet_other',
'desu_amount',
'prior_experience',
'free_text',
)
)
class Meta:
model = SignupExtraV2
fields = (
'shift_type',
# 'shirt_size',
'special_diet',
'special_diet_other',
'desu_amount',
'night_work',
'prior_experience',
'free_text',
)
widgets = dict(
special_diet=forms.CheckboxSelectMultiple,
)
| 28.014563 | 97 | 0.572344 |
0bfca8ae23b34363e45b1c60f0cad07e30487524
| 1,372 |
py
|
Python
|
src/OTLMOW/OTLModel/Datatypes/KlWildreflectorDrager.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2 |
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Datatypes/KlWildreflectorDrager.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Datatypes/KlWildreflectorDrager.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlWildreflectorDrager(KeuzelijstField):
"""Mogelijke dragers van een wildreflector."""
naam = 'KlWildreflectorDrager'
label = 'Wildreflector drager'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlWildreflectorDrager'
definition = 'Mogelijke dragers van een wildreflector.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlWildreflectorDrager'
options = {
'houten-paal': KeuzelijstWaarde(invulwaarde='houten-paal',
label='houten paal',
definitie='houten paal als drager.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWildreflectorDrager/houten-paal'),
'metalen-paal': KeuzelijstWaarde(invulwaarde='metalen-paal',
label='metalen paal',
definitie='metalen paal als drager.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlWildreflectorDrager/metalen-paal')
}
| 54.88 | 141 | 0.641399 |
d538b82b5837a9e9c08e46c88f578613bd4e7b4c
| 25,432 |
py
|
Python
|
lib/streamlit/state/session_state.py
|
AnOctopus/streamlit
|
6c5384f62c1415538347fa751185e5c487673f82
|
[
"Apache-2.0"
] | 19,099 |
2019-08-25T14:00:15.000Z
|
2022-03-31T21:00:28.000Z
|
lib/streamlit/state/session_state.py
|
AnOctopus/streamlit
|
6c5384f62c1415538347fa751185e5c487673f82
|
[
"Apache-2.0"
] | 3,078 |
2019-08-25T19:50:14.000Z
|
2022-03-31T23:26:14.000Z
|
lib/streamlit/state/session_state.py
|
AnOctopus/streamlit
|
6c5384f62c1415538347fa751185e5c487673f82
|
[
"Apache-2.0"
] | 1,892 |
2019-08-26T04:44:24.000Z
|
2022-03-30T16:11:51.000Z
|
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import json
from streamlit.stats import CacheStat, CacheStatsProvider
from streamlit.type_util import Key
from typing import (
TYPE_CHECKING,
Any,
KeysView,
cast,
Dict,
Iterator,
MutableMapping,
Optional,
Union,
Tuple,
Callable,
Set,
List,
)
import attr
from pympler.asizeof import asizeof
import streamlit as st
from streamlit import logger as _logger
from streamlit.errors import StreamlitAPIException
from streamlit.proto.WidgetStates_pb2 import WidgetState as WidgetStateProto
from streamlit.proto.WidgetStates_pb2 import WidgetStates as WidgetStatesProto
if TYPE_CHECKING:
from streamlit.server.server import SessionInfo
logger = _logger.get_logger(__name__)
GENERATED_WIDGET_KEY_PREFIX = "$$GENERATED_WIDGET_KEY"
STREAMLIT_INTERNAL_KEY_PREFIX = "$$STREAMLIT_INTERNAL_KEY"
SCRIPT_RUN_WITHOUT_ERRORS_KEY = (
f"{STREAMLIT_INTERNAL_KEY_PREFIX}_SCRIPT_RUN_WITHOUT_ERRORS"
)
@attr.s(auto_attribs=True, slots=True, frozen=True)
class Serialized:
value: WidgetStateProto
@attr.s(auto_attribs=True, slots=True, frozen=True)
class Value:
value: Any
WState = Union[Serialized, Value]
WidgetArgs = Tuple[Any, ...]
WidgetCallback = Callable[..., None]
# A deserializer receives the value from whatever field is set on the
# WidgetState proto, and returns a regular python value. A serializer
# receives a regular python value, and returns something suitable for
# a value field on WidgetState proto. They should be inverses.
WidgetDeserializer = Callable[[Any, str], Any]
WidgetSerializer = Callable[[Any], Any]
WidgetKwargs = Dict[str, Any]
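# Illustrative sketch (hypothetical, not part of Streamlit): one inverse
# (serializer, deserializer) pair of the kind described above, loosely in the
# spirit of a widget that stores selected options as indices into a fixed list.
_DEMO_OPTIONS = ["small", "medium", "large"]
def _demo_serializer(value: List[str]) -> List[int]:
    # regular python value -> proto-friendly value (list of option indices)
    return [_DEMO_OPTIONS.index(v) for v in value]
def _demo_deserializer(value: List[int], widget_id: str) -> List[str]:
    # proto value -> regular python value; inverse of _demo_serializer
    return [_DEMO_OPTIONS[i] for i in value]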
@attr.s(auto_attribs=True, slots=True, frozen=True)
class WidgetMetadata:
id: str
deserializer: WidgetDeserializer = attr.ib(repr=False)
serializer: WidgetSerializer = attr.ib(repr=False)
value_type: Any
callback: Optional[WidgetCallback] = None
callback_args: Optional[WidgetArgs] = None
callback_kwargs: Optional[WidgetKwargs] = None
@attr.s(auto_attribs=True, slots=True)
class WStates(MutableMapping[str, Any]):
states: Dict[str, WState] = attr.Factory(dict)
widget_metadata: Dict[str, WidgetMetadata] = attr.Factory(dict)
def __getitem__(self, k: str) -> Any:
item = self.states.get(k)
if item is not None:
if isinstance(item, Value):
return item.value
else:
metadata = self.widget_metadata.get(k)
if metadata is None:
# No deserializer, which should only happen if state is
# gotten from a reconnecting browser and the script is
# trying to access it. Pretend it doesn't exist.
raise KeyError(k)
value_type = cast(str, item.value.WhichOneof("value"))
value = item.value.__getattribute__(value_type)
# Array types are messages with data in a `data` field
if value_type in [
"double_array_value",
"int_array_value",
"string_array_value",
]:
value = value.data
elif value_type == "json_value":
value = json.loads(value)
deserialized = metadata.deserializer(value, metadata.id)
# Update metadata to reflect information from WidgetState proto
self.set_widget_metadata(attr.evolve(metadata, value_type=value_type))
self.states[k] = Value(deserialized)
return deserialized
else:
raise KeyError(k)
def __setitem__(self, k: str, v: WState):
self.states[k] = v
def __delitem__(self, k: str) -> None:
del self.states[k]
def __len__(self) -> int:
return len(self.states)
def __iter__(self):
# For this and many other methods, we can't simply delegate to the
# states field, because we need to invoke `__getitem__` for any
# values, to handle deserialization and unwrapping of values.
for key in self.states:
yield key
def keys(self) -> KeysView[str]:
return KeysView(self.states)
def items(self) -> Set[Tuple[str, Any]]: # type: ignore
return {(k, self[k]) for k in self}
def values(self) -> Set[Any]: # type: ignore
return {self[wid] for wid in self}
def update(self, other: "WStates"): # type: ignore
self.states.update(other.states)
self.widget_metadata.update(other.widget_metadata)
def set_widget_from_proto(self, widget_state: WidgetStateProto):
self[widget_state.id] = Serialized(widget_state)
def set_from_value(self, k: str, v: Any):
self[k] = Value(v)
def set_widget_metadata(self, widget_meta: WidgetMetadata):
self.widget_metadata[widget_meta.id] = widget_meta
def cull_nonexistent(self, widget_ids: Set[str]) -> None:
"""Removes items in state that aren't present in a set of provided
widget_ids.
"""
self.states = {k: v for k, v in self.states.items() if k in widget_ids}
def get_serialized(
self, k: str, default: Optional[WidgetStateProto] = None
) -> Optional[WidgetStateProto]:
widget = WidgetStateProto()
widget.id = k
item = self.states.get(k)
if item is not None:
if isinstance(item, Value):
metadata = self.widget_metadata.get(k)
if metadata is None:
return default
else:
field = metadata.value_type
serialized = metadata.serializer(item.value)
if field in (
"double_array_value",
"int_array_value",
"string_array_value",
):
arr = getattr(widget, field)
arr.data.extend(serialized)
elif field == "json_value":
setattr(widget, field, json.dumps(serialized))
elif field == "file_uploader_state_value":
widget.file_uploader_state_value.CopyFrom(serialized)
else:
setattr(widget, field, serialized)
return widget
else:
return item.value
else:
return default
def as_widget_states(self) -> List[WidgetStateProto]:
states = [
self.get_serialized(widget_id)
for widget_id in self.states.keys()
if self.get_serialized(widget_id)
]
states = cast(List[WidgetStateProto], states)
return states
def call_callback(self, widget_id: str) -> None:
metadata = self.widget_metadata.get(widget_id)
assert metadata is not None
callback = metadata.callback
if callback is None:
return
args = metadata.callback_args or ()
kwargs = metadata.callback_kwargs or {}
callback(*args, **kwargs)
def _missing_key_error_message(key: str) -> str:
return (
f'st.session_state has no key "{key}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
)
def _missing_attr_error_message(attr_name: str) -> str:
return (
f'st.session_state has no attribute "{attr_name}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
)
@attr.s(auto_attribs=True, slots=True)
class SessionState(MutableMapping[str, Any]):
"""SessionState allows users to store values that persist between app
reruns.
SessionState objects are created lazily when a script accesses
st.session_state.
Example
-------
>>> if "num_script_runs" not in st.session_state:
... st.session_state.num_script_runs = 0
>>> st.session_state.num_script_runs += 1
>>> st.write(st.session_state.num_script_runs) # writes 1
The next time your script runs, the value of
st.session_state.num_script_runs will be preserved.
>>> st.session_state.num_script_runs += 1
>>> st.write(st.session_state.num_script_runs) # writes 2
"""
# All the values from previous script runs, squished together to save memory
_old_state: Dict[str, Any] = attr.Factory(dict)
# Values set in session state during the current script run, possibly for
# setting a widget's value. Keyed by a user provided string.
_new_session_state: Dict[str, Any] = attr.Factory(dict)
    # Widget values from the frontend, usually the one whose change prompted the script rerun
_new_widget_state: WStates = attr.Factory(WStates)
# Keys used for widgets will be eagerly converted to the matching widget id
_key_id_mapping: Dict[str, str] = attr.Factory(dict)
# is it possible for a value to get through this without being deserialized?
def compact_state(self) -> None:
for key_or_wid in self:
self._old_state[key_or_wid] = self[key_or_wid]
self._new_session_state.clear()
self._new_widget_state.clear()
def _compact(self) -> "SessionState":
state: SessionState = self.copy()
state.compact_state()
return state
def clear_state(self) -> None:
self._old_state.clear()
self._new_session_state.clear()
self._new_widget_state.clear()
self._key_id_mapping.clear()
def _safe_widget_state(self) -> Dict[str, Any]:
"""Returns widget states for all widgets with deserializers registered.
On a browser tab reconnect, it's possible for widgets in
self._new_widget_state to not have deserializers registered, which will
result in trying to access them raising a KeyError. This results in
things exploding if we try to naively use the splat operator on
self._new_widget_state in _merged_state below.
"""
wstate = {}
for k in self._new_widget_state.keys():
try:
wstate[k] = self._new_widget_state[k]
except KeyError:
pass
return wstate
@property
def _merged_state(self) -> Dict[str, Any]:
return {k: self[k] for k in self}
@property
def filtered_state(self) -> Dict[str, Any]:
"""The combined session and widget state, excluding keyless widgets."""
wid_key_map = self.reverse_key_wid_map
state: Dict[str, Any] = {}
        # We can't write `for k, v in self.items()` here because doing so will
        # run into a `KeyError` if widget metadata has been cleared (which
        # happens when the Streamlit server restarts or the cache is cleared)
        # and we then receive a widget's state from the browser.
for k in self.keys():
if not is_widget_id(k) and not is_internal_key(k):
state[k] = self[k]
elif is_keyed_widget_id(k):
try:
key = wid_key_map[k]
state[key] = self[k]
except KeyError:
                    # The widget id no longer maps to a key; it is a not-yet-
                    # cleared value left in old state for a reset widget.
pass
return state
@property
def reverse_key_wid_map(self) -> Dict[str, str]:
wid_key_map = {v: k for k, v in self._key_id_mapping.items()}
return wid_key_map
def keys(self) -> Set[str]: # type: ignore
"""All keys active in Session State, with widget keys converted
to widget ids when one is known."""
old_keys = {self._get_widget_id(k) for k in self._old_state.keys()}
new_widget_keys = set(self._new_widget_state.keys())
new_session_state_keys = {
self._get_widget_id(k) for k in self._new_session_state.keys()
}
return old_keys | new_widget_keys | new_session_state_keys
def is_new_state_value(self, user_key: str) -> bool:
return user_key in self._new_session_state
def is_new_widget_value(self, widget_id: str) -> bool:
return widget_id in self._new_widget_state
def __iter__(self) -> Iterator[Any]:
return iter(self.keys())
def __len__(self) -> int:
return len(self.keys())
def __str__(self):
return str(self._merged_state)
def __getitem__(self, key: str) -> Any:
wid_key_map = self.reverse_key_wid_map
widget_id = self._get_widget_id(key)
if widget_id in wid_key_map and widget_id == key:
# the "key" is a raw widget id, so get its associated user key for lookup
key = wid_key_map[widget_id]
try:
return self._getitem(widget_id, key)
except KeyError:
raise KeyError(_missing_key_error_message(key))
def _getitem(self, widget_id: Optional[str], user_key: Optional[str]) -> Any:
"""Get the value of an entry in Session State, using either the
user-provided key or a widget id as appropriate for the internal dict
being accessed.
At least one of the arguments must have a value."""
assert user_key is not None or widget_id is not None
if user_key is not None:
try:
return self._new_session_state[user_key]
except KeyError:
pass
if widget_id is not None:
try:
return self._new_widget_state[widget_id]
except KeyError:
pass
# Typically, there won't be both a widget id and an associated state key in
# old state at the same time, so the order we check is arbitrary.
# The exception is if session state is set and then a later run has
# a widget created, so the widget id entry should be newer.
# The opposite case shouldn't happen, because setting the value of a widget
# through session state will result in the next widget state reflecting that
# value.
if widget_id is not None:
try:
return self._old_state[widget_id]
except KeyError:
pass
if user_key is not None:
try:
return self._old_state[user_key]
except KeyError:
pass
raise KeyError
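    # Lookup-precedence sketch (illustrative only, not part of the original
    # module): values set during the current run win over carried-over state.
    #
    #     ss = SessionState()
    #     ss._old_state["x"] = 0
    #     ss._new_session_state["x"] = 1
    #     assert ss["x"] == 1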
def __setitem__(self, user_key: str, value: Any) -> None:
from streamlit.report_thread import get_report_ctx
ctx = get_report_ctx()
if ctx is not None:
widget_id = self._key_id_mapping.get(user_key, None)
widget_ids = ctx.widget_ids_this_run.items()
form_ids = ctx.form_ids_this_run.items()
if widget_id in widget_ids or user_key in form_ids:
raise StreamlitAPIException(
f"`st.session_state.{user_key}` cannot be modified after the widget"
f" with key `{user_key}` is instantiated."
)
self._new_session_state[user_key] = value
def __delitem__(self, key: str) -> None:
widget_id = self._get_widget_id(key)
if not (key in self or widget_id in self):
raise KeyError(_missing_key_error_message(key))
if key in self._new_session_state:
del self._new_session_state[key]
if key in self._old_state:
del self._old_state[key]
if key in self._key_id_mapping:
del self._key_id_mapping[key]
if widget_id in self._new_widget_state:
del self._new_widget_state[widget_id]
if widget_id in self._old_state:
del self._old_state[widget_id]
def update(self, other: "SessionState"): # type: ignore
self._new_session_state.update(other._new_session_state)
self._new_widget_state.update(other._new_widget_state)
self._old_state.update(other._old_state)
self._key_id_mapping.update(other._key_id_mapping)
def set_widgets_from_proto(self, widget_states: WidgetStatesProto):
for state in widget_states.widgets:
self._new_widget_state.set_widget_from_proto(state)
def call_callbacks(self):
from streamlit.script_runner import RerunException
changed_widget_ids = [
wid for wid in self._new_widget_state if self._widget_changed(wid)
]
for wid in changed_widget_ids:
try:
self._new_widget_state.call_callback(wid)
except RerunException:
st.warning(
"Calling st.experimental_rerun() within a callback is a no-op."
)
def _widget_changed(self, widget_id: str) -> bool:
new_value = self._new_widget_state.get(widget_id)
old_value = self._old_state.get(widget_id)
changed: bool = new_value != old_value
return changed
def reset_triggers(self) -> None:
"""Sets all trigger values in our state dictionary to False."""
for state_id in self._new_widget_state:
metadata = self._new_widget_state.widget_metadata.get(state_id)
if metadata is not None:
if metadata.value_type == "trigger_value":
self._new_widget_state[state_id] = Value(False)
for state_id in self._old_state:
metadata = self._new_widget_state.widget_metadata.get(state_id)
if metadata is not None:
if metadata.value_type == "trigger_value":
self._old_state[state_id] = False
def cull_nonexistent(self, widget_ids: Set[str]):
self._new_widget_state.cull_nonexistent(widget_ids)
# Remove entries from _old_state corresponding to
# widgets not in widget_ids.
self._old_state = {
k: v
for k, v in self._old_state.items()
if (k in widget_ids or not is_widget_id(k))
}
def set_metadata(self, widget_metadata: WidgetMetadata) -> None:
widget_id = widget_metadata.id
self._new_widget_state.widget_metadata[widget_id] = widget_metadata
def maybe_set_new_widget_value(
self, widget_id: str, user_key: Optional[str] = None
) -> None:
"""Add the value of a new widget to session state."""
widget_metadata = self._new_widget_state.widget_metadata[widget_id]
deserializer = widget_metadata.deserializer
initial_widget_value = deepcopy(deserializer(None, widget_metadata.id))
if widget_id not in self and (user_key is None or user_key not in self):
# This is the first time this widget is being registered, so we save
# its value in widget state.
self._new_widget_state.set_from_value(widget_id, initial_widget_value)
def should_set_frontend_state_value(
self, widget_id: str, user_key: Optional[str]
) -> bool:
"""Keep widget_state and session_state in sync when a widget is registered.
This method returns whether the frontend needs to be updated with the
new value of this widget.
"""
if user_key is None:
return False
return self.is_new_state_value(user_key)
def get_value_for_registration(self, widget_id: str) -> Any:
"""Get the value of a widget, for use as its return value.
Returns a copy, so reference types can't be accidentally mutated by user code.
"""
value = self[widget_id]
return deepcopy(value)
def as_widget_states(self) -> List[WidgetStateProto]:
return self._new_widget_state.as_widget_states()
def _get_widget_id(self, k: str) -> str:
"""Turns a value that might be a widget id or a user provided key into
an appropriate widget id.
"""
return self._key_id_mapping.get(k, k)
def set_key_widget_mapping(self, widget_id: str, user_key: str) -> None:
self._key_id_mapping[user_key] = widget_id
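    # Sketch of the user key <-> widget id mapping; the generated id below is a
    # made-up placeholder, not a real Streamlit id:
    #
    #     ss = SessionState()
    #     ss.set_key_widget_mapping("$$GENERATED-widget-123", "my_slider")
    #     assert ss._get_widget_id("my_slider") == "$$GENERATED-widget-123"
    #     assert ss._get_widget_id("unknown_key") == "unknown_key"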
def copy(self):
return deepcopy(self)
def set_keyed_widget(
self, metadata: WidgetMetadata, widget_id: str, user_key: str
) -> None:
self.set_metadata(metadata)
self.set_key_widget_mapping(widget_id, user_key)
self.maybe_set_new_widget_value(widget_id, user_key)
def set_unkeyed_widget(self, metadata: WidgetMetadata, widget_id: str) -> None:
self.set_metadata(metadata)
self.maybe_set_new_widget_value(widget_id)
def get_metadata_by_key(self, user_key: str) -> WidgetMetadata:
widget_id = self._key_id_mapping[user_key]
return self._new_widget_state.widget_metadata[widget_id]
def get_stats(self) -> List[CacheStat]:
stat = CacheStat("st_session_state", "", asizeof(self))
return [stat]
def is_widget_id(key: str) -> bool:
return key.startswith(GENERATED_WIDGET_KEY_PREFIX)
# TODO: It would be better to distinguish keyed vs. unkeyed widgets through more principled means
def is_keyed_widget_id(key: str) -> bool:
return is_widget_id(key) and not key.endswith("-None")
def is_internal_key(key: str) -> bool:
return key.startswith(STREAMLIT_INTERNAL_KEY_PREFIX)
_state_use_warning_already_displayed = False
def get_session_state() -> SessionState:
"""Get the SessionState object for the current session.
Note that in streamlit scripts, this function should not be called
directly. Instead, SessionState objects should be accessed via
st.session_state.
"""
global _state_use_warning_already_displayed
from streamlit.report_thread import get_report_ctx
ctx = get_report_ctx()
# If there is no report context because the script is run bare, have
# session state act as an always empty dictionary, and print a warning.
if ctx is None:
if not _state_use_warning_already_displayed:
_state_use_warning_already_displayed = True
if not st._is_running_with_streamlit:
logger.warning(
"Session state does not function when running a script without `streamlit run`"
)
return SessionState()
return ctx.session_state
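# Inside a script launched with `streamlit run`, the same object is reachable
# both through this function and through the st.session_state wrapper defined
# below (a sketch, not an addition to the API):
#
#     state = get_session_state()
#     state["visits"] = state.get("visits", 0) + 1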
class LazySessionState(MutableMapping[str, Any]):
"""A lazy wrapper around SessionState.
SessionState can't be instantiated normally in lib/streamlit/__init__.py
because there may not be a ReportSession yet. Instead we have this wrapper,
which delegates to the SessionState for the active ReportSession. This will
    only be interacted with from within an app script, that is, when a
    ReportSession is guaranteed to exist.
"""
def _validate_key(self, key) -> None:
if key.startswith(GENERATED_WIDGET_KEY_PREFIX):
raise StreamlitAPIException(
f"Keys beginning with {GENERATED_WIDGET_KEY_PREFIX} are reserved."
)
def __iter__(self) -> Iterator[Any]:
state = get_session_state()
return iter(state.filtered_state)
def __len__(self) -> int:
state = get_session_state()
return len(state.filtered_state)
def __str__(self):
state = get_session_state()
return str(state.filtered_state)
def __getitem__(self, key: Key) -> Any:
key = str(key)
self._validate_key(key)
state = get_session_state()
return state[key]
def __setitem__(self, key: Key, value: Any) -> None:
key = str(key)
self._validate_key(key)
state = get_session_state()
state[key] = value
def __delitem__(self, key: Key) -> None:
key = str(key)
self._validate_key(key)
state = get_session_state()
del state[key]
def __getattr__(self, key: str) -> Any:
self._validate_key(key)
try:
return self[key]
except KeyError:
raise AttributeError(_missing_attr_error_message(key))
def __setattr__(self, key: str, value: Any) -> None:
self._validate_key(key)
self[key] = value
def __delattr__(self, key: str) -> None:
self._validate_key(key)
try:
del self[key]
except KeyError:
raise AttributeError(_missing_attr_error_message(key))
def to_dict(self) -> Dict[str, Any]:
state = get_session_state()
return state.filtered_state
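    # Usage sketch (illustrative only): attribute access and item access go
    # through the same validation and the same underlying SessionState.
    #
    #     st.session_state["color"] = "blue"
    #     assert st.session_state.color == "blue"
    #     del st.session_state.color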
@attr.s(auto_attribs=True, slots=True)
class SessionStateStatProvider(CacheStatsProvider):
_session_info_by_id: Dict[str, "SessionInfo"]
def get_stats(self) -> List[CacheStat]:
stats: List[CacheStat] = []
for session_info in self._session_info_by_id.values():
session_state = session_info.session.session_state
stats.extend(session_state.get_stats())
return stats
| 35.619048 | 102 | 0.643953 |
f0dfd3feffa7b91535ff1745d77defee40f816de | 2,342 | py | Python | scrapy/contrib/downloadermiddleware/robotstxt.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | ["BSD-3-Clause"] | 1 | 2015-04-01T20:02:08.000Z | 2015-04-01T20:02:08.000Z | scrapy/contrib/downloadermiddleware/robotstxt.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | ["BSD-3-Clause"] | 2 | 2021-12-13T20:51:32.000Z | 2022-02-11T03:47:35.000Z | scrapy/contrib/downloadermiddleware/robotstxt.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | ["BSD-3-Clause"] | 1 | 2017-11-09T20:33:59.000Z | 2017-11-09T20:33:59.000Z |
"""
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
"""
import robotparser
from scrapy import signals, log
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
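# A minimal sketch of turning this on in a project's settings.py, per the module
# docstring above; the priority number is an arbitrary choice, not something
# mandated by this middleware:
#
#     ROBOTSTXT_OBEY = True
#     DOWNLOADER_MIDDLEWARES = {
#         'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
#     }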
class RobotsTxtMiddleware(object):
DOWNLOAD_PRIORITY = 1000
def __init__(self, crawler):
if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
raise NotConfigured
self.crawler = crawler
self._parsers = {}
self._spider_netlocs = {}
self._useragents = {}
crawler.signals.connect(self.spider_opened, signals.spider_opened)
crawler.signals.connect(self.spider_closed, signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
useragent = self._useragents[spider]
rp = self.robot_parser(request, spider)
if rp and not rp.can_fetch(useragent, request.url):
log.msg(format="Forbidden by robots.txt: %(request)s",
level=log.DEBUG, request=request)
raise IgnoreRequest
def robot_parser(self, request, spider):
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = None
robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
robotsreq = Request(robotsurl, priority=self.DOWNLOAD_PRIORITY)
dfd = self.crawler.engine.download(robotsreq, spider)
dfd.addCallback(self._parse_robots)
self._spider_netlocs[spider].add(netloc)
return self._parsers[netloc]
def _parse_robots(self, response):
rp = robotparser.RobotFileParser(response.url)
rp.parse(response.body.splitlines())
self._parsers[urlparse_cached(response).netloc] = rp
def spider_opened(self, spider):
self._spider_netlocs[spider] = set()
self._useragents[spider] = spider.settings['USER_AGENT']
def spider_closed(self, spider):
for netloc in self._spider_netlocs[spider]:
del self._parsers[netloc]
del self._spider_netlocs[spider]
del self._useragents[spider]
| 35.484848 | 76 | 0.675064 |
37c3a37dcfcc9d2963b4bf2087501212fb7994c0 | 3,544 | py | Python | desktop/core/ext-py/Django-1.11/tests/managers_regress/models.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | ["Apache-2.0"] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | tests/managers_regress/models.py | 287977288/test | 142e3626ab3c676574631383ae6b5a4eced5a10e | ["PSF-2.0", "BSD-3-Clause"] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | tests/managers_regress/models.py | 287977288/test | 142e3626ab3c676574631383ae6b5a4eced5a10e | ["PSF-2.0", "BSD-3-Clause"] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z |
"""
Various edge-cases for model managers.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
class OnlyFred(models.Manager):
def get_queryset(self):
return super(OnlyFred, self).get_queryset().filter(name='fred')
class OnlyBarney(models.Manager):
def get_queryset(self):
return super(OnlyBarney, self).get_queryset().filter(name='barney')
class Value42(models.Manager):
def get_queryset(self):
return super(Value42, self).get_queryset().filter(value=42)
class AbstractBase1(models.Model):
name = models.CharField(max_length=50)
class Meta:
abstract = True
# Custom managers
manager1 = OnlyFred()
manager2 = OnlyBarney()
objects = models.Manager()
class AbstractBase2(models.Model):
value = models.IntegerField()
class Meta:
abstract = True
# Custom manager
restricted = Value42()
# No custom manager on this class to make sure the default case doesn't break.
class AbstractBase3(models.Model):
comment = models.CharField(max_length=50)
class Meta:
abstract = True
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=50)
manager = OnlyFred()
def __str__(self):
return self.name
# Managers from base classes are inherited and, if no manager is specified
# *and* the parent has a manager specified, the first one (in the MRO) will
# become the default.
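# A short sketch of what that means for the concrete models below (queries are
# illustrative and assume some rows have been saved):
#
#     Child1.manager1.all()    # inherited OnlyFred manager: only name='fred'
#     Child2.restricted.all()  # inherited Value42 manager: only value=42
#     Child4.default.all()     # Child4 declares its own plain default manager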
@python_2_unicode_compatible
class Child1(AbstractBase1):
data = models.CharField(max_length=25)
def __str__(self):
return self.data
@python_2_unicode_compatible
class Child2(AbstractBase1, AbstractBase2):
data = models.CharField(max_length=25)
def __str__(self):
return self.data
@python_2_unicode_compatible
class Child3(AbstractBase1, AbstractBase3):
data = models.CharField(max_length=25)
def __str__(self):
return self.data
@python_2_unicode_compatible
class Child4(AbstractBase1):
data = models.CharField(max_length=25)
# Should be the default manager, although the parent managers are
# inherited.
default = models.Manager()
def __str__(self):
return self.data
@python_2_unicode_compatible
class Child5(AbstractBase3):
name = models.CharField(max_length=25)
default = OnlyFred()
objects = models.Manager()
def __str__(self):
return self.name
class Child6(Child4):
value = models.IntegerField()
class Meta:
manager_inheritance_from_future = True
class Child7(Parent):
objects = models.Manager()
# RelatedManagers
@python_2_unicode_compatible
class RelatedModel(models.Model):
test_gfk = GenericRelation('RelationModel', content_type_field='gfk_ctype', object_id_field='gfk_id')
exact = models.NullBooleanField()
def __str__(self):
return force_text(self.pk)
@python_2_unicode_compatible
class RelationModel(models.Model):
fk = models.ForeignKey(RelatedModel, models.CASCADE, related_name='test_fk')
m2m = models.ManyToManyField(RelatedModel, related_name='test_m2m')
gfk_ctype = models.ForeignKey(ContentType, models.SET_NULL, null=True)
gfk_id = models.IntegerField(null=True)
gfk = GenericForeignKey(ct_field='gfk_ctype', fk_field='gfk_id')
def __str__(self):
return force_text(self.pk)
| 23.470199 | 105 | 0.723758 |
f42e9ecf6eb650886b8a1107314c2747b0bcf0b5 | 18,060 | py | Python | pcdet/models/detectors/detector3d_template.py | penghao1990/SGNet | 99b9126a4b8ae85ba258a4150cc756ac73a7b7fb | ["Apache-2.0"] | 1 | 2021-12-30T10:12:38.000Z | 2021-12-30T10:12:38.000Z | pcdet/models/detectors/detector3d_template.py | penghao1990/SGNet | 99b9126a4b8ae85ba258a4150cc756ac73a7b7fb | ["Apache-2.0"] | null | null | null | pcdet/models/detectors/detector3d_template.py | penghao1990/SGNet | 99b9126a4b8ae85ba258a4150cc756ac73a7b7fb | ["Apache-2.0"] | null | null | null |
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
def __init__(self, model_cfg, num_class, dataset):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.dataset = dataset
self.class_names = dataset.class_names
self.register_buffer('global_step', torch.LongTensor(1).zero_())
self.module_topology = [
'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
'backbone_2d', 'dense_head', 'point_head', 'roi_head'
]
@property
def mode(self):
return 'TRAIN' if self.training else 'TEST'
def update_global_step(self):
self.global_step += 1
def build_networks(self):
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
'num_point_features': self.dataset.point_feature_encoder.num_point_features,
'grid_size': self.dataset.grid_size,
'point_cloud_range': self.dataset.point_cloud_range,
'voxel_size': self.dataset.voxel_size,
'depth_downsample_factor': self.dataset.depth_downsample_factor
}
for module_name in self.module_topology:
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
return model_info_dict['module_list']
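    # Sketch of how a concrete detector built on this template typically wires
    # itself up (the subclass name here is an assumption, not part of this file):
    #
    #     class MyDetector(Detector3DTemplate):
    #         def __init__(self, model_cfg, num_class, dataset):
    #             super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
    #             self.module_list = self.build_networks()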
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
grid_size=model_info_dict['grid_size'],
depth_downsample_factor=model_info_dict['depth_downsample_factor']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \
if hasattr(backbone_3d_module, 'backbone_channels') else None
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
model_cfg=self.model_cfg.DENSE_HEAD,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
voxel_size=self.dataset.voxel_size,
point_cloud_range=self.dataset.point_cloud_range,
backbone_channels=model_info_dict['backbone_channels'],
model_cfg=self.model_cfg.POINT_HEAD,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
backbone_channels=model_info_dict['backbone_channels'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def forward(self, **kwargs):
raise NotImplementedError
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
Returns:
"""
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
# selected, selected_scores = model_nms_utils.class_agnostic_nms(
# box_scores=cls_preds, box_preds=box_preds,
# nms_config=post_process_cfg.NMS_CONFIG,
# score_thresh=post_process_cfg.SCORE_THRESH
# )
selected, selected_scores = model_nms_utils.multi_thresh(
box_scores=cls_preds, box_labels=label_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
if post_process_cfg.OUTPUT_RAW_SCORE:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
if 'version' in checkpoint:
logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])
update_model_state = {}
for key, val in model_state_disk.items():
if key in self.state_dict() and self.state_dict()[key].shape == model_state_disk[key].shape:
update_model_state[key] = val
logger.info('Update weight %s: %s' % (key, str(val.shape)))
state_dict = self.state_dict()
state_dict.update(update_model_state)
self.load_state_dict(state_dict)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self.load_state_dict(checkpoint['model_state'])
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 45.837563 | 111 | 0.629568 |
dc51dd62be5048150d351425bc4af70a18437739 | 476 | py | Python | 424.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | ["MIT"] | null | null | null | 424.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | ["MIT"] | null | null | null | 424.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | ["MIT"] | null | null | null |
class Solution:
def characterReplacement(self, s: str, k: int) -> int:
import collections
count = collections.Counter()
res = 0
start = 0
for i, char in enumerate(s):
            count[char] += 1
            # Count of the most frequent character in the current window [start, i].
            maxCnt = count.most_common(1)[0][1]
            # Shrink the window while it would need more than k replacements.
            while i - start + 1 - maxCnt > k:
                count[s[start]] = count[s[start]] - 1
                start += 1
            res = max(res, i - start + 1)
return res
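# Quick sanity checks against the classic LeetCode 424 examples:
#
#     sol = Solution()
#     assert sol.characterReplacement("ABAB", 2) == 4
#     assert sol.characterReplacement("AABABBA", 1) == 4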
| 31.733333 | 58 | 0.481092 |
7d8006677b1c56389465c56214c4f24a29c11834 | 3,187 | py | Python | app.py | raynoldng/dialogflow-demo2 | f63deeff2b8d9950432596879cd95812127baedc | ["Apache-2.0"] | null | null | null | app.py | raynoldng/dialogflow-demo2 | f63deeff2b8d9950432596879cd95812127baedc | ["Apache-2.0"] | null | null | null | app.py | raynoldng/dialogflow-demo2 | f63deeff2b8d9950432596879cd95812127baedc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import urllib
import urllib.request
import json
from dateutil import parser
# the module is imported for datetime.datetime below; only timezone is needed by name
from datetime import timezone
import datetime
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") == "shipping.cost":
return shippingCost(req)
elif req.get("result").get("action") == "bus.eta":
return busETA(req)
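# Sketch of the Dialogflow v1-style body this dispatcher expects; the parameter
# values below are illustrative assumptions, not taken from a real agent:
#
#     {"result": {"action": "bus.eta",
#                 "parameters": {"busStopId": 83139, "busNo": "15"}}}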
def busETA(req):
if req.get("result").get("action") != "bus.eta":
return {}
result = req.get("result")
parameters = result.get("parameters")
busStopId = parameters.get("busStopId")
busNo = parameters.get("busNo")
print("result", result)
APIURL = "https://arrivelah.herokuapp.com/?id="
url = APIURL + str(busStopId)
print("api url:", url)
res = urllib.request.urlopen(APIURL + str(busStopId)).read()
# res = requests.get('http://example.com').content
print("res_read:", res)
resJSON = json.loads(res)
print("api response;", resJSON)
busRowResult = [s for s in resJSON["services"] if s["no"] == str(busNo)]
assert(len(busRowResult) == 1)
bus = busRowResult[0]
# now = datetime.now(timezone.utc)
currTime = datetime.datetime.now(timezone.utc)
nextTime = parser.parse(bus["next"]["time"])
subTime = parser.parse(bus["subsequent"]["time"])
# parser.parse("2017-12-11T12:07:41+08:00")
print("times:", currTime, nextTime, subTime)
nextDelta = nextTime - currTime
subDelta = subTime - currTime
print(nextDelta, subDelta)
eta = (nextDelta.seconds // 60, subDelta.seconds // 60)
speech = "Eta of bus " + str(busNo) + " at stop " + str(busStopId) + " is " + str(eta[0]) \
+ " and " + str(eta[1]) + " minutes"
print("returning as speech:", speech)
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
def shippingCost(req):
if req.get("result").get("action") != "shipping.cost":
return {}
result = req.get("result")
parameters = result.get("parameters")
zone = parameters.get("shipping-zone")
cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':400, 'Africa':500}
speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=True, port=port, host='0.0.0.0')
| 28.455357 | 95 | 0.618764 |
df6f645aa2a8972ad82d96f3ddf57a7a333e9ff5 | 4,949 | py | Python | odeo/models/cash.py | odeoteknologi/odeo-python-sdk | 31a0b21a8b23d887fef16b09e1293091e520a23e | ["MIT"] | null | null | null | odeo/models/cash.py | odeoteknologi/odeo-python-sdk | 31a0b21a8b23d887fef16b09e1293091e520a23e | ["MIT"] | null | null | null | odeo/models/cash.py | odeoteknologi/odeo-python-sdk | 31a0b21a8b23d887fef16b09e1293091e520a23e | ["MIT"] | null | null | null |
import dataclasses
from dataclasses import dataclass
from datetime import datetime
@dataclass
class Cash:
amount: int
currency: str
formatted_amount: str
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`Cash` object"""
return cls(
amount=json.get('amount'),
currency=json.get('currency'),
formatted_amount=json.get('formatted_amount')
)
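    # Minimal deserialization sketch (field values are made up for illustration):
    #
    #     Cash.from_json({"amount": 1000000, "currency": "IDR",
    #                     "formatted_amount": "Rp1.000.000"})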
@dataclass
class Balance:
cash: Cash
locked_cash: Cash
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`Balance` object"""
return cls(
cash=Cash.from_json(json.get('cash')),
locked_cash=Cash.from_json(json.get('locked_cash'))
)
@dataclass
class _BaseRequest:
receiver_user_id: int
amount: int
reference_id: str
@dataclass
class _DefaultRequest:
sender_user_id: int = None
note: str = None
@dataclass
class Request(_DefaultRequest, _BaseRequest):
def to_dict(self):
"""Convert :class:`Request` object to dictionary data type"""
return dataclasses.asdict(self)
@dataclass
class Channel:
fee: int
channel_id: int
pay_code: str
amount: int
total: int
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`Channel`"""
return cls(
fee=int(json.get('fee')),
channel_id=json.get('channel_id'),
pay_code=json.get('pay_code'),
amount=json.get('amount'),
total=json.get('total')
)
@dataclass
class Topup:
channels: list[Channel]
topup_id: str
expires_at: datetime
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`Topup`"""
expires_at = json.get('expires_at')
if expires_at is not None:
expires_at = datetime.utcfromtimestamp(float(expires_at))
return cls(
channels=list(map(lambda c: Channel.from_json(c), json.get('channels'))),
topup_id=json.get('topup_id'),
expires_at=expires_at
)
@dataclass
class CashTransaction:
cash_transaction_id: str
user_id: str
amount: int
balance_before: int
balance_after: int
transaction_type: str
created_at: datetime
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`CashTransaction`"""
created_at = json.get('created_at')
if created_at is not None:
created_at = datetime.utcfromtimestamp(float(created_at))
return cls(
cash_transaction_id=json.get('cash_transaction_id'),
user_id=json.get('user_id'),
amount=json.get('amount'),
balance_before=json.get('balance_before'),
balance_after=json.get('balance_after'),
transaction_type=json.get('transaction_type'),
created_at=created_at
)
@dataclass
class TransactionsHistory:
cash_transactions: list[CashTransaction]
next_page_token: str = None
@classmethod
    def from_json(cls, json: dict):
        """Convert from JSON dictionary to :class:`TransactionsHistory`"""
        # Default to an empty list so the constructor call below cannot raise an
        # UnboundLocalError when the key is missing from the payload.
        cash_transactions = []
        if 'cash_transactions' in json:
            cash_transactions = list(
                map(lambda c: CashTransaction.from_json(c), json.get('cash_transactions'))
            )
return cls(
cash_transactions=cash_transactions,
next_page_token=json.get('next_page_token') if 'next_page_token' in json else None)
@dataclass
class _BaseTransfer:
transfer_id: str
created_at: datetime
@dataclass
class Transfer(_DefaultRequest, _BaseRequest, _BaseTransfer):
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`Transfer`"""
created_at = json.get('created_at')
if created_at is not None:
created_at = datetime.utcfromtimestamp(float(created_at))
return cls(
transfer_id=json.get('transfer_id'),
sender_user_id=int(json.get('sender_user_id')),
receiver_user_id=int(json.get('receiver_user_id')),
amount=json.get('amount'),
reference_id=json.get('reference_id'),
note=json.get('note'),
created_at=created_at
)
@dataclass
class TransfersList:
transfers: list[Transfer]
next_page_token: str = None
@classmethod
def from_json(cls, json: dict):
"""Convert from JSON dictionary to :class:`TransfersList`"""
        # Same guard as above: default to an empty list when the key is absent.
        transfers = []
        if 'transfers' in json:
            transfers = list(map(lambda t: Transfer.from_json(t), json.get('transfers')))
return cls(
transfers=transfers,
next_page_token=json.get('next_page_token') if 'next_page_token' in json else None
)
| 25.510309 | 99 | 0.625985 |
76497f552091e53f84f8a206abce70f4e1833227 | 2,186 | py | Python | Chapter03/gpu_mandelbrot_lionel.py | liopeer/Hands-On-GPU-Programming-with-Python-and-CUDA | a4bedd805de56bb9509f878588a23618136fa502 | ["MIT"] | null | null | null | Chapter03/gpu_mandelbrot_lionel.py | liopeer/Hands-On-GPU-Programming-with-Python-and-CUDA | a4bedd805de56bb9509f878588a23618136fa502 | ["MIT"] | null | null | null | Chapter03/gpu_mandelbrot_lionel.py | liopeer/Hands-On-GPU-Programming-with-Python-and-CUDA | a4bedd805de56bb9509f878588a23618136fa502 | ["MIT"] | null | null | null |
from time import time
import matplotlib
#this will prevent the figure from popping up
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import pycuda.autoinit
from pycuda import gpuarray
from pycuda.elementwise import ElementwiseKernel
# the first string lists the kernel arguments; pycuda::complex<float> is pycuda's complex type
mandel_ker = ElementwiseKernel(
"pycuda::complex<float> *lattice, float *mandelbrot_graph, int max_iters, float upper_bound",
"""
mandelbrot_graph[i] = 0;
pycuda::complex<float> c = lattice[i];
pycuda::complex<float> z(0,0);
for (int j = 0; j < max_iters; j++)
{
z = z*z + c;
if(abs(z) > 1000)
{
mandelbrot_graph[i] = j;
break;
}
}
""",
"mandel_ker")
def gpu_mandelbrot(width, height, real_low, real_high, imag_low, imag_high, max_iters, upper_bound):
    # build the complex lattice with np.matrix on the CPU; pycuda does not accept np.matrix, so it is converted to a plain ndarray below
real_vals = np.matrix(np.linspace(real_low, real_high, width), dtype=np.complex64)
imag_vals = np.matrix(np.linspace(imag_high, imag_low, height), dtype=np.complex64) * 1j
# put them on top of each other
mandelbrot_lattice = np.array(real_vals + imag_vals.transpose(), dtype=np.complex64)
# copy complex lattice to the GPU
mandelbrot_lattice_gpu = gpuarray.to_gpu(mandelbrot_lattice)
# allocate an empty array on the GPU
mandelbrot_graph_gpu = gpuarray.empty(shape=mandelbrot_lattice.shape, dtype=np.float32)
mandel_ker(mandelbrot_lattice_gpu, mandelbrot_graph_gpu, np.int32(max_iters), np.float32(upper_bound))
mandelbrot_graph = mandelbrot_graph_gpu.get()
return mandelbrot_graph
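# Note: the escape test in the kernel above is hard-coded to abs(z) > 1000, so the
# upper_bound argument is effectively unused in this variant of the script.
# ElementwiseKernel runs its body once per element, with `i` as the implicit
# flattened index; a toy sketch of the same pattern (names are illustrative):
#
#     double_ker = ElementwiseKernel("float *x, float *y", "y[i] = 2*x[i];", "double_ker")
#     x_gpu = gpuarray.to_gpu(np.arange(4, dtype=np.float32))
#     y_gpu = gpuarray.empty_like(x_gpu)
#     double_ker(x_gpu, y_gpu)   # y_gpu now holds [0., 2., 4., 6.]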
if __name__ == '__main__':
t1 = time()
mandel = gpu_mandelbrot(4096,4096,-2,2,-2,2,1024, 2)
t2 = time()
mandel_time = t2 - t1
t1 = time()
fig = plt.figure(1)
plt.imshow(mandel, vmin=0, vmax=15)
plt.savefig('mandelbrot.png', dpi=fig.dpi)
t2 = time()
dump_time = t2 - t1
print('It took {} seconds to calculate the Mandelbrot graph.'.format(mandel_time))
print('It took {} seconds to dump the image.'.format(dump_time))
| 28.763158 | 106 | 0.68527 |
d2960952880bd6b6dee1f3211383aef5a451e36a | 33,287 | py | Python | server/integrations/saml-passport/SamlPassportAuthenticator.py | duttarnab/jans-auth-server | c74d4b1056cc6ae364dee1d3b89121925a3dcd0b | ["Apache-2.0"] | 30 | 2020-10-08T07:42:25.000Z | 2022-01-14T08:28:54.000Z | server/integrations/saml-passport/SamlPassportAuthenticator.py | duttarnab/jans-auth-server | c74d4b1056cc6ae364dee1d3b89121925a3dcd0b | ["Apache-2.0"] | 339 | 2020-10-23T19:07:38.000Z | 2022-01-14T08:27:47.000Z | server/integrations/saml-passport/SamlPassportAuthenticator.py | duttarnab/jans-auth-server | c74d4b1056cc6ae364dee1d3b89121925a3dcd0b | ["Apache-2.0"] | 17 | 2020-10-07T17:23:59.000Z | 2022-01-14T09:28:21.000Z |
# Janssen Project software is available under the Apache License (2004). See http://www.apache.org/licenses/ for full text.
# Copyright (c) 2020, Janssen Project
#
# Author: Jose Gonzalez
# Author: Yuriy Movchan
# Author: Christian Eland
#
from io.jans.jsf2.service import FacesService
from io.jans.jsf2.message import FacesMessages
from org.gluu.oxauth.model.common import User, WebKeyStorage
from org.gluu.oxauth.model.configuration import AppConfiguration
from org.gluu.oxauth.model.crypto import CryptoProviderFactory
from org.gluu.oxauth.model.jwt import Jwt, JwtClaimName
from org.gluu.oxauth.model.util import Base64Util
from io.jans.as.server.service import AppInitializer, AuthenticationService
from io.jans.as.server.service.common import UserService, EncryptionService
from org.gluu.oxauth.model.authorize import AuthorizeRequestParam
from io.jans.as.server.service.net import HttpService
from io.jans.as.server.security import Identity
from io.jans.as.server.util import ServerUtil
from org.gluu.config.oxtrust import LdapOxPassportConfiguration
from io.jans.model.custom.script.type.auth import PersonAuthenticationType
from io.jans.orm import PersistenceEntryManager
from io.jans.service.cdi.util import CdiUtil
from io.jans.util import StringHelper
from java.util import ArrayList, Arrays, Collections, HashSet
from org.gluu.oxauth.model.exception import InvalidJwtException
from javax.faces.application import FacesMessage
from javax.faces.context import FacesContext
import json
import sys
import datetime
import base64
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "Passport. init called"
self.extensionModule = self.loadExternalModule(configurationAttributes.get("extension_module"))
extensionResult = self.extensionInit(configurationAttributes)
if extensionResult != None:
return extensionResult
print "Passport. init. Behaviour is inbound SAML"
success = self.processKeyStoreProperties(configurationAttributes)
if success:
self.providerKey = "provider"
self.customAuthzParameter = self.getCustomAuthzParameter(configurationAttributes.get("authz_req_param_provider"))
self.passportDN = self.getPassportConfigDN()
print "Passport. init. Initialization success"
else:
print "Passport. init. Initialization failed"
return success
def destroy(self, configurationAttributes):
print "Passport. destroy called"
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, requestParameters):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
extensionResult = self.extensionAuthenticate(configurationAttributes, requestParameters, step)
if extensionResult != None:
return extensionResult
print "Passport. authenticate for step %s called" % str(step)
identity = CdiUtil.bean(Identity)
# Loading self.registeredProviders in case passport destroyed
if not hasattr(self,'registeredProviders'):
print "Passport. Fetching registered providers."
self.parseProviderConfigs()
if step == 1:
jwt_param = None
if self.isInboundFlow(identity):
# if is idp-initiated inbound flow
print "Passport. authenticate for step 1. Detected idp-initiated inbound Saml flow"
# get request from session attributes
jwt_param = identity.getSessionId().getSessionAttributes().get(AuthorizeRequestParam.STATE)
print "jwt_param = %s" % jwt_param
# now jwt_param != None
if jwt_param == None:
# gets jwt parameter "user" sent after authentication by passport (if exists)
jwt_param = ServerUtil.getFirstValue(requestParameters, "user")
if jwt_param != None:
# and now that the jwt_param user exists...
print "Passport. authenticate for step 1. JWT user profile token found"
if self.isInboundFlow(identity):
jwt_param = base64.urlsafe_b64decode(str(jwt_param+'=='))
# Parse JWT and validate
jwt = Jwt.parse(jwt_param)
if not self.validSignature(jwt):
return False
if self.jwtHasExpired(jwt):
return False
# Gets user profile as string and json using the information on JWT
(user_profile, jsonp) = self.getUserProfile(jwt)
if user_profile == None:
return False
sessionAttributes = identity.getSessionId().getSessionAttributes()
self.skipProfileUpdate = StringHelper.equalsIgnoreCase(sessionAttributes.get("skipPassportProfileUpdate"), "true")
return self.attemptAuthentication(identity, user_profile, jsonp)
#See passportlogin.xhtml
provider = ServerUtil.getFirstValue(requestParameters, "loginForm:provider")
if StringHelper.isEmpty(provider):
#it's username + passw auth
print "Passport. authenticate for step 1. Basic authentication detected"
logged_in = False
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
authenticationService = CdiUtil.bean(AuthenticationService)
logged_in = authenticationService.authenticate(user_name, user_password)
print "Passport. authenticate for step 1. Basic authentication returned: %s" % logged_in
return logged_in
elif provider in self.registeredProviders:
# user selected provider
# it's a recognized external IDP
identity.setWorkingParameter("selectedProvider", provider)
print "Passport. authenticate for step 1. Retrying step 1"
#see prepareForStep (step = 1)
return True
if step == 2:
mail = ServerUtil.getFirstValue(requestParameters, "loginForm:email")
jsonp = identity.getWorkingParameter("passport_user_profile")
if mail == None:
self.setMessageError(FacesMessage.SEVERITY_ERROR, "Email was missing in user profile")
elif jsonp != None:
# Completion of profile takes place
user_profile = json.loads(jsonp)
user_profile["mail"] = [ mail ]
return self.attemptAuthentication(identity, user_profile, jsonp)
print "Passport. authenticate for step 2. Failed: expected mail value in HTTP request and json profile in session"
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
extensionResult = self.extensionPrepareForStep(configurationAttributes, requestParameters, step)
if extensionResult != None:
return extensionResult
print "Passport. prepareForStep called %s" % str(step)
identity = CdiUtil.bean(Identity)
if step == 1:
#re-read the strategies config (for instance to know which strategies have enabled the email account linking)
self.parseProviderConfigs()
identity.setWorkingParameter("externalProviders", json.dumps(self.registeredProviders))
providerParam = self.customAuthzParameter
url = None
sessionAttributes = identity.getSessionId().getSessionAttributes()
self.skipProfileUpdate = StringHelper.equalsIgnoreCase(sessionAttributes.get("skipPassportProfileUpdate"), "true")
#this param could have been set previously in authenticate step if current step is being retried
provider = identity.getWorkingParameter("selectedProvider")
print "prepareForStep %s - provider = %s" % (str(step), str(provider))
# if there is a selectedProvider
if provider != None:
# get the redirect URL to use at facesService.redirectToExternalURL() that sends /passport/auth/<provider>/<token>
url = self.getPassportRedirectUrl(provider)
print "prepareForStep %s - url = %s" % (str(step), url)
# sets selectedProvider back to None
identity.setWorkingParameter("selectedProvider", None)
# if there is customAuthzParameter
elif providerParam != None:
# get it from sessionAtributes
paramValue = sessionAttributes.get(providerParam)
#if exists
if paramValue != None:
print "Passport. prepareForStep. Found value in custom param of authorization request: %s" % paramValue
provider = self.getProviderFromJson(paramValue)
if provider == None:
print "Passport. prepareForStep. A provider value could not be extracted from custom authorization request parameter"
elif not provider in self.registeredProviders:
print "Passport. prepareForStep. Provider '%s' not part of known configured IDPs/OPs" % provider
else:
url = self.getPassportRedirectUrl(provider)
# if no provider selected yet...
if url == None:
print "Passport. prepareForStep. A page to manually select an identity provider will be shown"
# else already got the /passport/auth/<provider>/<token> url...
else:
facesService = CdiUtil.bean(FacesService)
# redirects to Passport getRedirectURL - sends browser to IDP.
            print "Passport. Redirecting to external url: %s" % url
facesService.redirectToExternalURL(url)
return True
def getExtraParametersForStep(self, configurationAttributes, step):
print "Passport. getExtraParametersForStep called for step %s" % str(step)
if step == 1:
return Arrays.asList("selectedProvider", "externalProviders")
elif step == 2:
return Arrays.asList("passport_user_profile")
return None
def getCountAuthenticationSteps(self, configurationAttributes):
print "Passport. getCountAuthenticationSteps called"
identity = CdiUtil.bean(Identity)
if identity.getWorkingParameter("passport_user_profile") != None:
return 2
return 1
def getPageForStep(self, configurationAttributes, step):
print "Passport. getPageForStep called"
extensionResult = self.extensionGetPageForStep(configurationAttributes, step)
if extensionResult != None:
return extensionResult
if step == 1:
identity = CdiUtil.bean(Identity)
print "Passport. getPageForStep. Entered if step ==1"
if self.isInboundFlow(identity):
print "Passport. getPageForStep for step 1. Detected inbound Saml flow"
return "/postlogin.xhtml"
print "Passport. getPageForStep 1. NormalFlow, returning passportlogin.xhtml"
return "/auth/passport/passportlogin.xhtml"
return "/auth/passport/passportpostlogin.xhtml"
def getNextStep(self, configurationAttributes, requestParameters, step):
if step == 1:
identity = CdiUtil.bean(Identity)
provider = identity.getWorkingParameter("selectedProvider")
if provider != None:
return 1
return -1
def logout(self, configurationAttributes, requestParameters):
return True
# Extension module related functions
def extensionInit(self, configurationAttributes):
if self.extensionModule == None:
return None
return self.extensionModule.init(configurationAttributes)
def extensionAuthenticate(self, configurationAttributes, requestParameters, step):
if self.extensionModule == None:
return None
return self.extensionModule.authenticate(configurationAttributes, requestParameters, step)
def extensionPrepareForStep(self, configurationAttributes, requestParameters, step):
if self.extensionModule == None:
return None
return self.extensionModule.prepareForStep(configurationAttributes, requestParameters, step)
def extensionGetPageForStep(self, configurationAttributes, step):
if self.extensionModule == None:
return None
return self.extensionModule.getPageForStep(configurationAttributes, step)
# Initalization routines
def loadExternalModule(self, simpleCustProperty):
if simpleCustProperty != None:
print "Passport. loadExternalModule. Loading passport extension module..."
moduleName = simpleCustProperty.getValue2()
try:
module = __import__(moduleName)
return module
except:
print "Passport. loadExternalModule. Failed to load module %s" % moduleName
print "Exception: ", sys.exc_info()[1]
print "Passport. loadExternalModule. Flow will be driven entirely by routines of main passport script"
return None
def processKeyStoreProperties(self, attrs):
file = attrs.get("key_store_file")
password = attrs.get("key_store_password")
if file != None and password != None:
file = file.getValue2()
password = password.getValue2()
if StringHelper.isNotEmpty(file) and StringHelper.isNotEmpty(password):
self.keyStoreFile = file
self.keyStorePassword = password
return True
print "Passport. readKeyStoreProperties. Properties key_store_file or key_store_password not found or empty"
return False
def getCustomAuthzParameter(self, simpleCustProperty):
customAuthzParameter = None
if simpleCustProperty != None:
prop = simpleCustProperty.getValue2()
if StringHelper.isNotEmpty(prop):
customAuthzParameter = prop
if customAuthzParameter == None:
print "Passport. getCustomAuthzParameter. No custom param for OIDC authz request in script properties"
print "Passport. getCustomAuthzParameter. Passport flow cannot be initiated by doing an OpenID connect authorization request"
else:
print "Passport. getCustomAuthzParameter. Custom param for OIDC authz request in script properties: %s" % customAuthzParameter
return customAuthzParameter
# Configuration parsing
def getPassportConfigDN(self):
f = open('/etc/gluu/conf/gluu.properties', 'r')
for line in f:
prop = line.split("=")
if prop[0] == "oxpassport_ConfigurationEntryDN":
prop.pop(0)
break
f.close()
return "=".join(prop).strip()
def parseAllProviders(self):
registeredProviders = {}
print "Passport. parseAllProviders. Adding providers"
entryManager = CdiUtil.bean(PersistenceEntryManager)
config = LdapOxPassportConfiguration()
config = entryManager.find(config.getClass(), self.passportDN).getPassportConfiguration()
config = config.getProviders() if config != None else config
if config != None and len(config) > 0:
for prvdetails in config:
if prvdetails.isEnabled():
registeredProviders[prvdetails.getId()] = {
"emailLinkingSafe": prvdetails.isEmailLinkingSafe(),
"requestForEmail" : prvdetails.isRequestForEmail(),
"logo_img": prvdetails.getLogoImg(),
"displayName": prvdetails.getDisplayName(),
"type": prvdetails.getType()
}
return registeredProviders
def parseProviderConfigs(self):
registeredProviders = {}
try:
registeredProviders = self.parseAllProviders()
toRemove = []
for provider in registeredProviders:
if registeredProviders[provider]["type"] != "saml":
toRemove.append(provider)
else:
registeredProviders[provider]["saml"] = True
for provider in toRemove:
registeredProviders.pop(provider)
if len(registeredProviders.keys()) > 0:
print "Passport. parseProviderConfigs. Configured providers:", registeredProviders
else:
print "Passport. parseProviderConfigs. No providers registered yet"
except:
print "Passport. parseProviderConfigs. An error occurred while building the list of supported authentication providers", sys.exc_info()[1]
print "parseProviderConfigs - registeredProviders = %s" % str(registeredProviders)
self.registeredProviders = registeredProviders
print "parseProviderConfigs - self.registeredProviders = %s" % str(self.registeredProviders)
# Auxiliary routines
def getProviderFromJson(self, providerJson):
provider = None
try:
obj = json.loads(Base64Util.base64urldecodeToString(providerJson))
provider = obj[self.providerKey]
except:
print "Passport. getProviderFromJson. Could not parse provided Json string. Returning None"
return provider
def getPassportRedirectUrl(self, provider):
# provider is assumed to exist in self.registeredProviders
url = None
try:
facesContext = CdiUtil.bean(FacesContext)
tokenEndpoint = "https://%s/passport/token" % facesContext.getExternalContext().getRequest().getServerName()
httpService = CdiUtil.bean(HttpService)
httpclient = httpService.getHttpsClient()
print "Passport. getPassportRedirectUrl. Obtaining token from passport at %s" % tokenEndpoint
resultResponse = httpService.executeGet(httpclient, tokenEndpoint, Collections.singletonMap("Accept", "text/json"))
httpResponse = resultResponse.getHttpResponse()
bytes = httpService.getResponseContent(httpResponse)
response = httpService.convertEntityToString(bytes)
print "Passport. getPassportRedirectUrl. Response was %s" % httpResponse.getStatusLine().getStatusCode()
tokenObj = json.loads(response)
url = "/passport/auth/%s/%s" % (provider, tokenObj["token_"])
except:
print "Passport. getPassportRedirectUrl. Error building redirect URL: ", sys.exc_info()[1]
return url
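    # Illustrative sketch (hypothetical helper, not part of the original script):
    # how the redirect URL produced by getPassportRedirectUrl above is assembled
    # once the /passport/token endpoint has answered. The JSON string in the
    # usage note is made up; json is the same module already used elsewhere in
    # this script.
    def _exampleBuildRedirectUrl(self, provider, tokenResponseJson):
        tokenObj = json.loads(tokenResponseJson)
        return "/passport/auth/%s/%s" % (provider, tokenObj["token_"])
    # e.g. _exampleBuildRedirectUrl("saml-idp1", '{"token_": "abc123"}')
    # returns "/passport/auth/saml-idp1/abc123"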
def validSignature(self, jwt):
print "Passport. validSignature. Checking JWT token signature"
valid = False
try:
appConfiguration = AppConfiguration()
appConfiguration.setWebKeysStorage(WebKeyStorage.KEYSTORE)
appConfiguration.setKeyStoreFile(self.keyStoreFile)
appConfiguration.setKeyStoreSecret(self.keyStorePassword)
appConfiguration.setKeyRegenerationEnabled(False)
cryptoProvider = CryptoProviderFactory.getCryptoProvider(appConfiguration)
alg_string = str(jwt.getHeader().getSignatureAlgorithm())
signature_string = str(jwt.getEncodedSignature())
if alg_string == "none" or alg_string == "None" or alg_string == "NoNe" or alg_string == "nONE" or alg_string == "NONE" or alg_string == "NonE" or alg_string == "nOnE":
# blocks none attack
print "WARNING: JWT Signature algorithm is none"
valid = False
elif alg_string != "RS512":
# blocks anything that's not RS512
print "WARNING: JWT Signature algorithm is NOT RS512"
valid = False
elif signature_string == "" :
# blocks empty signature string
print "WARNING: JWT Signature not sent"
valid = False
else:
# class extends AbstractCryptoProvider
''' on version 4.2 .getAlgorithm() method was renamed to .getSignatureAlgorithm()
for older versions:
valid = cryptoProvider.verifySignature(jwt.getSigningInput(), jwt.getEncodedSignature(), jwt.getHeader().getKeyId(),
None, None, jwt.getHeader().getAlgorithm())
'''
# working on 4.2:
valid = cryptoProvider.verifySignature(jwt.getSigningInput(), jwt.getEncodedSignature(), jwt.getHeader().getKeyId(),
None, None, jwt.getHeader().getSignatureAlgorithm())
except:
print "Exception: ", sys.exc_info()[1]
print "Passport. validSignature. Validation result was %s" % valid
return valid
def jwtHasExpired(self, jwt):
# Check if jwt has expired
jwt_claims = jwt.getClaims()
try:
exp_date_timestamp = float(jwt_claims.getClaimAsString(JwtClaimName.EXPIRATION_TIME))
exp_date = datetime.datetime.fromtimestamp(exp_date_timestamp)
hasExpired = exp_date < datetime.datetime.now()
except:
print "Exception: The JWT does not have '%s' attribute" % JwtClaimName.EXPIRATION_TIME
return False
return hasExpired
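    # Illustrative sketch (hypothetical helper, not part of the original script):
    # the expiry test used by jwtHasExpired above, reduced to plain datetime
    # arithmetic on a Unix timestamp (seconds since the epoch).
    def _exampleHasExpired(self, expTimestamp):
        expDate = datetime.datetime.fromtimestamp(float(expTimestamp))
        return expDate < datetime.datetime.now()
    # e.g. _exampleHasExpired(0) returns True (long expired), while a timestamp
    # far in the future returns False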
def getUserProfile(self, jwt):
# getClaims method located at org.gluu.oxauth.model.token.JsonWebResponse.java as a org.gluu.oxauth.model.jwt.JwtClaims object
jwt_claims = jwt.getClaims()
        user_profile_json = None
        user_profile = None
try:
# public String getClaimAsString(String key)
user_profile_json = CdiUtil.bean(EncryptionService).decrypt(jwt_claims.getClaimAsString("data"))
user_profile = json.loads(user_profile_json)
except:
print "Passport. getUserProfile. Problem obtaining user profile json representation"
return (user_profile, user_profile_json)
def attemptAuthentication(self, identity, user_profile, user_profile_json):
print "Entered attemptAuthentication..."
uidKey = "uid"
if not self.checkRequiredAttributes(user_profile, [uidKey, self.providerKey]):
return False
provider = user_profile[self.providerKey]
print "user_profile[self.providerKey] = %s" % str(user_profile[self.providerKey])
if not provider in self.registeredProviders:
print "Entered if note provider in self.registeredProviers:"
print "Passport. attemptAuthentication. Identity Provider %s not recognized" % provider
return False
print "attemptAuthentication. user_profile = %s" % user_profile
print "user_profile[uidKey] = %s" % user_profile[uidKey]
uid = user_profile[uidKey][0]
print "attemptAuthentication - uid = %s" % uid
externalUid = "passport-%s:%s:%s" % ("saml", provider, uid)
userService = CdiUtil.bean(UserService)
userByUid = self.getUserByExternalUid(uid, provider, userService)
email = None
if "mail" in user_profile:
email = user_profile["mail"]
if len(email) == 0:
email = None
else:
email = email[0]
user_profile["mail"] = [ email ]
if email == None and self.registeredProviders[provider]["requestForEmail"]:
print "Passport. attemptAuthentication. Email was not received"
if userByUid != None:
# This avoids asking for the email over every login attempt
email = userByUid.getAttribute("mail")
if email != None:
print "Passport. attemptAuthentication. Filling missing email value with %s" % email
user_profile["mail"] = [ email ]
if email == None:
# Store user profile in session and abort this routine
identity.setWorkingParameter("passport_user_profile", user_profile_json)
return True
userByMail = None if email == None else userService.getUserByAttribute("mail", email)
# Determine if we should add entry, update existing, or deny access
doUpdate = False
doAdd = False
if userByUid != None:
print "User with externalUid '%s' already exists" % externalUid
if userByMail == None:
doUpdate = True
else:
if userByMail.getUserId() == userByUid.getUserId():
doUpdate = True
else:
print "Users with externalUid '%s' and mail '%s' are different. Access will be denied. Impersonation attempt?" % (externalUid, email)
self.setMessageError(FacesMessage.SEVERITY_ERROR, "Email value corresponds to an already existing provisioned account")
else:
if userByMail == None:
doAdd = True
elif self.registeredProviders[provider]["emailLinkingSafe"]:
tmpList = userByMail.getAttributeValues("oxExternalUid")
tmpList = ArrayList() if tmpList == None else ArrayList(tmpList)
tmpList.add(externalUid)
userByMail.setAttribute("oxExternalUid", tmpList, True)
userByUid = userByMail
print "External user supplying mail %s will be linked to existing account '%s'" % (email, userByMail.getUserId())
doUpdate = True
else:
print "An attempt to supply an email of an existing user was made. Turn on 'emailLinkingSafe' if you want to enable linking"
self.setMessageError(FacesMessage.SEVERITY_ERROR, "Email value corresponds to an already existing account. If you already have a username and password use those instead of an external authentication site to get access.")
username = None
try:
if doUpdate:
username = userByUid.getUserId()
print "Passport. attemptAuthentication. Updating user %s" % username
self.updateUser(userByUid, user_profile, userService)
elif doAdd:
print "Passport. attemptAuthentication. Creating user %s" % externalUid
newUser = self.addUser(externalUid, user_profile, userService)
username = newUser.getUserId()
except:
print "Exception: ", sys.exc_info()[1]
print "Passport. attemptAuthentication. Authentication failed"
return False
if username == None:
print "Passport. attemptAuthentication. Authentication attempt was rejected"
return False
else:
logged_in = CdiUtil.bean(AuthenticationService).authenticate(username)
print "Passport. attemptAuthentication. Authentication for %s returned %s" % (username, logged_in)
return logged_in
def getUserByExternalUid(self, uid, provider, userService):
newFormat = "passport-%s:%s:%s" % ("saml", provider, uid)
user = userService.getUserByAttribute("oxExternalUid", newFormat, True)
if user == None:
oldFormat = "passport-%s:%s" % ("saml", uid)
user = userService.getUserByAttribute("oxExternalUid", oldFormat, True)
if user != None:
# Migrate to newer format
                uidList = HashSet(user.getAttributeValues("oxExternalUid"))
                uidList.remove(oldFormat)
                uidList.add(newFormat)
                user.setAttribute("oxExternalUid", ArrayList(uidList), True)
print "Migrating user's oxExternalUid to newer format 'passport-saml:provider:uid'"
userService.updateUser(user)
return user
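    # Illustrative sketch (hypothetical helper, not part of the original script):
    # the oxExternalUid migration performed by getUserByExternalUid above, shown
    # with plain Python sets instead of java.util collections. Input values are
    # made up.
    def _exampleMigrateExternalUid(self, existingValues, provider, uid):
        oldFormat = "passport-%s:%s" % ("saml", uid)
        newFormat = "passport-%s:%s:%s" % ("saml", provider, uid)
        values = set(existingValues)
        if oldFormat in values:
            values.remove(oldFormat)
            values.add(newFormat)
        return sorted(values)
    # e.g. _exampleMigrateExternalUid(["passport-saml:jdoe"], "idp1", "jdoe")
    # returns ["passport-saml:idp1:jdoe"]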
def setMessageError(self, severity, msg):
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
facesMessages.clear()
facesMessages.add(severity, msg)
def checkRequiredAttributes(self, profile, attrs):
for attr in attrs:
if (not attr in profile) or len(profile[attr]) == 0:
print "Passport. checkRequiredAttributes. Attribute '%s' is missing in profile" % attr
return False
return True
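    # Illustrative sketch (hypothetical helper, not part of the original script):
    # how checkRequiredAttributes above behaves on a made-up profile dict where
    # every attribute value is a list, as delivered by Passport.
    def _exampleCheckProfile(self):
        profile = {"uid": ["jdoe"], "provider": ["idp1"], "mail": []}
        complete = self.checkRequiredAttributes(profile, ["uid", "provider"])
        incomplete = self.checkRequiredAttributes(profile, ["uid", "mail"])
        # complete is True; incomplete is False because "mail" is present but empty
        return (complete, incomplete)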
def addUser(self, externalUid, profile, userService):
print "Passport. Entered addUser()."
print "Passport. addUser. externalUid = %s" % externalUid
print "Passport. addUser. profile = %s" % profile
newUser = User()
#Fill user attrs
newUser.setAttribute("oxExternalUid", externalUid, True)
self.fillUser(newUser, profile)
newUser = userService.addUser(newUser, True)
return newUser
def updateUser(self, foundUser, profile, userService):
        # when this is False, there may still be some updates taking place (e.g. not related to profile attrs released by external provider)
if (not self.skipProfileUpdate):
self.fillUser(foundUser, profile)
userService.updateUser(foundUser)
def fillUser(self, foundUser, profile):
print
print "Passport. Entered fillUser()."
print "Passport. fillUser. foundUser = %s" % foundUser
print "Passport. fillUser. profile = %s" % profile
for attr in profile:
# "provider" is disregarded if part of mapping
if attr != self.providerKey:
values = profile[attr]
print "%s = %s" % (attr, values)
foundUser.setAttribute(attr, values)
if attr == "mail":
print "Passport. fillUser. entered if attr == mail"
oxtrustMails = []
for mail in values:
oxtrustMails.append('{"value":"%s","primary":false}' % mail)
foundUser.setAttribute("oxTrustEmail", oxtrustMails)
# IDP-initiated flow routines
def isInboundFlow(self, identity):
print "passport. entered isInboundFlow"
sessionId = identity.getSessionId()
print "passport. isInboundFlow. sessionId = %s" % sessionId
if sessionId == None:
print "passport. isInboundFlow. sessionId not found yet..."
# Detect mode if there is no session yet. It's needed for getPageForStep method
facesContext = CdiUtil.bean(FacesContext)
requestParameters = facesContext.getExternalContext().getRequestParameterMap()
print "passport. isInboundFlow. requestParameters = %s" % requestParameters
authz_state = requestParameters.get(AuthorizeRequestParam.STATE)
print "passport. isInboundFlow. authz_state = %s" % authz_state
else:
authz_state = identity.getSessionId().getSessionAttributes().get(AuthorizeRequestParam.STATE)
print "passport. IsInboundFlow. authz_state = %s" % authz_state
        # the '==' padding appended below is a workaround for a problem reported
        # in issue: https://github.com/GluuFederation/gluu-passport/issues/95
# TODO: Remove after fixed on JSF side
b64url_decoded_auth_state = base64.urlsafe_b64decode(str(authz_state+'=='))
# print "passport. IsInboundFlow. b64url_decoded_auth_state = %s" % str(b64url_decoded_auth_state)
print "passport. IsInboundFlow. self.isInboundJwt() = %s" % str(self.isInboundJwt(b64url_decoded_auth_state))
if self.isInboundJwt(b64url_decoded_auth_state):
return True
return False
def isInboundJwt(self, value):
if value == None:
return False
try:
jwt = Jwt.parse(value)
print "passport.isInboundJwt. jwt = %s" % jwt
user_profile_json = CdiUtil.bean(EncryptionService).decrypt(jwt.getClaims().getClaimAsString("data"))
if StringHelper.isEmpty(user_profile_json):
return False
except InvalidJwtException:
return False
except:
print("Unexpected error:", sys.exc_info()[0])
return False
return True
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
return None
| 40.250302 | 236 | 0.639679 |
f0a29fed314afb880de4023511ac1a4df2d266e9 | 517 | py | Python | Lab8.py | Kiaram4/IA241-GitHub-1 | 042d5b8eb19a730441b412de9d29e97de8cc8a70 | ["MIT"] | null | null | null | Lab8.py | Kiaram4/IA241-GitHub-1 | 042d5b8eb19a730441b412de9d29e97de8cc8a70 | ["MIT"] | null | null | null | Lab8.py | Kiaram4/IA241-GitHub-1 | 042d5b8eb19a730441b412de9d29e97de8cc8a70 | ["MIT"] | null | null | null |
'''
Lab8 Functions
'''
#3.1
def count_words(input_str):
return len(input_str.split())
#3.2
demo_str= 'hi, hello world!'
print(count_words(demo_str))
#3.3
def find_min_num(input_list):
min_item=input_list[0]
for num in input_list:
if type(num) is not str:
if min_item>=num:
min_item=num
return min_item
#3.4
demo_list=[1,2,3,4,5,6]
print(find_min_num(demo_list))
#3.5
mix_list= [1,2,3,'a',4,5,6]
print(find_min_num(mix_list))
| 14.771429 | 33 | 0.601547 |
b85b5f6faad39c39c135a837d4268c2d08661b61 | 649 | py | Python | test/integration/dppl/naive/schools_naive.py | deepppl/stanc3 | 92dd65f6f7e13c95ef15fa8f5040d26265cc9817 | ["BSD-3-Clause"] | 17 | 2020-11-26T00:53:58.000Z | 2021-08-20T18:56:39.000Z | test/integration/dppl/naive/schools_naive.py | deepppl/stanc3 | 92dd65f6f7e13c95ef15fa8f5040d26265cc9817 | ["BSD-3-Clause"] | null | null | null | test/integration/dppl/naive/schools_naive.py | deepppl/stanc3 | 92dd65f6f7e13c95ef15fa8f5040d26265cc9817 | ["BSD-3-Clause"] | 3 | 2020-12-09T20:16:16.000Z | 2021-05-28T04:30:28.000Z |
def model(N=None, sigma_y=None, y=None):
theta = zeros(N)
mu_theta = sample('mu_theta', dist.Normal(0, 100))
sigma_eta = sample('sigma_eta', dist.InverseGamma(1, 1))
eta = sample('eta', dist.Normal(zeros(N), sigma_eta))
xi = sample('xi', dist.Normal(0, 5))
theta = mu_theta + xi * eta
sample('y', dist.Normal(theta, sigma_y), obs=y)
def generated_quantities(N=None, sigma_y=None, y=None, parameters=None):
eta = parameters['eta']
mu_theta = parameters['mu_theta']
sigma_eta = parameters['sigma_eta']
xi = parameters['xi']
theta = zeros(N)
theta = mu_theta + xi * eta
return {'theta': theta}
| 34.157895 | 72 | 0.639445 |
a57a8bc55a00972d6b010023a0160673dac35dbe | 51,524 | py | Python | pycroscopy/core/viz/plot_utils.py | ealopez/pycroscopy | 9f7c0543b67eaa0668296295fc5f492360c130a0 | ["MIT"] | null | null | null | pycroscopy/core/viz/plot_utils.py | ealopez/pycroscopy | 9f7c0543b67eaa0668296295fc5f492360c130a0 | ["MIT"] | null | null | null | pycroscopy/core/viz/plot_utils.py | ealopez/pycroscopy | 9f7c0543b67eaa0668296295fc5f492360c130a0 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu May 05 13:29:12 2016
@author: Suhas Somnath, Chris R. Smith
"""
# TODO: All general plotting functions should support data with 1, 2, or 3 spatial dimensions.
from __future__ import division, print_function, absolute_import, unicode_literals
import inspect
import os
import sys
from numbers import Number
import h5py
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import ImageGrid
if sys.version_info.major == 3:
unicode = str
else:
unicode = unicode
default_cmap = plt.cm.viridis
def reset_plot_params():
"""
Resets the plot parameters to matplotlib default values
Adapted from:
https://stackoverflow.com/questions/26413185/how-to-recover-matplotlib-defaults-after-setting-stylesheet
"""
mpl.rcParams.update(mpl.rcParamsDefault)
# Also resetting ipython inline parameters
inline_rc = dict(mpl.rcParams)
mpl.rcParams.update(inline_rc)
def use_nice_plot_params():
"""
Resets default plot parameters such as figure size, font sizes etc. to values better suited for scientific
publications
"""
# mpl.rcParams.keys() # gets all allowable keys
# mpl.rc('figure', figsize=(5.5, 5))
mpl.rc('lines', linewidth=2)
mpl.rc('axes', labelsize=16, titlesize=16)
mpl.rc('figure', titlesize=20)
mpl.rc('font', size=14) # global font size
mpl.rc('legend', fontsize=16, fancybox=True)
mpl.rc('xtick.major', size=6)
mpl.rc('xtick.minor', size=4)
# mpl.rcParams['xtick.major.size'] = 6
def get_plot_grid_size(num_plots, fewer_rows=True):
"""
Returns the number of rows and columns ideal for visualizing multiple (identical) plots within a single figure
Parameters
----------
num_plots : uint
Number of identical subplots within a figure
fewer_rows : bool, optional. Default = True
Set to True if the grid should be short and wide or False for tall and narrow
Returns
-------
nrows : uint
Number of rows
ncols : uint
Number of columns
"""
assert isinstance(num_plots, Number), 'num_plots must be a number'
# force integer:
num_plots = int(num_plots)
if num_plots < 1:
raise ValueError('num_plots was less than 0')
if fewer_rows:
nrows = int(np.floor(np.sqrt(num_plots)))
ncols = int(np.ceil(num_plots / nrows))
else:
ncols = int(np.floor(np.sqrt(num_plots)))
nrows = int(np.ceil(num_plots / ncols))
return nrows, ncols
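# Illustrative sketch (not part of the original module): typical grid shapes
# returned by get_plot_grid_size above for the same plot count in both
# orientations. Only the function defined above is used.
def _example_get_plot_grid_size():
    wide = get_plot_grid_size(10)                    # (3, 4): short and wide
    tall = get_plot_grid_size(10, fewer_rows=False)  # (4, 3): tall and narrow
    return wide, tall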
def set_tick_font_size(axes, font_size):
"""
Sets the font size of the ticks in the provided axes
Parameters
----------
axes : matplotlib.pyplot.axis object or list of axis objects
axes to set font sizes
    font_size : unsigned int
Font size
"""
assert isinstance(font_size, Number)
font_size = max(1, int(font_size))
def __set_axis_tick(axis):
"""
Sets the font sizes to the x and y axis in the given axis object
Parameters
----------
axis : matplotlib.axes.Axes object
axis to set font sizes
"""
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
mesg = 'axes must either be a matplotlib.axes.Axes object or an iterable containing such objects'
if hasattr(axes, '__iter__'):
for axis in axes:
assert isinstance(axis, mpl.axes.Axes), mesg
__set_axis_tick(axis)
else:
assert isinstance(axes, mpl.axes.Axes), mesg
__set_axis_tick(axes)
def make_scalar_mappable(vmin, vmax, cmap=None):
"""
Creates a scalar mappable object that can be used to create a colorbar for non-image (e.g. - line) plots
Parameters
----------
vmin : Number
Minimum value for colorbar
vmax : Number
Maximum value for colorbar
cmap : colormap object
Colormap object to use
Returns
-------
sm : matplotlib.pyplot.cm.ScalarMappable object
The object that can used to create a colorbar via plt.colorbar(sm)
Adapted from: https://stackoverflow.com/questions/8342549/matplotlib-add-colorbar-to-a-sequence-of-line-plots
"""
assert isinstance(vmin, Number), 'vmin should be a number'
assert isinstance(vmax, Number), 'vmax should be a number'
assert vmin < vmax, 'vmin must be less than vmax'
if cmap is None:
cmap = default_cmap
else:
assert isinstance(cmap, (mpl.colors.Colormap, str, unicode))
sm = plt.cm.ScalarMappable(cmap=cmap,
norm=plt.Normalize(vmin=vmin, vmax=vmax))
# fake up the array of the scalar mappable
sm._A = []
return sm
def cbar_for_line_plot(axis, num_steps, discrete_ticks=True, **kwargs):
"""
Adds a colorbar next to a line plot axis
Parameters
----------
axis : matplotlib.axes.Axes
Axis with multiple line objects
num_steps : uint
Number of steps in the colorbar
discrete_ticks : (optional) bool
Whether or not to have the ticks match the number of number of steps. Default = True
"""
if not isinstance(axis, mpl.axes.Axes):
raise TypeError('axis must be a matplotlib.axes.Axes object')
    if not isinstance(num_steps, int) or num_steps < 1:
raise TypeError('num_steps must be a whole number')
assert isinstance(discrete_ticks, bool)
cmap = get_cmap_object(kwargs.pop('cmap', None))
cmap = discrete_cmap(num_steps, cmap=cmap.name)
sm = make_scalar_mappable(0, num_steps - 1, cmap=cmap)
if discrete_ticks:
kwargs.update({'ticks': np.arange(num_steps)})
cbar = plt.colorbar(sm, ax=axis, orientation='vertical',
pad=0.04, use_gridspec=True, **kwargs)
return cbar
def get_cmap_object(cmap):
"""
Get the matplotlib.colors.LinearSegmentedColormap object regardless of the input
Parameters
----------
cmap : String, or matplotlib.colors.LinearSegmentedColormap object (Optional)
Requested color map
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap object
Requested / Default colormap object
"""
if cmap is None:
return default_cmap
elif type(cmap) in [str, unicode]:
return plt.get_cmap(cmap)
elif not isinstance(cmap, mpl.colors.Colormap):
raise TypeError('cmap should either be a matplotlib.colors.Colormap object or a string')
return cmap
def cmap_jet_white_center():
"""
Generates the jet colormap with a white center
Returns
-------
white_jet : matplotlib.colors.LinearSegmentedColormap object
color map object that can be used in place of the default colormap
"""
# For red - central column is like brightness
# For blue - last column is like brightness
cdict = {'red': ((0.00, 0.0, 0.0),
(0.30, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.90, 1.0, 1.0),
(1.00, 0.5, 1.0)),
'green': ((0.00, 0.0, 0.0),
(0.10, 0.0, 0.0),
(0.42, 1.0, 1.0),
(0.58, 1.0, 1.0),
(0.90, 0.0, 0.0),
(1.00, 0.0, 0.0)),
'blue': ((0.00, 0.0, 0.5),
(0.10, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.70, 0.0, 0.0),
(1.00, 0.0, 0.0))
}
return LinearSegmentedColormap('white_jet', cdict)
def cmap_from_rgba(name, interp_vals, normalization_val):
"""
Generates a colormap given a matlab-style interpolation table
Parameters
----------
name : String / Unicode
Name of the desired colormap
interp_vals : List of tuples
Interpolation table that describes the desired color map. Each entry in the table should be described as:
(position in the colorbar, (red, green, blue, alpha))
The position in the color bar, red, green, blue, and alpha vary from 0 to the normalization value
normalization_val : number
The common maximum value for the position in the color bar, red, green, blue, and alpha
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
desired color map
"""
if not isinstance(name, (str, unicode)):
raise TypeError('name should be a string')
if not isinstance(interp_vals, (list, tuple, np.array)):
raise TypeError('interp_vals must be a list of tuples')
if not isinstance(normalization_val, Number):
raise TypeError('normalization_val must be a number')
normalization_val = np.round(1.0 * normalization_val)
cdict = {'red': tuple([(dist / normalization_val, colors[0] / normalization_val, colors[0] / normalization_val)
for (dist, colors) in interp_vals][::-1]),
'green': tuple([(dist / normalization_val, colors[1] / normalization_val, colors[1] / normalization_val)
for (dist, colors) in interp_vals][::-1]),
'blue': tuple([(dist / normalization_val, colors[2] / normalization_val, colors[2] / normalization_val)
for (dist, colors) in interp_vals][::-1]),
'alpha': tuple([(dist / normalization_val, colors[3] / normalization_val, colors[3] / normalization_val)
for (dist, colors) in interp_vals][::-1])}
return LinearSegmentedColormap(name, cdict)
def make_linear_alpha_cmap(name, solid_color, normalization_val, min_alpha=0, max_alpha=1):
"""
Generates a transparent to opaque color map based on a single solid color
Parameters
----------
name : String / Unicode
Name of the desired colormap
solid_color : List of numbers
red, green, blue, and alpha values for a specific color
normalization_val : number
The common maximum value for the red, green, blue, and alpha values. This is 1 in matplotlib
min_alpha : float (optional. Default = 0 : ie- transparent)
Lowest alpha value for the bottom of the color bar
max_alpha : float (optional. Default = 1 : ie- opaque)
Highest alpha value for the top of the color bar
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
transparent to opaque color map based on the provided color
"""
if not isinstance(name, (str, unicode)):
raise TypeError('name should be a string')
if not isinstance(solid_color, (list, tuple, np.ndarray)):
raise TypeError('solid_color must be a list of numbers')
if not len(solid_color) == 4:
        raise ValueError('solid_color should have exactly four values')
    if not np.all([isinstance(x, Number) for x in solid_color]):
        raise TypeError('solid_color should contain four numbers for red, green, blue, alpha')
if not isinstance(normalization_val, Number):
raise TypeError('normalization_val must be a number')
if not isinstance(min_alpha, Number):
raise TypeError('min_alpha should be a Number')
if not isinstance(max_alpha, Number):
raise TypeError('max_alpha should be a Number')
if min_alpha >= max_alpha:
raise ValueError('min_alpha must be less than max_alpha')
solid_color = np.array(solid_color) / normalization_val * 1.0
interp_table = [(1.0, (solid_color[0], solid_color[1], solid_color[2], max_alpha)),
(0, (solid_color[0], solid_color[1], solid_color[2], min_alpha))]
return cmap_from_rgba(name, interp_table, 1)
def cmap_hot_desaturated():
"""
Returns a desaturated color map based on the hot colormap
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
Desaturated version of the hot color map
"""
hot_desaturated = [(255.0, (255, 76, 76, 255)),
(218.5, (107, 0, 0, 255)),
(182.1, (255, 96, 0, 255)),
(145.6, (255, 255, 0, 255)),
(109.4, (0, 127, 0, 255)),
(72.675, (0, 255, 255, 255)),
(36.5, (0, 0, 91, 255)),
(0, (71, 71, 219, 255))]
return cmap_from_rgba('hot_desaturated', hot_desaturated, 255)
def discrete_cmap(num_bins, cmap=None):
"""
Create an N-bin discrete colormap from the specified input map specified
Parameters
----------
num_bins : unsigned int
Number of discrete bins
cmap : matplotlib.colors.Colormap object
Base color map to discretize
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
Discretized color map
Notes
-----
Jake VanderPlas License: BSD-style
https://gist.github.com/jakevdp/91077b0cae40f8f8244a
"""
if cmap is None:
cmap = default_cmap.name
elif isinstance(cmap, mpl.colors.Colormap):
cmap = cmap.name
elif not isinstance(cmap, (str, unicode)):
raise TypeError('cmap should be a string or a matplotlib.colors.Colormap object')
return plt.get_cmap(cmap, num_bins)
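# Illustrative sketch (not part of the original module): building an 8-bin
# discrete version of the default colormap with discrete_cmap above and
# sampling one RGBA tuple per bin. Uses the module-level numpy import.
def _example_discrete_cmap():
    cmap = discrete_cmap(8)
    bin_centers = (np.arange(8) + 0.5) / 8.0
    return [cmap(val) for val in bin_centers]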
def rainbow_plot(axis, x_vec, y_vec, num_steps=32, **kwargs):
"""
Plots the input against the output vector such that the color of the curve changes as a function of index
Parameters
----------
axis : matplotlib.axes.Axes object
Axis to plot the curve
x_vec : 1D float numpy array
vector that forms the X axis
y_vec : 1D float numpy array
vector that forms the Y axis
num_steps : unsigned int (Optional)
Number of discrete color steps
"""
if not isinstance(axis, mpl.axes.Axes):
raise TypeError('axis must be a matplotlib.axes.Axes object')
if not isinstance(x_vec, (list, tuple, np.ndarray)):
raise TypeError('x_vec must be array-like of numbers')
    if not isinstance(y_vec, (list, tuple, np.ndarray)):
        raise TypeError('y_vec must be array-like of numbers')
x_vec = np.array(x_vec)
y_vec = np.array(y_vec)
assert x_vec.ndim == 1 and y_vec.ndim == 1, 'x_vec and y_vec must be 1D arrays'
assert x_vec.shape == y_vec.shape, 'x_vec and y_vec must have the same shape'
if not isinstance(num_steps, int):
raise TypeError('num_steps must be an integer < size of x_vec')
if num_steps < 2 or num_steps >= len(x_vec) // 2:
        raise ValueError('num_steps should be at least 2 and less than half the size of x_vec')
assert num_steps < x_vec.size, 'num_steps must be an integer < size of x_vec'
assert isinstance(kwargs, dict)
cmap = kwargs.pop('cmap', default_cmap)
cmap = get_cmap_object(cmap)
# Remove any color flag
_ = kwargs.pop('color', None)
pts_per_step = len(y_vec) // num_steps
for step in range(num_steps - 1):
axis.plot(x_vec[step * pts_per_step:(step + 1) * pts_per_step],
y_vec[step * pts_per_step:(step + 1) * pts_per_step],
color=cmap(255 * step // num_steps), **kwargs)
# plot the remainder:
axis.plot(x_vec[(num_steps - 1) * pts_per_step:],
y_vec[(num_steps - 1) * pts_per_step:],
color=cmap(255 * num_steps / num_steps), **kwargs)
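# Illustrative sketch (not part of the original module): rainbow_plot above
# applied to a decaying sine curve so that color encodes progression along the
# curve. Uses the module-level numpy / pyplot imports.
def _example_rainbow_plot():
    x_vec = np.linspace(0, 2 * np.pi, 512)
    y_vec = np.sin(4 * x_vec) * np.exp(-x_vec)
    fig, axis = plt.subplots()
    rainbow_plot(axis, x_vec, y_vec, num_steps=32)
    return fig, axis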
def plot_line_family(axis, x_vec, line_family, line_names=None, label_prefix='', label_suffix='',
y_offset=0, show_cbar=False, **kwargs):
"""
Plots a family of lines with a sequence of colors
Parameters
----------
axis : matplotlib.axes.Axes object
Axis to plot the curve
x_vec : array-like
Values to plot against
line_family : 2D numpy array
family of curves arranged as [curve_index, features]
line_names : array-like
array of string or numbers that represent the identity of each curve in the family
label_prefix : string / unicode
prefix for the legend (before the index of the curve)
label_suffix : string / unicode
suffix for the legend (after the index of the curve)
y_offset : (optional) number
quantity by which the lines are offset from each other vertically (useful for spectra)
show_cbar : (optional) bool
Whether or not to show a colorbar (instead of a legend)
"""
if not isinstance(axis, mpl.axes.Axes):
raise TypeError('axis must be a matplotlib.axes.Axes object')
if not isinstance(x_vec, (list, tuple, np.ndarray)):
raise TypeError('x_vec must be array-like of numbers')
x_vec = np.array(x_vec)
assert x_vec.ndim == 1, 'x_vec must be a 1D array'
if not isinstance(line_family, list):
line_family = np.array(line_family)
if not isinstance(line_family, np.ndarray):
raise TypeError('line_family must be a 2d array of numbers')
assert line_family.ndim == 2, 'line_family must be a 2D array'
    assert x_vec.size == line_family.shape[1], 'The size of the 2nd dimension of line_family must match that of x_vec'
num_lines = line_family.shape[0]
for var, var_name in zip([label_suffix, label_prefix], ['label_suffix', 'label_prefix']):
if not isinstance(var, (str, unicode)):
raise TypeError(var_name + ' needs to be a string')
if not isinstance(y_offset, Number):
raise TypeError('y_offset should be a Number')
assert isinstance(show_cbar, bool)
if line_names is not None:
if not isinstance(line_names, (list, tuple)):
raise TypeError('line_names should be a list of strings')
if not np.all([isinstance(x, (str, unicode)) for x in line_names]):
raise TypeError('line_names should be a list of strings')
if len(line_names) != num_lines:
raise ValueError('length of line_names not matching with that of line_family')
cmap = get_cmap_object(kwargs.pop('cmap', None))
if line_names is None:
# label_prefix = 'Line '
line_names = [str(line_ind) for line_ind in range(num_lines)]
line_names = ['{} {} {}'.format(label_prefix, cur_name, label_suffix) for cur_name in line_names]
for line_ind in range(num_lines):
axis.plot(x_vec, line_family[line_ind] + line_ind * y_offset,
label=line_names[line_ind],
color=cmap(int(255 * line_ind / (num_lines - 1))), **kwargs)
if show_cbar:
# put back the cmap parameter:
kwargs.update({'cmap': cmap})
_ = cbar_for_line_plot(axis, num_lines, **kwargs)
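# Illustrative sketch (not part of the original module): a small family of
# phase-shifted sine curves rendered with plot_line_family above, offset
# vertically and annotated with a colorbar instead of a legend. Uses the
# module-level numpy / pyplot imports.
def _example_plot_line_family():
    x_vec = np.linspace(0, 2 * np.pi, 256)
    line_family = np.array([np.sin(x_vec + phase)
                            for phase in np.linspace(0, np.pi, 5)])
    fig, axis = plt.subplots()
    plot_line_family(axis, x_vec, line_family, label_prefix='Phase',
                     y_offset=0.5, show_cbar=True)
    return fig, axis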
def plot_map(axis, img, show_xy_ticks=True, show_cbar=True, x_vec=None, y_vec=None,
num_ticks=4, stdevs=None, cbar_label=None, tick_font_size=14, **kwargs):
"""
Plots an image within the given axis with a color bar + label and appropriate X, Y tick labels.
This is particularly useful to get readily interpretable plots for papers
Parameters
----------
axis : matplotlib.axes.Axes object
Axis to plot this image onto
img : 2D numpy array with real values
Data for the image plot
    show_xy_ticks : bool, optional, default = True
Whether or not to show X, Y ticks
show_cbar : bool, optional, default = True
Whether or not to show the colorbar
x_vec : 1-D array-like or Number, optional
if an array-like is provided - these will be used for the tick values on the X axis
if a Number is provided, this will serve as an extent for tick values in the X axis.
For example x_vec=1.5 would cause the x tick labels to range from 0 to 1.5
y_vec : 1-D array-like or Number, optional
if an array-like is provided - these will be used for the tick values on the Y axis
if a Number is provided, this will serve as an extent for tick values in the Y axis.
For example y_vec=225 would cause the y tick labels to range from 0 to 225
num_ticks : unsigned int, optional, default = 4
Number of tick marks on the X and Y axes
stdevs : unsigned int (Optional. Default = None)
Number of standard deviations to consider for plotting. If None, full range is plotted.
cbar_label : str, optional, default = None
Labels for the colorbar. Use this for something like quantity (units)
tick_font_size : unsigned int, optional, default = 14
Font size to apply to x, y, colorbar ticks and colorbar label
kwargs : dictionary
Anything else that will be passed on to imshow
Returns
-------
im_handle : handle to image plot
handle to image plot
cbar : handle to color bar
handle to color bar
Note
----
The origin of the image will be set to the lower left corner. Use the kwarg 'origin' to change this
"""
if not isinstance(axis, mpl.axes.Axes):
raise TypeError('axis must be a matplotlib.axes.Axes object')
if not isinstance(img, np.ndarray):
raise TypeError('img should be a numpy array')
if not img.ndim == 2:
raise ValueError('img should be a 2D array')
if not isinstance(show_xy_ticks, bool):
raise TypeError('show_xy_ticks should be a boolean value')
if not isinstance(show_cbar, bool):
raise TypeError('show_cbar should be a boolean value')
# checks for x_vec and y_vec are done below
if num_ticks is not None:
if not isinstance(num_ticks, int):
raise TypeError('num_ticks should be a whole number')
if num_ticks < 2:
raise ValueError('num_ticks should be at least 2')
if tick_font_size is not None:
if not isinstance(tick_font_size, Number):
raise TypeError('tick_font_size must be a whole number')
if tick_font_size < 0:
raise ValueError('tick_font_size must be a whole number')
if stdevs is not None:
if not isinstance(stdevs, Number):
raise TypeError('stdevs should be a Number')
data_mean = np.mean(img)
data_std = np.std(img)
kwargs.update({'clim': [data_mean - stdevs * data_std,
data_mean + stdevs * data_std]})
kwargs.update({'origin': kwargs.pop('origin', 'lower')})
im_handle = axis.imshow(img, **kwargs)
assert isinstance(show_xy_ticks, bool)
if show_xy_ticks is True or x_vec is not None:
x_ticks = np.linspace(0, img.shape[1] - 1, num_ticks, dtype=int)
if x_vec is not None:
if isinstance(x_vec, (int, float)):
if x_vec > 0.01:
x_tick_labs = [str(np.round(ind * x_vec / img.shape[1], 2)) for ind in x_ticks]
else:
x_tick_labs = ['{0:.2e}'.format(ind * x_vec / img.shape[1]) for ind in x_ticks]
else:
if not isinstance(x_vec, (np.ndarray, list, tuple, range)) or len(x_vec) != img.shape[1]:
raise ValueError(
'x_vec should be array-like with shape equal to the second axis of img or img_size')
x_tick_labs = [str(np.round(x_vec[ind], 2)) for ind in x_ticks]
else:
x_tick_labs = [str(ind) for ind in x_ticks]
axis.set_xticks(x_ticks)
axis.set_xticklabels(x_tick_labs)
set_tick_font_size(axis, tick_font_size)
else:
axis.set_xticks([])
if show_xy_ticks is True or y_vec is not None:
y_ticks = np.linspace(0, img.shape[0] - 1, num_ticks, dtype=int)
if y_vec is not None:
if isinstance(y_vec, (int, float)):
if y_vec > 0.01:
                    y_tick_labs = [str(np.round(ind * y_vec / img.shape[0], 2)) for ind in y_ticks]
                else:
                    y_tick_labs = ['{0:.2e}'.format(ind * y_vec / img.shape[0]) for ind in y_ticks]
else:
if not isinstance(y_vec, (np.ndarray, list, tuple, range)) or len(y_vec) != img.shape[0]:
raise ValueError('y_vec should be array-like with shape equal to the first axis of img')
y_tick_labs = [str(np.round(y_vec[ind], 2)) for ind in y_ticks]
else:
y_tick_labs = [str(ind) for ind in y_ticks]
axis.set_yticks(y_ticks)
axis.set_yticklabels(y_tick_labs)
set_tick_font_size(axis, tick_font_size)
else:
axis.set_yticks([])
cbar = None
if not isinstance(show_cbar, bool):
show_cbar = False
if show_cbar:
cbar = plt.colorbar(im_handle, ax=axis, orientation='vertical',
fraction=0.046, pad=0.04, use_gridspec=True)
# cbar = axis.cbar_axes[count].colorbar(im_handle)
if cbar_label is not None:
if not isinstance(cbar_label, (str, unicode)):
raise TypeError('cbar_label should be a string')
cbar.set_label(cbar_label, fontsize=tick_font_size)
cbar.ax.tick_params(labelsize=tick_font_size)
return im_handle, cbar
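# Illustrative sketch (not part of the original module): plot_map above applied
# to a smooth synthetic 2D image, with made-up physical extents of 10 x 5
# (arbitrary units) driving the tick labels. Uses the module-level numpy /
# pyplot imports.
def _example_plot_map():
    col_mat, row_mat = np.meshgrid(np.linspace(0, 1, 128), np.linspace(0, 1, 64))
    img = np.sin(6 * col_mat) * np.cos(4 * row_mat)
    fig, axis = plt.subplots()
    im_handle, cbar = plot_map(axis, img, x_vec=10, y_vec=5, num_ticks=5,
                               cbar_label='Amplitude (a. u.)')
    return fig, axis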
def plot_curves(excit_wfms, datasets, line_colors=[], dataset_names=[], evenly_spaced=True,
num_plots=25, x_label='', y_label='', subtitle_prefix='Position', title='',
use_rainbow_plots=False, fig_title_yoffset=1.05, h5_pos=None, **kwargs):
"""
    Plots curves / spectra from multiple datasets from up to 25 evenly spaced positions
Parameters
-----------
excit_wfms : 1D numpy float array or list of same
Excitation waveform in the time domain
datasets : list of 2D numpy arrays or 2D hyp5.Dataset objects
Datasets containing data arranged as (pixel, time)
line_colors : list of strings
Colors to be used for each of the datasets
dataset_names : (Optional) list of strings
Names of the different datasets to be compared
evenly_spaced : boolean
Evenly spaced positions or first N positions
num_plots : unsigned int
Number of plots
x_label : (optional) String
X Label for all plots
y_label : (optional) String
Y label for all plots
subtitle_prefix : (optional) String
prefix for title over each plot
title : (optional) String
Main plot title
use_rainbow_plots : (optional) Boolean
Plot the lines as a function of spectral index (eg. time)
fig_title_yoffset : (optional) float
Y offset for the figure title. Value should be around 1
h5_pos : HDF5 dataset reference or 2D numpy array
Dataset containing position indices
Returns
---------
fig, axes
"""
for var, var_name in zip([use_rainbow_plots, evenly_spaced], ['use_rainbow_plots', 'evenly_spaced']):
if not isinstance(var, bool):
raise TypeError(var_name + ' should be of type: bool')
for var, var_name in zip([x_label, y_label, subtitle_prefix, title],
['x_label', 'y_label', 'subtitle_prefix', 'title']):
if var is not None:
if not isinstance(var, (str, unicode)):
raise TypeError(var_name + ' should be of type: str')
else:
var = ''
if fig_title_yoffset is not None:
if not isinstance(fig_title_yoffset, Number):
raise TypeError('fig_title_yoffset should be a Number')
else:
fig_title_yoffset = 1.0
if h5_pos is not None:
if not isinstance(h5_pos, h5py.Dataset):
raise TypeError('h5_pos should be a h5py.Dataset object')
if not isinstance(num_plots, int) or num_plots < 1:
        raise TypeError('num_plots should be a positive integer')
for var, var_name, dim_size in zip([datasets, excit_wfms], ['datasets', 'excit_wfms'], [2, 1]):
mesg = '{} should be {}D arrays or iterables (list or tuples) of {}D arrays' \
'.'.format(var_name, dim_size, dim_size)
if isinstance(var, (h5py.Dataset, np.ndarray)):
if not len(var.shape) == dim_size:
raise ValueError(mesg)
elif isinstance(var, (list, tuple)):
if not np.all([isinstance(dset, (h5py.Dataset, np.ndarray)) for dset in datasets]):
raise TypeError(mesg)
else:
raise TypeError(mesg)
# modes:
# 0 = one excitation waveform and one dataset
# 1 = one excitation waveform but many datasets
# 2 = one excitation waveform for each of many dataset
if isinstance(datasets, (h5py.Dataset, np.ndarray)):
# can be numpy array or h5py.dataset
num_pos = datasets.shape[0]
num_points = datasets.shape[1]
datasets = [datasets]
if isinstance(excit_wfms, (np.ndarray, h5py.Dataset)):
excit_wfms = [excit_wfms]
elif isinstance(excit_wfms, list):
if len(excit_wfms) == num_points:
excit_wfms = [np.array(excit_wfms)]
elif len(excit_wfms) == 1 and len(excit_wfms[0]) == num_points:
excit_wfms = [np.array(excit_wfms[0])]
else:
raise ValueError('If only a single dataset is provided, excit_wfms should be a 1D array')
line_colors = ['b']
dataset_names = ['Default']
mode = 0
else:
# dataset is a list of datasets
# First check if the datasets are correctly shaped:
num_pos_es = list()
num_points_es = list()
for dataset in datasets:
if not isinstance(dataset, (h5py.Dataset, np.ndarray)):
raise TypeError('datasets can be a list of 2D h5py.Dataset or numpy array objects')
if len(dataset.shape) != 2:
                raise ValueError('Each dataset should be a 2D array')
num_pos_es.append(dataset.shape[0])
num_points_es.append(dataset.shape[1])
num_pos_es = np.array(num_pos_es)
num_points_es = np.array(num_points_es)
if np.unique(num_pos_es).size > 1: # or np.unique(num_points_es).size > 1:
raise ValueError('The first dimension of the datasets are not matching: ' + str(num_pos_es))
num_pos = np.unique(num_pos_es)[0]
if len(excit_wfms) == len(datasets):
# one excitation waveform per dataset but now verify each size
if not np.all([len(cur_ex) == cur_dset.shape[1] for cur_ex, cur_dset in zip(excit_wfms, datasets)]):
raise ValueError('Number of points in the datasets do not match with the excitation waveforms')
mode = 2
else:
# one excitation waveform for all datasets
if np.unique(num_points_es).size > 1:
                raise ValueError('Datasets do not contain the same number of points: ' + str(num_points_es))
# datasets of the same size but does this match with the size of excitation waveforms:
if len(excit_wfms) != np.unique(num_points_es)[0]:
raise ValueError('Number of points in dataset not matching with shape of excitation waveform')
excit_wfms = [excit_wfms]
mode = 1
for var, var_name in zip([dataset_names, line_colors], ['dataset_names', 'line_colors']):
if not isinstance(var, (list, tuple)) or not np.all([isinstance(x, (str, unicode)) for x in var]):
raise TypeError(var_name + ' should be a list of strings')
if len(var) > 0 and len(var) != len(datasets):
            raise ValueError(var_name + ' is not of same length as datasets: ' + str(len(datasets)))
# Next the identification of datasets:
if len(dataset_names) == 0:
dataset_names = ['Dataset' + ' ' + str(x) for x in range(len(dataset_names), len(datasets))]
if len(line_colors) == 0:
# TODO: Generate colors from a user-specified colormap or consider using line family
color_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'pink', 'brown', 'orange']
        if len(datasets) <= len(color_list):
            remaining_colors = [x for x in color_list if x not in line_colors]
            line_colors += remaining_colors[:len(datasets) - len(line_colors)]
else:
raise ValueError('Insufficient number of line colors provided')
# cannot support rainbows with multiple datasets!
use_rainbow_plots = use_rainbow_plots and len(datasets) == 1
if mode != 2:
# convert it to something like mode 2
excit_wfms = [excit_wfms[0] for _ in range(len(datasets))]
if mode != 0:
# users are not allowed to specify colors
_ = kwargs.pop('color', None)
num_plots = min(min(num_plots, 49), num_pos)
nrows, ncols = get_plot_grid_size(num_plots)
if evenly_spaced:
chosen_pos = np.linspace(0, num_pos - 1, nrows * ncols, dtype=int)
else:
chosen_pos = np.arange(nrows * ncols, dtype=int)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, figsize=(12, 12))
axes_lin = axes.flatten()
for count, posn in enumerate(chosen_pos):
if use_rainbow_plots:
rainbow_plot(axes_lin[count], excit_wfms[0], datasets[0][posn], **kwargs)
else:
for dataset, ex_wfm, col_val in zip(datasets, excit_wfms, line_colors):
axes_lin[count].plot(ex_wfm, dataset[posn], color=col_val, **kwargs)
if h5_pos is not None:
# print('Row ' + str(h5_pos[posn,1]) + ' Col ' + str(h5_pos[posn,0]))
axes_lin[count].set_title('Row ' + str(h5_pos[posn, 1]) + ' Col ' + str(h5_pos[posn, 0]), fontsize=12)
else:
axes_lin[count].set_title(subtitle_prefix + ' ' + str(posn), fontsize=12)
if count % ncols == 0:
axes_lin[count].set_ylabel(y_label, fontsize=12)
if count >= (nrows - 1) * ncols:
axes_lin[count].set_xlabel(x_label, fontsize=12)
axes_lin[count].axis('tight')
axes_lin[count].set_aspect('auto')
axes_lin[count].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if len(datasets) > 1:
axes_lin[count].legend(dataset_names, loc='best')
if title:
fig.suptitle(title, fontsize=14, y=fig_title_yoffset)
plt.tight_layout()
return fig, axes
###############################################################################
def plot_complex_spectra(map_stack, x_vec=None, num_comps=4, title=None, x_label='', y_label='', evenly_spaced=True,
subtitle_prefix='Component', amp_units=None, stdevs=2, **kwargs):
"""
Plots the amplitude and phase components of the provided stack of complex valued spectrograms (2D images)
Parameters
-------------
map_stack : 2D or 3D numpy complex matrices
stack of complex valued 1D spectra arranged as [component, spectra] or
2D images arranged as - [component, row, col]
x_vec : 1D array-like, optional, default=None
If the data are spectra (1D) instead of spectrograms (2D), x_vec is the reference array against which
num_comps : int
Number of images to plot
title : str, optional
Title to plot above everything else
x_label : str, optional
Label for x axis
y_label : str, optional
Label for y axis
evenly_spaced : bool, optional. Default = True
If True, images will be sampled evenly over the given dataset. Else, the first num_comps images will be plotted
subtitle_prefix : str, optional
Prefix for the title over each image
amp_units : str, optional
Units for amplitude
stdevs : int
Number of standard deviations to consider for plotting
**kwargs will be passed on either to plot_map() or pyplot.plot()
Returns
---------
fig, axes
"""
if not isinstance(map_stack, np.ndarray) or not map_stack.ndim in [2, 3]:
raise TypeError('map_stack should be a 2/3 dimensional array arranged as [component, row, col] or '
                        '[component, spectra]')
if x_vec is not None:
if not isinstance(x_vec, (list, tuple, np.ndarray)):
raise TypeError('x_vec should be a 1D array')
x_vec = np.array(x_vec)
if x_vec.ndim != 1:
raise ValueError('x_vec should be a 1D array')
if x_vec.size != map_stack.shape[1]:
raise ValueError('x_vec: {} should be of the same size as the second dimension of map_stack: '
'{}'.format(x_vec.shape, map_stack.shape))
else:
if map_stack.ndim == 2:
x_vec = np.arange(map_stack.shape[1])
if num_comps is None:
num_comps = 4 # Default
else:
if not isinstance(num_comps, int) or not num_comps > 0:
raise TypeError('num_comps should be a positive integer')
for var, var_name in zip([title, x_label, y_label, subtitle_prefix, amp_units],
['title', 'x_label', 'y_label', 'subtitle_prefix', 'amp_units']):
if var is not None:
if not isinstance(var, (str, unicode)):
raise TypeError(var_name + ' should be a string')
if amp_units is None:
amp_units = 'a.u.'
if stdevs is not None:
if not isinstance(stdevs, Number) or stdevs <= 0:
raise TypeError('stdevs should be a positive number')
num_comps = min(24, min(num_comps, map_stack.shape[0]))
if evenly_spaced:
chosen_pos = np.linspace(0, map_stack.shape[0] - 1, num_comps, dtype=int)
else:
chosen_pos = np.arange(num_comps, dtype=int)
nrows, ncols = get_plot_grid_size(num_comps)
figsize = kwargs.pop('figsize', (4, 4)) # Individual plot size
figsize = (figsize[0] * ncols, figsize[1] * nrows)
fig, axes = plt.subplots(nrows * 2, ncols, figsize=figsize)
fig.subplots_adjust(hspace=0.1, wspace=0.4)
if title is not None:
fig.canvas.set_window_title(title)
fig.suptitle(title, y=1.025)
title_prefix = ''
for comp_counter, comp_pos in enumerate(chosen_pos):
ax_ind = (comp_counter // ncols) * (2 * ncols) + comp_counter % ncols
cur_axes = [axes.flat[ax_ind], axes.flat[ax_ind + ncols]]
funcs = [np.abs, np.angle]
labels = ['Amplitude (' + amp_units + ')', 'Phase (rad)']
for func, comp_name, axis, std_val in zip(funcs, labels, cur_axes, [stdevs, None]):
y_vec = func(map_stack[comp_pos])
if map_stack.ndim > 2:
kwargs['stdevs'] = std_val
_ = plot_map(axis, y_vec, **kwargs)
else:
axis.plot(x_vec, y_vec, **kwargs)
if num_comps > 1:
title_prefix = '%s %d - ' % (subtitle_prefix, comp_counter)
axis.set_title('%s%s' % (title_prefix, comp_name))
axis.set_aspect('auto')
if ax_ind % ncols == 0:
axis.set_ylabel(y_label)
if np.ceil((ax_ind + ncols) / ncols) == nrows:
axis.set_xlabel(x_label)
fig.tight_layout()
return fig, axes
###############################################################################
def plot_scree(scree, title='Scree', **kwargs):
"""
    Plots the scree (variance vs. component index)
Parameters
-------------
scree : 1D real numpy array
The scree vector from SVD
title : str
Figure title. Default Scree
Returns
---------
fig, axes
"""
if isinstance(scree, (list, tuple)):
scree = np.array(scree)
if not (isinstance(scree, np.ndarray) or isinstance(scree, h5py.Dataset)):
raise TypeError('scree must be a 1D array or Dataset')
if not isinstance(title, (str, unicode)):
raise TypeError('title must be a string')
fig = plt.figure(figsize=kwargs.pop('figsize', (6.5, 6)))
axis = fig.add_axes([0.1, 0.1, .8, .8]) # left, bottom, width, height (range 0 to 1)
kwargs.update({'color': kwargs.pop('color', 'b')})
kwargs.update({'marker': kwargs.pop('marker', '*')})
axis.loglog(np.arange(len(scree)) + 1, scree, **kwargs)
axis.set_xlabel('Component')
axis.set_ylabel('Variance')
axis.set_title(title)
axis.set_xlim(left=1, right=len(scree))
axis.set_ylim(bottom=np.min(scree), top=np.max(scree))
fig.canvas.set_window_title("Scree")
return fig, axis
# ###############################################################################
def plot_map_stack(map_stack, num_comps=9, stdevs=2, color_bar_mode=None, evenly_spaced=False, reverse_dims=False,
subtitle='Component', title='Map Stack', colorbar_label='', fig_mult=(5, 5), pad_mult=(0.1, 0.07),
x_label=None, y_label=None, title_yoffset=None, title_size=None, **kwargs):
"""
Plots the provided stack of maps
Parameters
-------------
map_stack : 3D real numpy array
structured as [component, rows, cols]
num_comps : unsigned int
Number of components to plot
stdevs : int
Number of standard deviations to consider for plotting
color_bar_mode : String, Optional
Options are None, single or each. Default None
evenly_spaced : bool
Default False
reverse_dims : Boolean (Optional), default = False
Set this to True to accept data structured as [rows, cols, component]
subtitle : String or list of strings
The titles for each of the plots.
If a single string is provided, the plot titles become ['title 01', title 02', ...].
if a list of strings (equal to the number of components) are provided, these are used instead.
title : String
        Title for the whole figure. Default 'Map Stack'
colorbar_label : String
label for colorbar. Default is an empty string.
fig_mult : length 2 array_like of uints
Size multipliers for the figure. Figure size is calculated as (num_rows*`fig_mult[0]`, num_cols*`fig_mult[1]`).
Default (4, 4)
pad_mult : length 2 array_like of floats
Multipliers for the axis padding between plots in the stack. Padding is calculated as
(pad_mult[0]*fig_mult[1], pad_mult[1]*fig_mult[0]) for the width and height padding respectively.
Default (0.1, 0.07)
x_label : (optional) String
X Label for all plots
y_label : (optional) String
Y label for all plots
title_yoffset : float
Offset to move the figure title vertically in the figure
title_size : float
Size of figure title
kwargs : dictionary
Keyword arguments to be passed to either matplotlib.pyplot.figure, mpl_toolkits.axes_grid1.ImageGrid, or
        pycroscopy.vis.plot_utils.plot_map. See specific function documentation for the relevant options.
Returns
---------
fig, axes
"""
if not isinstance(map_stack, np.ndarray) or not map_stack.ndim == 3:
raise TypeError('map_stack should be a 3 dimensional array arranged as [component, row, col]')
if num_comps is None:
num_comps = 4 # Default
else:
if not isinstance(num_comps, int) or num_comps < 1:
raise TypeError('num_comps should be a positive integer')
for var, var_name in zip([title, colorbar_label, color_bar_mode, x_label, y_label],
['title', 'colorbar_label', 'color_bar_mode', 'x_label', 'y_label']):
if var is not None:
if not isinstance(var, (str, unicode)):
raise TypeError(var_name + ' should be a string')
if title is None:
title = ''
if colorbar_label is None:
colorbar_label = ''
if x_label is None:
x_label = ''
if y_label is None:
y_label = ''
if color_bar_mode not in [None, 'single', 'each']:
raise ValueError('color_bar_mode must be either None, "single", or "each"')
for var, var_name in zip([stdevs, title_yoffset, title_size],
['stdevs', 'title_yoffset', 'title_size']):
if var is not None:
if not isinstance(var, Number) or var <= 0:
raise TypeError(var_name + ' of value: {} should be a number > 0'.format(var))
for var, var_name in zip([evenly_spaced, reverse_dims], ['evenly_spaced', 'reverse_dims']):
if not isinstance(var, bool):
raise TypeError(var_name + ' should be a bool')
for var, var_name in zip([fig_mult, pad_mult], ['fig_mult', 'pad_mult']):
if not isinstance(var, (list, tuple, np.ndarray)) or len(var) != 2:
raise TypeError(var_name + ' should be a tuple / list / numpy array of size 2')
if not np.all([x > 0 and isinstance(x, Number) for x in var]):
raise ValueError(var_name + ' should contain positive numbers')
if reverse_dims:
map_stack = np.transpose(map_stack, (2, 0, 1))
num_comps = abs(num_comps)
num_comps = min(num_comps, map_stack.shape[0])
if evenly_spaced:
chosen_pos = np.linspace(0, map_stack.shape[0] - 1, num_comps, dtype=int)
else:
chosen_pos = np.arange(num_comps, dtype=int)
if isinstance(subtitle, list):
if len(subtitle) > num_comps:
# remove additional subtitles
subtitle = subtitle[:num_comps]
elif len(subtitle) < num_comps:
# add subtitles
subtitle += ['Component' + ' ' + str(x) for x in range(len(subtitle), num_comps)]
else:
if not isinstance(subtitle, str):
subtitle = 'Component'
subtitle = [subtitle + ' ' + str(x) for x in chosen_pos]
fig_h, fig_w = fig_mult
p_rows, p_cols = get_plot_grid_size(num_comps)
if p_rows * p_cols < num_comps:
p_cols += 1
pad_w, pad_h = pad_mult
'''
Set defaults for kwargs to the figure creation and extract any non-default values from current kwargs
'''
figkwargs = dict()
if sys.version_info.major == 3:
inspec_func = inspect.getfullargspec
else:
inspec_func = inspect.getargspec
for key in inspec_func(plt.figure).args:
if key in kwargs:
figkwargs.update({key: kwargs.pop(key)})
fig = plt.figure(figsize=(p_cols * fig_w, p_rows * fig_h), **figkwargs)
'''
Set defaults for kwargs to the ImageGrid and extract any non-default values from current kwargs
'''
igkwargs = {'cbar_pad': '1%',
'cbar_size': '5%',
'cbar_location': 'right',
'direction': 'row',
'add_all': True,
'share_all': False,
'aspect': True,
'label_mode': 'L'}
for key in igkwargs.keys():
if key in kwargs:
igkwargs.update({key: kwargs.pop(key)})
axes = ImageGrid(fig, 111, nrows_ncols=(p_rows, p_cols),
cbar_mode=color_bar_mode,
axes_pad=(pad_w * fig_w, pad_h * fig_h),
**igkwargs)
fig.canvas.set_window_title(title)
# These parameters have not been easy to fix:
if title_yoffset is None:
title_yoffset = 0.9
if title_size is None:
title_size = 16 + (p_rows + p_cols)
fig.suptitle(title, fontsize=title_size, y=title_yoffset)
for count, index, curr_subtitle in zip(range(chosen_pos.size), chosen_pos, subtitle):
im, im_cbar = plot_map(axes[count],
map_stack[index],
stdevs=stdevs, show_cbar=False, **kwargs)
axes[count].set_title(curr_subtitle)
if color_bar_mode == 'each':
cb = axes.cbar_axes[count].colorbar(im)
cb.set_label_text(colorbar_label)
if count % p_cols == 0:
axes[count].set_ylabel(y_label)
if count >= (p_rows - 1) * p_cols:
axes[count].set_xlabel(x_label)
if color_bar_mode == 'single':
cb = axes.cbar_axes[0].colorbar(im)
cb.set_label_text(colorbar_label)
return fig, axes
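# A minimal usage sketch for the stack-plotting helper above. The array and argument
# values below are assumptions for illustration only (a 3D numpy array arranged as
# [component, row, col]):
#
#     abundance_maps = np.random.rand(9, 128, 128)
#     fig, axes = plot_map_stack(abundance_maps, num_comps=6, title='Abundance Maps',
#                                color_bar_mode='single', stdevs=2)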
###############################################################################
def export_fig_data(fig, filename, include_images=False):
"""
Export the data of all plots in the figure `fig` to a plain text file.
Parameters
----------
fig : matplotlib.figure.Figure
The figure containing the data to be exported
filename : str
The filename of the output text file
include_images : bool
Should images in the figure also be exported
Returns
-------
"""
# Get the data from the figure
axes = fig.get_axes()
axes_dict = dict()
for ax in axes:
ax_dict = dict()
ims = ax.get_images()
if len(ims) != 0 and include_images:
im_dict = dict()
for im in ims:
# Image data
im_lab = im.get_label()
im_dict['data'] = im.get_array().data
# X-Axis
x_ax = ax.get_xaxis()
x_lab = x_ax.label.get_label()
if x_lab == '':
x_lab = 'X'
im_dict[x_lab] = x_ax.get_data_interval()
# Y-Axis
y_ax = ax.get_yaxis()
y_lab = y_ax.label.get_label()
if y_lab == '':
y_lab = 'Y'
im_dict[y_lab] = y_ax.get_data_interval()
ax_dict['Images'] = {im_lab: im_dict}
lines = ax.get_lines()
if len(lines) != 0:
line_dict = dict()
xlab = ax.get_xlabel()
ylab = ax.get_ylabel()
if xlab == '':
xlab = 'X Data'
if ylab == '':
ylab = 'Y Data'
for line in lines:
line_dict[line.get_label()] = {xlab: line.get_xdata(),
ylab: line.get_ydata()}
ax_dict['Lines'] = line_dict
if ax_dict != dict():
axes_dict[ax.get_title()] = ax_dict
'''
Now that we have the data from the figure, we need to write it to file.
'''
filename = os.path.abspath(filename)
basename, ext = os.path.splitext(filename)
folder, _ = os.path.split(basename)
spacer = '**********************************************\n'  # not a raw string, so the newline is actually written
data_file = open(filename, 'w')
data_file.write(fig.get_label() + '\n')
data_file.write('\n')
for ax_lab, ax in axes_dict.items():
data_file.write('Axis: {} \n'.format(ax_lab))
if 'Images' not in ax:
continue
for im_lab, im in ax['Images'].items():
data_file.write('Image: {} \n'.format(im_lab))
data_file.write('\n')
im_data = im.pop('data')
for row in im_data:
row.tofile(data_file, sep='\t', format='%s')
data_file.write('\n')
data_file.write('\n')
for key, val in im.items():
data_file.write(key + '\n')
val.tofile(data_file, sep='\n', format='%s')
data_file.write('\n')
data_file.write(spacer)
if 'Lines' not in ax:
continue
for line_lab, line_dict in ax['Lines'].items():
data_file.write('Line: {} \n'.format(line_lab))
data_file.write('\n')
dim1, dim2 = line_dict.keys()
data_file.write('{} \t {} \n'.format(dim1, dim2))
for val1, val2 in zip(line_dict[dim1], line_dict[dim2]):
data_file.write('{} \t {} \n'.format(str(val1), str(val2)))
data_file.write(spacer)
data_file.write(spacer)
data_file.close()
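# Hypothetical usage of export_fig_data (the file name is a placeholder):
#
#     fig, axes = plt.subplots()
#     axes.plot([0, 1, 2], [0, 1, 4], label='quadratic')
#     export_fig_data(fig, 'figure_data.txt', include_images=False)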
| 38.710744 | 120 | 0.611715 |
b78f1effbd0ed70c26c53822c8576d3bd3708f1d
| 23,949 |
py
|
Python
|
HEC/ArcGeom.py
|
LANINCE3/LAN_INF
|
6c02093e5d48fe60f672717d86a1c29571fa4b56
|
[
"MIT"
] | null | null | null |
HEC/ArcGeom.py
|
LANINCE3/LAN_INF
|
6c02093e5d48fe60f672717d86a1c29571fa4b56
|
[
"MIT"
] | null | null | null |
HEC/ArcGeom.py
|
LANINCE3/LAN_INF
|
6c02093e5d48fe60f672717d86a1c29571fa4b56
|
[
"MIT"
] | null | null | null |
import os
import arcpy
import traceback
from Geom import get_legs, pi, getTheta, get_segment_length
from math import sin, cos, hypot
import bisect
from gc import collect
def buffer_inlets_and_roadside(line_feature, point_feature, output_feature, buff_distance):
buf_point_fc = os.path.join(os.path.dirname(output_feature), "{0}_buf".format(os.path.basename(point_feature)))
buf_line_fc = os.path.join(os.path.dirname(output_feature), "{0}_buf".format(os.path.basename(line_feature)))
pass
def disolve_subcatchment_on_area(polygon_fc, field_name, out_folder):
out_fc_path = os.path.join(out_folder, "{0}_SB".format(os.path.basename(polygon_fc)))
arcpy.Dissolve_management(in_features=polygon_fc,out_feature_class=out_fc_path, dissolve_field=field_name,
multi_part="SINGLE_PART", statistics_fields="LAST")
def get_single_poly_centroid(fc, id_field):
with arcpy.da.SearchCursor (fc , [ "OID@" , id_field, "SHAPE@" ]) as cursor:
pg = cursor[0][2]
cenX, cenY = pg.centroid.X, pg.centroid.Y
return cenX, cenY
def get_polygon_centroids(fc, id_field, exp=""):
with arcpy.da.SearchCursor (fc , [ "OID@" , id_field, "SHAPE@" ] , exp) as cursor:
for row in cursor:
pg = row[2]
if pg.isMultipart:
pass
else:
cenX, cenY = pg.centroid.X, pg.centroid.Y
return cenX, cenY
# def buffer_polylines(fc, id_field, exp, buffer_distance):
# with arcpy.da.SearchCursor (fc , [ "OID@" , id_field, "SHAPE@" ] , exp) as cursor:
# for row in cursor:
# pline = row[2]
# pline.buffer()
def create_fishnet(boundry_fc, boundry_fc_id_field, point_fc, output_gdb, cell_size=370, cell_size_units="Feet"):
sr = getSpatialReferencefactoryCode(boundry_fc)
#Buffers FC
buffer_distance = 3*cell_size
buf_str = "{0} {1}".format(int(buffer_distance), cell_size_units)
cenX, cenY = get_single_poly_centroid(boundry_fc,boundry_fc_id_field)
buffer_fc = os.path.join(output_gdb,'{0}_buff'.format(os.path.basename(point_fc)))
buffered_points = arcpy.Buffer_analysis(point_fc, buffer_fc, buf_str, line_side="FULL", method="PLANAR")
def getSpatialReferencefactoryCode(fc):
spatial_ref = arcpy.Describe(fc).spatialReference
return spatial_ref.factoryCode
def get_intersection(l1, l2):
"""
:param l1:
:param l2:
:return:
"""
pass
def get_vertices(fc, exp):
"""Returns points of a point feature class
:param fc:
:param exp:
:return:
"""
try:
coordinate_array = []
total_lenght = 0.0
with arcpy.da.SearchCursor(fc, ["OID@", "SHAPE@"], exp) as cursor:
for row in cursor:
part_array = []
total_lenght += row[1].length
for part in row[1]:
for pnt in part:
if pnt:
part_array.append([round(float(pnt.X), 7), round(float(pnt.Y), 7)])
coordinate_array.append(part_array)
return coordinate_array, total_lenght
except:
print('{0}'.format(traceback.format_exc()))
def generate_xy_stations(coordinate_array, toatl_length, sta_dist=50, start_station_float=0.0):
try:
oids, stations, x_coords, y_coords, = [], [], [], []
running_length_total = None
if int(sta_dist)==0:
sta_dist = 50
station_bins = list(range(0, 999999995, sta_dist))
start_station = None
previous_station = None
x1, y1 = None, None
prevpart_x, prevpart_y = None, None
j = 0
if start_station_float != 0.0:
running_length_total = float(start_station_float)
else:
running_length_total = 0.0
part_arrrays_to_revisit = []
for k in range(len(coordinate_array)):
part_array = coordinate_array[k]
if k == 0:
for i in range(len(part_array)):
if i < len(part_array) - 1:
pnt_1 = part_array[i]
pnt_2 = part_array[i + 1]
# a is length in x direction, b is length in y direction
a, b = get_legs(pnt_1[0], pnt_1[1], pnt_2[0], pnt_2[1])
x1, y1, = pnt_1[0], pnt_1[1]
x2, y2 = pnt_2[0], pnt_2[1]
if b == 0.0:
theta = 0
elif a == 0.0:
theta = pi / 2.0
else:
theta = getTheta(b, a)
length = abs(get_segment_length(pnt_1[0], pnt_1[1], pnt_2[0], pnt_2[1]))
if i == 0:
oids.append(j)
start_station = '{:.02f}'.format(round(toatl_length - running_length_total,2))
previous_station = running_length_total
stations.append(start_station)
x_coords.append(x1)
y_coords.append(y1)
# print('index: {0} station: {1}\n\t|-x: {2}\ty: {3}'.format(i, start_station, x1, y1))
next_station_float = station_bins[
bisect.bisect_right(station_bins, running_length_total)]
# Begins creating station-ing points
if running_length_total + length <= next_station_float:
pass
else:
while running_length_total + length > next_station_float:
station = '{:.02f}'.format (round (toatl_length - next_station_float , 2))
dif = next_station_float - previous_station
if theta > 0 and theta < pi / 2.0:
# print('\t#Quadrant 1, theta:{0}'.format(degrees(theta)))
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta > pi / 2.0) and (theta < pi):
# print('\t#Quadrant 2')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta > pi and (theta < 3 * pi / 2.0):
# print('\t#Quadrant 3')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta > 3 * pi / 2.0 and (theta < 2.0 * pi):
# print('\t#Quadrant 4')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta < 0 and (theta > - pi / 2.0):
# print('\t#Quadrant 4, theta:{0}'.format(degrees(theta)))
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta < - pi / 2.0) and (theta > - pi):
# print('\tQuandrant 3')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta < - pi) and (theta > - 3 * pi / 2.0):
# print('\tQuandrant 2')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta == 0) or (theta == 2 * pi):
# X Axis
x1 += dif
elif (theta == pi / 2.0) or (theta == pi / -2.0):
# Y Axis
y1 += dif
elif (theta > pi * 2.0) or (theta < - pi * 2.0):
print('\n\n!!!!ARGGGGG!!!!!\n\n')
else:
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
j += 1
oids.append(j)
stations.append(station)
x_coords.append(x1)
y_coords.append(y1)
previous_station = next_station_float
next_station_float += sta_dist
running_length_total += length
previous_station = running_length_total
else:
pnt_1 = part_array[i]
prevpart_x, prevpart_y, = pnt_1[0], pnt_1[1]
else:
xi, yi = part_array[0][0], part_array[0][1]
xf, yf = part_array[len(part_array) - 1][0], part_array[len(part_array) - 1][1]
if (round(prevpart_x, 2) == round(xi, 2)) and (round(prevpart_y, 2) == round(yi, 2)):
for i in range(len(part_array)):
if i < len(part_array) - 1:
pnt_1 = part_array[i]
pnt_2 = part_array[i + 1]
# a is length in x direction, b is length in y direction
a, b = get_legs(pnt_1[0], pnt_1[1], pnt_2[0], pnt_2[1])
x1, y1, = pnt_1[0], pnt_1[1]
x2, y2 = pnt_2[0], pnt_2[1]
if b == 0.0:
theta = 0
elif a == 0.0:
theta = pi / 2.0
else:
theta = getTheta(b, a)
length = abs(get_segment_length(pnt_1[0], pnt_1[1], pnt_2[0], pnt_2[1]))
next_station_float = station_bins[
bisect.bisect_right(station_bins, running_length_total)]
# Begins creating station-ing points
if running_length_total + length <= next_station_float:
pass
else:
while running_length_total + length > next_station_float:
station = '{:.02f}'.format (round (toatl_length - next_station_float , 2))
dif = next_station_float - previous_station
if theta > 0 and theta < pi / 2.0:
# print('\t#Quadrant 1, theta:{0}'.format(degrees(theta)))
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta > pi / 2.0) and (theta < pi):
# print('\t#Quadrant 2')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta > pi and (theta < 3 * pi / 2.0):
# print('\t#Quadrant 3')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta > 3 * pi / 2.0 and (theta < 2.0 * pi):
# print('\t#Quadrant 4')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta < 0 and (theta > - pi / 2.0):
# print('\t#Quadrant 4, theta:{0}'.format(degrees(theta)))
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta < - pi / 2.0) and (theta > - pi):
# print('\tQuandrant 3')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta < - pi) and (theta > - 3 * pi / 2.0):
# print('\tQuandrant 2')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta == 0) or (theta == 2 * pi):
# X Axis
x1 += dif
elif (theta == pi / 2.0) or (theta == pi / -2.0):
# Y Axis
y1 += dif
elif (theta > pi * 2.0) or (theta < - pi * 2.0):
print('\n\n!!!!ARGGGGG!!!!!\n\n')
else:
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
j += 1
oids.append(j)
stations.append(station)
x_coords.append(x1)
y_coords.append(y1)
previous_station = next_station_float
next_station_float += sta_dist
running_length_total += length
previous_station = running_length_total
else:
pnt_1 = part_array[i]
prevpart_x, prevpart_y, = pnt_1[0], pnt_1[1]
elif (round(prevpart_x, 2) == round(xf, 2)) and (round(prevpart_y, 2) == round(yf, 2)):
part_array = part_array[::-1]  # list.reverse() returns None, so reverse via slicing instead
for i in range(len(part_array)):
if i < len(part_array) - 1:
pnt_1 = part_array[i]
pnt_2 = part_array[i + 1]
# a is length in x direction, b is length in y direction
a, b = get_legs(pnt_1[0], pnt_1[1], pnt_2[0], pnt_2[1])
x1, y1, = pnt_1[0], pnt_1[1]
x2, y2 = pnt_2[0], pnt_2[1]
if b == 0.0:
theta = 0
elif a == 0.0:
theta = pi / 2.0
else:
theta = getTheta(b, a)
length = abs(get_segment_length(pnt_1[0], pnt_1[1], pnt_2[0], pnt_2[1]))
next_station_float = station_bins[
bisect.bisect_right(station_bins, running_length_total)]
# Begins creating station-ing points
if running_length_total + length <= next_station_float:
pass
else:
while running_length_total + length > next_station_float:
station = '{:.02f}'.format (round (toatl_length - next_station_float , 2))
dif = next_station_float - previous_station
if theta > 0 and theta < pi / 2.0:
# print('\t#Quadrant 1, theta:{0}'.format(degrees(theta)))
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta > pi / 2.0) and (theta < pi):
# print('\t#Quadrant 2')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta > pi and (theta < 3 * pi / 2.0):
# print('\t#Quadrant 3')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta > 3 * pi / 2.0 and (theta < 2.0 * pi):
# print('\t#Quadrant 4')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif theta < 0 and (theta > - pi / 2.0):
# print('\t#Quadrant 4, theta:{0}'.format(degrees(theta)))
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta < - pi / 2.0) and (theta > - pi):
# print('\tQuandrant 3')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta < - pi) and (theta > - 3 * pi / 2.0):
# print('\tQuandrant 2')
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
elif (theta == 0) or (theta == 2 * pi):
# X Axis
x1 += dif
elif (theta == pi / 2.0) or (theta == pi / -2.0):
# Y Axis
y1 += dif
elif (theta > pi * 2.0) or (theta < - pi * 2.0):
print('\n\n!!!!ARGGGGG!!!!!\n\n')
else:
x1 += round(float(dif * cos(theta)), 8)
y1 += round(float(dif * sin(theta)), 8)
j += 1
oids.append(j)
stations.append(station)
x_coords.append(x1)
y_coords.append(y1)
previous_station = next_station_float
next_station_float += sta_dist
running_length_total += length
previous_station = running_length_total
else:
pnt_1 = part_array[i]
prevpart_x, prevpart_y, = pnt_1[0], pnt_1[1]
else:
part_arrrays_to_revisit.append(part_array)
out_dict = {"OID": oids, "Stations": stations, "X": x_coords, "Y": y_coords}
return out_dict
except:
print('{0}'.format(traceback.format_exc()))
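# Hypothetical usage sketch for the two helpers above. The feature class path and
# where-clause are placeholders, not taken from the original script:
#
#     coords, total_len = get_vertices(r'C:\data\streams.gdb\centerline', '"OBJECTID" = 1')
#     stations = generate_xy_stations(coords, total_len, sta_dist=50)
#     # stations -> {"OID": [...], "Stations": [...], "X": [...], "Y": [...]}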
def CopyParallelL(plyP,sLength):
"""Copies an arcpy Poly Line Left. Will be commonly applied to upstream XS of a bridge location.
:param plyP:
:param sLength:
:return:
"""
part=plyP.getPart(0)
lArray=arcpy.Array()
for ptX in part:
dL=plyP.measureOnLine(ptX)
ptX0=plyP.positionAlongLine (dL-0.01).firstPoint
ptX1=plyP.positionAlongLine (dL+0.01).firstPoint
dX=float(ptX1.X)-float(ptX0.X)
dY=float(ptX1.Y)-float(ptX0.Y)
lenV=hypot(dX,dY)
sX=-dY*sLength/lenV;sY=dX*sLength/lenV
leftP=arcpy.Point(ptX.X+sX,ptX.Y+sY)
lArray.add(leftP)
array = arcpy.Array([lArray])
section=arcpy.Polyline(array)
return section
def CopyParallelR(plyP,sLength):
"""Copies an arcpy Poly Line Right
:param plyP:
:param sLength:
:return:
"""
part=plyP.getPart(0)
rArray=arcpy.Array()
for ptX in part:
dL=plyP.measureOnLine(ptX)
ptX0=plyP.positionAlongLine (dL-0.01).firstPoint
ptX1=plyP.positionAlongLine (dL+0.01).firstPoint
dX=float(ptX1.X)-float(ptX0.X)
dY=float(ptX1.Y)-float(ptX0.Y)
lenV=hypot(dX,dY)
sX=-dY*sLength/lenV;sY=dX*sLength/lenV
rightP=arcpy.Point(ptX.X-sX, ptX.Y-sY)
rArray.add(rightP)
array = arcpy.Array([rArray])
section=arcpy.Polyline(array)
return section
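# Illustrative example (distance value is a placeholder): offset a cross-section
# polyline 100 map units to each side of a centerline.
#
#     left_xs = CopyParallelL(centerline_polyline, 100.0)
#     right_xs = CopyParallelR(centerline_polyline, 100.0)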
def create_polygon_centroids(poly_fc,fc_path, new_fc,poly_fields):
fields = [ str(field) for field in poly_fields]
print(fields)
sr = arcpy.Describe (poly_fc).spatialReference
if arcpy.Exists(os.path.join(fc_path, new_fc)):
arcpy.Delete_management(os.path.join(fc_path, new_fc))
arcpy.CreateFeatureclass_management(fc_path,new_fc,'POINT', spatial_reference=sr)
fds = arcpy.ListFields(poly_fc)
for fd in fds:
if str(fd.name) in fields:
print('\t\t\t{0}: __{1}__'.format(fd.name,fd.type))
if str(fd.type).find('OID') != -1 or str(fd.type).find('Integer') != -1 :
arcpy.AddField_management(os.path.join(fc_path, new_fc), str(fd.name), "LONG")
elif str(fd.type).find('String') != -1:
arcpy.AddField_management(os.path.join(fc_path, new_fc), str(fd.name), "TEXT", field_length=fd.length)
elif str(fd.type).find('Double') != -1 or str(fd.type).find('Single') != -1:
arcpy.AddField_management(os.path.join(fc_path, new_fc), str(fd.name), "FLOAT")
else:
pass
fds = ['SHAPE@']
fds += fields
with arcpy.da.SearchCursor(poly_fc, fds) as sCursor:
with arcpy.da.InsertCursor(os.path.join(fc_path, new_fc), fds) as iCursor:
for row in sCursor:
polygon = row[0]
if polygon is None:
collect()
else:
if polygon.isMultipart:
pass
else:
cent = polygon.trueCentroid
irow = [cent]
irow += [val for val in row[1:]]
# print('\t\|{0}: ({1}, {2})'.format(str(row[2]),round(float(cent.X),2), round(float(cent.Y),2)))
iCursor.insertRow(irow)
| 55.182028 | 122 | 0.412877 |
a78fd7aa04f213462509f4072d37c76f8d980f90
| 3,843 |
py
|
Python
|
pybullet-gym/pybulletgym/envs/mujoco/robots/locomotors/walker_base.py
|
SmaleZ/vcl_diayn
|
b2c47a681675b405d2011bc4a43c3914f3af4ecc
|
[
"MIT"
] | 2 |
2021-07-12T17:11:35.000Z
|
2021-07-13T05:56:30.000Z
|
pybullet-gym/pybulletgym/envs/mujoco/robots/locomotors/walker_base.py
|
SmaleZ/vcl_diayn
|
b2c47a681675b405d2011bc4a43c3914f3af4ecc
|
[
"MIT"
] | null | null | null |
pybullet-gym/pybulletgym/envs/mujoco/robots/locomotors/walker_base.py
|
SmaleZ/vcl_diayn
|
b2c47a681675b405d2011bc4a43c3914f3af4ecc
|
[
"MIT"
] | null | null | null |
from pybulletgym.envs.mujoco.robots.robot_bases import XmlBasedRobot
import numpy as np
class WalkerBase(XmlBasedRobot):
def __init__(self, power):
self.power = power
self.camera_x = 0
self.start_pos_x, self.start_pos_y, self.start_pos_z = 0, 0, 0
self.walk_target_x = 1e3 # kilometers away
self.walk_target_y = 0
self.body_xyz = [0, 0, 0]
def robot_specific_reset(self, bullet_client):
self._p = bullet_client
for j in self.ordered_joints:
j.reset_current_position(self.np_random.uniform(low=-0.1, high=0.1), 0)
self.feet = [self.parts[f] for f in self.foot_list]
self.feet_contact = np.array([0.0 for f in self.foot_list], dtype=np.float32)
self.scene.actor_introduce(self)
self.initial_z = None
def apply_action(self, a):
assert (np.isfinite(a).all())
i = 0
for n, j in enumerate(self.ordered_joints):
if j.power_coef != 0: # in case the ignored joints are added, they have 0 power
j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n-i], -1, +1)))
else:
i += 1
def calc_state(self):
j = np.array([j.current_relative_position() for j in self.ordered_joints], dtype=np.float32).flatten()
# even elements [0::2] position, scaled to -1..+1 between limits
# odd elements [1::2] angular speed, scaled to show -1..+1
self.joint_speeds = j[1::2]
self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)
body_pose = self.robot_body.pose()
parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
self.body_xyz = (
parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]) # torso z is more informative than mean z
self.body_rpy = body_pose.rpy()
z = self.body_xyz[2]
if self.initial_z is None:
self.initial_z = z
r, p, yaw = self.body_rpy
self.walk_target_theta = np.arctan2(self.walk_target_y - self.body_xyz[1],
self.walk_target_x - self.body_xyz[0])
self.walk_target_dist = np.linalg.norm(
[self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]])
angle_to_target = self.walk_target_theta - yaw
rot_speed = np.array(
[[np.cos(-yaw), -np.sin(-yaw), 0],
[np.sin(-yaw), np.cos(-yaw), 0],
[ 0, 0, 1]]
)
vx, vy, vz = np.dot(rot_speed, self.robot_body.speed()) # rotate speed back to body point of view
more = np.array([ z-self.initial_z,
np.sin(angle_to_target), np.cos(angle_to_target),
0.3 * vx, 0.3 * vy, 0.3 * vz, # 0.3 is just scaling typical speed into -1..+1, no physical sense here
r, p], dtype=np.float32)
return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
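# Note on the observation returned by calc_state(): it concatenates the 8-element
# "more" vector (height delta, sin/cos of the angle to target, scaled body velocity,
# roll, pitch), the per-joint position/velocity pairs in j, and the binary foot
# contact flags, then clips everything to [-5, 5].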
def calc_potential(self):
# progress in potential field is speed*dt, typical speed is about 2-3 meter per second, this potential will change 2-3 per frame (not per second),
# all rewards have rew/frame units and close to 1.0
try:
debugmode = 0
if debugmode:
print("calc_potential: self.walk_target_dist")
print(self.walk_target_dist)
print("self.scene.dt")
print(self.scene.dt)
print("self.scene.frame_skip")
print(self.scene.frame_skip)
print("self.scene.timestep")
print(self.scene.timestep)
return - self.walk_target_dist / self.scene.dt
except AttributeError:
return - self.walk_target_dist
| 45.211765 | 154 | 0.583659 |
48dc113f30e7be8854538555da990e232f0295b9
| 7,516 |
py
|
Python
|
DEAL/02_ResNet/utils/resnet_utils.py
|
ptrckhmmr/DEAL
|
164ba36a21f7f779557e025bd5acc8a4a42f01a1
|
[
"Apache-2.0"
] | 13 |
2020-10-08T20:40:49.000Z
|
2022-03-12T07:21:39.000Z
|
DEAL/02_ResNet/utils/resnet_utils.py
|
DeepLearningResearch/DEAL
|
164ba36a21f7f779557e025bd5acc8a4a42f01a1
|
[
"Apache-2.0"
] | null | null | null |
DEAL/02_ResNet/utils/resnet_utils.py
|
DeepLearningResearch/DEAL
|
164ba36a21f7f779557e025bd5acc8a4a42f01a1
|
[
"Apache-2.0"
] | 1 |
2021-01-21T09:32:22.000Z
|
2021-01-21T09:32:22.000Z
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
from keras.datasets import cifar10, cifar100, mnist, fashion_mnist
from keras.utils import to_categorical
import numpy as np
import random
from scipy import misc
import shutil
def check_folder(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def str2bool(x):
return x.lower() in ('true')
def load_cifar10() :
(train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
# train_data = train_data / 255.0
# test_data = test_data / 255.0
train_data, test_data = normalize(train_data, test_data)
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
seed = 777
np.random.seed(seed)
np.random.shuffle(train_data)
np.random.seed(seed)
np.random.shuffle(train_labels)
return train_data, train_labels, test_data, test_labels
def load_cifar100() :
(train_data, train_labels), (test_data, test_labels) = cifar100.load_data()
# train_data = train_data / 255.0
# test_data = test_data / 255.0
train_data, test_data = normalize(train_data, test_data)
train_labels = to_categorical(train_labels, 100)
test_labels = to_categorical(test_labels, 100)
seed = 777
np.random.seed(seed)
np.random.shuffle(train_data)
np.random.seed(seed)
np.random.shuffle(train_labels)
return train_data, train_labels, test_data, test_labels
def load_mnist() :
(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
train_data = np.expand_dims(train_data, axis=-1)
test_data = np.expand_dims(test_data, axis=-1)
train_data, test_data = normalize(train_data, test_data)
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
seed = 777
np.random.seed(seed)
np.random.shuffle(train_data)
np.random.seed(seed)
np.random.shuffle(train_labels)
return train_data, train_labels, test_data, test_labels
def load_fashion() :
(train_data, train_labels), (test_data, test_labels) = fashion_mnist.load_data()
train_data = np.expand_dims(train_data, axis=-1)
test_data = np.expand_dims(test_data, axis=-1)
train_data, test_data = normalize(train_data, test_data)
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
seed = 777
np.random.seed(seed)
np.random.shuffle(train_data)
np.random.seed(seed)
np.random.shuffle(train_labels)
return train_data, train_labels, test_data, test_labels
def load_tiny() :
IMAGENET_MEAN = [123.68, 116.78, 103.94]
path = './tiny-imagenet-200'
num_classes = 200
print('Loading ' + str(num_classes) + ' classes')
X_train = np.zeros([num_classes * 500, 3, 64, 64], dtype=np.float32)
y_train = np.zeros([num_classes * 500], dtype=np.float32)
trainPath = path + '/train'
print('loading training images...')
i = 0
j = 0
annotations = {}
for sChild in os.listdir(trainPath):
sChildPath = os.path.join(os.path.join(trainPath, sChild), 'images')
annotations[sChild] = j
for c in os.listdir(sChildPath):
X = misc.imread(os.path.join(sChildPath, c), mode='RGB')
if len(np.shape(X)) == 2:
X_train[i] = np.array([X, X, X])
else:
X_train[i] = np.transpose(X, (2, 0, 1))
y_train[i] = j
i += 1
j += 1
if (j >= num_classes):
break
print('finished loading training images')
val_annotations_map = get_annotations_map()
X_test = np.zeros([num_classes * 50, 3, 64, 64], dtype=np.float32)
y_test = np.zeros([num_classes * 50], dtype=np.float32)
print('loading test images...')
i = 0
testPath = path + '/val/images'
for sChild in os.listdir(testPath):
if val_annotations_map[sChild] in annotations.keys():
sChildPath = os.path.join(testPath, sChild)
X = misc.imread(sChildPath, mode='RGB')
if len(np.shape(X)) == 2:
X_test[i] = np.array([X, X, X])
else:
X_test[i] = np.transpose(X, (2, 0, 1))
y_test[i] = annotations[val_annotations_map[sChild]]
i += 1
else:
pass
print('finished loading test images : ' + str(i))
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
# X_train /= 255.0
# X_test /= 255.0
# for i in range(3) :
# X_train[:, :, :, i] = X_train[:, :, :, i] - IMAGENET_MEAN[i]
# X_test[:, :, :, i] = X_test[:, :, :, i] - IMAGENET_MEAN[i]
X_train, X_test = normalize(X_train, X_test)
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
X_train = np.transpose(X_train, [0, 3, 2, 1])
X_test = np.transpose(X_test, [0, 3, 2, 1])
seed = 777
np.random.seed(seed)
np.random.shuffle(X_train)
np.random.seed(seed)
np.random.shuffle(y_train)
return X_train, y_train, X_test, y_test
def normalize(X_train, X_test):
mean = np.mean(X_train, axis=(0, 1, 2, 3))
std = np.std(X_train, axis=(0, 1, 2, 3))
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test
def get_annotations_map():
valAnnotationsPath = './tiny-imagenet-200/val/val_annotations.txt'
valAnnotationsFile = open(valAnnotationsPath, 'r')
valAnnotationsContents = valAnnotationsFile.read()
valAnnotations = {}
for line in valAnnotationsContents.splitlines():
pieces = line.strip().split()
valAnnotations[pieces[0]] = pieces[1]
return valAnnotations
def _random_crop(batch, crop_shape, padding=None):
oshape = np.shape(batch[0])
if padding:
oshape = (oshape[0] + 2 * padding, oshape[1] + 2 * padding)
new_batch = []
npad = ((padding, padding), (padding, padding), (0, 0))
for i in range(len(batch)):
new_batch.append(batch[i])
if padding:
new_batch[i] = np.lib.pad(batch[i], pad_width=npad,
mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
nw:nw + crop_shape[1]]
return new_batch
def _random_flip_leftright(batch):
for i in range(len(batch)):
if bool(random.getrandbits(1)):
batch[i] = np.fliplr(batch[i])
return batch
def data_augmentation(batch, img_size, dataset_name):
if dataset_name == 'mnist' :
batch = _random_crop(batch, [img_size[0], img_size[1]], 4)
elif dataset_name =='tiny' :
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [img_size[0], img_size[1]], 8)
else :
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [img_size[0], img_size[1]], 4)
return batch
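# Hypothetical usage sketch (assumes 32x32 CIFAR-10 images; the batch size is illustrative):
#
#     x_train, y_train, x_test, y_test = load_cifar10()
#     augmented = data_augmentation(x_train[:128], img_size=(32, 32), dataset_name='cifar10')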
def remove_checkpoints():
path = './checkpoint/'
if len(os.listdir(path)) != 0:
shutil.rmtree(path)
print('Checkpoints have been removed!')
else:
print('No checkpoints have been found!')
| 29.825397 | 84 | 0.639037 |
bc47bd1b17af88e0259a3a2f534f38365ba59f3a
| 862 |
py
|
Python
|
docker/ngram/ngram_scripts/build_recommender.py
|
smsahu/seldon-server
|
7f6dc5d405736e44205323f04ce431064dd854b3
|
[
"Apache-2.0"
] | 1,645 |
2015-02-13T12:31:44.000Z
|
2022-03-17T07:50:05.000Z
|
docker/ngram/ngram_scripts/build_recommender.py
|
smsahu/seldon-server
|
7f6dc5d405736e44205323f04ce431064dd854b3
|
[
"Apache-2.0"
] | 57 |
2015-03-26T16:00:23.000Z
|
2021-05-10T11:03:40.000Z
|
docker/ngram/ngram_scripts/build_recommender.py
|
smsahu/seldon-server
|
7f6dc5d405736e44205323f04ce431064dd854b3
|
[
"Apache-2.0"
] | 371 |
2015-03-16T11:04:16.000Z
|
2022-02-27T01:16:02.000Z
|
import sys, getopt, argparse
from seldon.text.ngram_recommend import NgramModel
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='upload model')
parser.add_argument('--arpa', help='arpa file', required=True)
parser.add_argument('--dst', help='dst folder', required=True)
parser.add_argument('--aws_key', help='aws key - needed if input or output is on AWS and no IAM')
parser.add_argument('--aws_secret', help='aws secret - needed if input or output on AWS and no IAM')
args = parser.parse_args()
opts = vars(args)
recommender = NgramModel()
recommender.fit(args.arpa)
import seldon
if "aws_key" in opts:
rw = seldon.Recommender_wrapper(aws_key=args.aws_key,aws_secret=args.aws_secret)
else:
rw = seldon.Recommender_wrapper()
rw.save_recommender(recommender,args.dst)
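# Example invocation (paths, bucket names and credentials are placeholders):
#   python build_recommender.py --arpa model.arpa --dst s3://my-bucket/ngram-model \
#       --aws_key AKIA... --aws_secret ...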
| 31.925926 | 105 | 0.701856 |
4584772afe0682a40a2f4b318d9c7f8b0ee623c6
| 11,232 |
py
|
Python
|
advanced_functionality/distributed_tensorflow_mask_rcnn/container-optimized/resources/train.py
|
nikhilarunw/amazon-sagemaker-examples
|
9fd2156b329fba087881b19d86aa68b5f5b50c7b
|
[
"Apache-2.0"
] | null | null | null |
advanced_functionality/distributed_tensorflow_mask_rcnn/container-optimized/resources/train.py
|
nikhilarunw/amazon-sagemaker-examples
|
9fd2156b329fba087881b19d86aa68b5f5b50c7b
|
[
"Apache-2.0"
] | 4 |
2020-09-26T00:53:42.000Z
|
2022-02-10T01:41:50.000Z
|
advanced_functionality/distributed_tensorflow_mask_rcnn/container-optimized/resources/train.py
|
nikhilarunw/amazon-sagemaker-examples
|
9fd2156b329fba087881b19d86aa68b5f5b50c7b
|
[
"Apache-2.0"
] | 1 |
2020-04-12T17:19:16.000Z
|
2020-04-12T17:19:16.000Z
|
import json
import os
import shutil
import subprocess
import sys
import time
import signal
import socket
import glob
from contextlib import contextmanager
def setup():
# Read info that SageMaker provides
current_host = os.environ['SM_CURRENT_HOST']
hosts = json.loads(os.environ['SM_HOSTS'])
# Enable SSH connections between containers
_start_ssh_daemon()
if current_host == _get_master_host_name(hosts):
_wait_for_worker_nodes_to_start_sshd(hosts)
class TimeoutError(Exception):
pass
@contextmanager
def timeout(seconds=0, minutes=0, hours=0):
"""
Add a signal-based timeout to any block of code.
If multiple time units are specified, they will be added together to determine time limit.
Usage:
with timeout(seconds=5):
my_slow_function(...)
Args:
- seconds: The time limit, in seconds.
- minutes: The time limit, in minutes.
- hours: The time limit, in hours.
"""
limit = seconds + 60 * minutes + 3600 * hours
def handler(signum, frame): # pylint: disable=W0613
raise TimeoutError('timed out after {} seconds'.format(limit))
try:
signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, limit)
yield
finally:
signal.alarm(0)
def _get_master_host_name(hosts):
return sorted(hosts)[0]
def _start_ssh_daemon():
subprocess.Popen(["/usr/sbin/sshd", "-D"])
def _wait_for_worker_nodes_to_start_sshd(hosts, interval=1, timeout_in_seconds=180):
with timeout(seconds=timeout_in_seconds):
while hosts:
print("hosts that aren't SSHable yet: %s", str(hosts))
for host in hosts:
ssh_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if _can_connect(host, 22, ssh_socket):
hosts.remove(host)
time.sleep(interval)
def _can_connect(host, port, s):
try:
print("testing connection to host %s", host)
s.connect((host, port))
s.close()
print("can connect to host %s", host)
return True
except socket.error:
print("can't connect to host %s", host)
return False
def wait_for_training_processes_to_appear_and_finish(proccess_id_string, worker):
training_process_started = False
while True:
time.sleep(300)
training_process_ps = subprocess.check_output(f'ps -elf | grep "{proccess_id_string}"', encoding='utf-8', shell=True)
print(training_process_ps)
training_process_count = subprocess.check_output(f'ps -elf | grep "{proccess_id_string}" | wc -l', encoding='utf-8', shell=True)
training_process_count_str = training_process_count.replace("\n", "").strip()
training_process_count = int(training_process_count_str) - 2
training_process_running = training_process_count > 0
if training_process_started:
print(f'training processes running: {training_process_count}')
if not training_process_running:
print(f'Worker {worker} training completed.')
time.sleep(5)
sys.exit(0)
if not training_process_started:
if training_process_running:
training_process_started = True
else:
print(f'Worker {worker} exiting: training not started in 300 seconds.')
sys.exit(1)
def build_host_arg(host_list, gpu_per_host):
arg = ""
for ind, host in enumerate(host_list):
if ind != 0:
arg += ","
arg += f'{host}:{gpu_per_host}'
return arg
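# e.g. build_host_arg(["algo-1", "algo-2"], 8) returns "algo-1:8,algo-2:8",
# the host list format expected by mpirun's --host flag.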
def copy_files(src, dest):
src_files = os.listdir(src)
for file in src_files:
path = os.path.join(src, file)
if os.path.isfile(path):
shutil.copy(path, dest)
def train():
import pprint
pprint.pprint(dict(os.environ), width = 1)
model_dir = os.environ['SM_MODEL_DIR']
log_dir = None
copy_logs_to_model_dir = False
try:
log_dir = os.environ['SM_CHANNEL_LOG']
copy_logs_to_model_dir = True
except KeyError:
log_dir = model_dir
train_data_dir = os.environ['SM_CHANNEL_TRAIN']
print("pre-setup check")
setup()
current_host = os.environ['SM_CURRENT_HOST']
all_hosts = json.loads(os.environ['SM_HOSTS'])
if_name = os.environ['SM_NETWORK_INTERFACE_NAME']
is_master = current_host == sorted(all_hosts)[0]
if not is_master:
print(f'Worker: {current_host}')
process_search_term = "/usr/local/bin/python3.6 /mask-rcnn-tensorflow/MaskRCNN/train.py"
wait_for_training_processes_to_appear_and_finish(process_search_term, current_host)
print(f'Worker {current_host} has completed')
else:
print(f'Master: {current_host}')
hyperparamters = json.loads(os.environ['SM_HPS'])
try:
batch_norm = hyperparamters['batch_norm']
except KeyError:
batch_norm = 'FreezeBN'
try:
mode_fpn = hyperparamters['mode_fpn']
except KeyError:
mode_fpn = "True"
try:
mode_mask = hyperparamters['mode_mask']
except KeyError:
mode_mask = "True"
try:
eval_period = hyperparamters['eval_period']
except KeyError:
eval_period = 1
try:
lr_epoch_schedule = hyperparamters['lr_epoch_schedule']
except KeyError:
lr_epoch_schedule = '[(16, 0.1), (20, 0.01), (24, None)]'
try:
horovod_cycle_time = hyperparamters['horovod_cycle_time']
except KeyError:
horovod_cycle_time = 0.5
try:
horovod_fusion_threshold = hyperparamters['horovod_fusion_threshold']
except KeyError:
horovod_fusion_threshold = 67108864
try:
data_train = hyperparamters['data_train']
except KeyError:
data_train = '["train2017"]'
try:
data_val = hyperparamters['data_val']
except KeyError:
data_val = '("val2017",)'
try:
nccl_min_rings = hyperparamters['nccl_min_rings']
except KeyError:
nccl_min_rings = 8
try:
batch_size_per_gpu = hyperparamters['batch_size_per_gpu']
except KeyError:
batch_size_per_gpu = 4
try:
images_per_epoch = hyperparamters['images_per_epoch']
except KeyError:
images_per_epoch = 120000
try:
backbone_weights = hyperparamters['backbone_weights']
except KeyError:
backbone_weights = 'ImageNet-R50-AlignPadding.npz'
try:
resnet_arch = hyperparamters['resnet_arch']
except KeyError:
resnet_arch = 'resnet50'
try:
rpn_anchor_stride = hyperparamters['rpn_anchor_stride']
except KeyError:
rpn_anchor_stride = 16
try:
rpn_anchor_sizes = hyperparamters['rpn_anchor_sizes']
except KeyError:
rpn_anchor_sizes = '(32, 64, 128, 256, 512)'
try:
rpn_anchor_ratios = hyperparamters['rpn_anchor_ratios']
except KeyError:
rpn_anchor_ratios = '(0.5, 1., 2.)'
try:
rpn_positive_anchor_thresh = hyperparamters['rpn_positive_anchor_thresh']
except KeyError:
rpn_positive_anchor_thresh = 0.7
try:
rpn_negative_anchor_thresh = hyperparamters['rpn_negative_anchor_thresh']
except KeyError:
rpn_negative_anchor_thresh = 0.3
try:
rpn_batch_per_im = hyperparamters['rpn_batch_per_im']
except KeyError:
rpn_batch_per_im = 256
try:
frcnn_batch_per_im = hyperparamters['frcnn_batch_per_im']
except KeyError:
frcnn_batch_per_im = 512
try:
fpn_anchor_strides = hyperparamters['fpn_anchor_strides']
except KeyError:
fpn_anchor_strides = '(4, 8, 16, 32, 64)'
try:
num_category = hyperparamters['num_category']
except KeyError:
num_category = 0
try:
class_names = hyperparamters['class_names']
except KeyError:
class_names = ''
try:
trainer = hyperparamters['trainer']
except KeyError:
trainer = 'horovod'
resnet_num_blocks = '[3, 4, 6, 3]'
if resnet_arch == 'resnet101':
resnet_num_blocks = '[3, 4, 23, 3]'
gpus_per_host = int(os.environ['SM_NUM_GPUS'])
numprocesses = len(all_hosts) * int(gpus_per_host)
mpirun_cmd = f"""HOROVOD_CYCLE_TIME={horovod_cycle_time} \\
HOROVOD_FUSION_THRESHOLD={horovod_fusion_threshold} \\
mpirun -np {numprocesses} \\
--host {build_host_arg(all_hosts, gpus_per_host)} \\
--allow-run-as-root \\
--display-map \\
--tag-output \\
-mca btl_tcp_if_include {if_name} \\
-mca oob_tcp_if_include {if_name} \\
-x NCCL_SOCKET_IFNAME={if_name} \\
--mca plm_rsh_no_tree_spawn 1 \\
-bind-to none -map-by slot \\
-mca pml ob1 -mca btl ^openib \\
-mca orte_abort_on_non_zero_status 1 \\
-x TENSORPACK_FP16=1 \\
-x NCCL_MIN_NRINGS={nccl_min_rings} -x NCCL_DEBUG=INFO \\
-x HOROVOD_CYCLE_TIME -x HOROVOD_FUSION_THRESHOLD \\
-x LD_LIBRARY_PATH -x PATH \\
--output-filename {model_dir} \\
/usr/local/bin/python3.6 /mask-rcnn-tensorflow/MaskRCNN/train.py \
--logdir {log_dir} \
--fp16 \
--throughput_log_freq=2000 \
--images_per_epoch {images_per_epoch} \
--config \
MODE_FPN={mode_fpn} \
MODE_MASK={mode_mask} \
DATA.BASEDIR={train_data_dir} \
BACKBONE.RESNET_NUM_BLOCKS='{resnet_num_blocks}' \
BACKBONE.WEIGHTS={train_data_dir}/pretrained-models/{backbone_weights} \
BACKBONE.NORM={batch_norm} \
DATA.TRAIN='{data_train}' \
DATA.VAL='{data_val}' \
TRAIN.BATCH_SIZE_PER_GPU={batch_size_per_gpu} \
TRAIN.EVAL_PERIOD={eval_period} \
TRAIN.LR_EPOCH_SCHEDULE='{lr_epoch_schedule}' \
RPN.ANCHOR_STRIDE={rpn_anchor_stride} \
RPN.ANCHOR_SIZES='{rpn_anchor_sizes}' \
RPN.ANCHOR_RATIOS='{rpn_anchor_ratios}' \
RPN.POSITIVE_ANCHOR_THRESH={rpn_positive_anchor_thresh} \
RPN.NEGATIVE_ANCHOR_THRESH={rpn_negative_anchor_thresh} \
RPN.BATCH_PER_IM={rpn_batch_per_im} \
FPN.ANCHOR_STRIDES='{fpn_anchor_strides}' \
FRCNN.BATCH_PER_IM={frcnn_batch_per_im} \
RPN.TOPK_PER_IMAGE=True \
PREPROC.PREDEFINED_PADDING=True \
TRAIN.GRADIENT_CLIP=0 \
TRAINER='{trainer}'"""
print("--------Begin MPI Run Command----------")
print(mpirun_cmd)
print("--------End MPI Run Comamnd------------")
exitcode = 0
try:
process = subprocess.Popen(mpirun_cmd, encoding='utf-8', cwd="/mask-rcnn-tensorflow",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
if process.poll() is not None:
break
output = process.stdout.readline()
if output:
print(output.strip())
exitcode = process.poll()
print(f"mpirun exit code:{exitcode}")
exitcode = 0
except Exception as e:
print("train exception occured", file=sys.stderr)
exitcode = 1
print(str(e), file=sys.stderr)
finally:
if copy_logs_to_model_dir:
copy_files(log_dir, model_dir)
sys.stdout.flush()
sys.stderr.flush()
sys.exit(exitcode)
if __name__ == "__main__":
train()
| 29.480315 | 136 | 0.646724 |
ddadd018562d12cd71369e27fbab64834eb52d16
| 3,559 |
py
|
Python
|
example/django1_10/bugsnag_demo/settings.py
|
rajeev02101987/bugsnag-python
|
84d6339dbfb9cc6068a972bca219836a402e0a74
|
[
"MIT"
] | null | null | null |
example/django1_10/bugsnag_demo/settings.py
|
rajeev02101987/bugsnag-python
|
84d6339dbfb9cc6068a972bca219836a402e0a74
|
[
"MIT"
] | null | null | null |
example/django1_10/bugsnag_demo/settings.py
|
rajeev02101987/bugsnag-python
|
84d6339dbfb9cc6068a972bca219836a402e0a74
|
[
"MIT"
] | null | null | null |
"""
Django settings for bugsnag_demo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3n3h7r@tpqnwqtt8#avxh_t75k_6zf3x)@6cg!u(&xmz79(26h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE = (
"bugsnag.django.middleware.BugsnagMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
)
ROOT_URLCONF = 'bugsnag_demo.urls'
WSGI_APPLICATION = 'bugsnag_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Initialize Bugsnag to begin tracking errors. Only an api key is required, but here are some other helpful configuration details:
BUGSNAG = {
# get your own api key at bugsnag.com
"api_key": "YOUR_API_KEY_HERE",
# By default, requests are sent asynchronously. If you would like to block until the request is done, you can set to false.
"asynchronous": False,
# if you track deploys or session rates, make sure to set the correct version.
"app_version": '1.2.3',
# Defaults to false, this allows you to log each session which will be used to calculate crash rates in your dashboard for each release.
"auto_capture_sessions": True,
# Sets which exception classes should never be sent to Bugsnag.
"ignore_classes": ['django.http.response.Http404', 'DontCare'],
# Defines the release stage for all events that occur in this app.
"release_stage": 'development',
# Defines which release stages bugsnag should report. e.g. ignore staging errors.
"notify_release_stages": [ 'development', 'production'],
# Any param key that contains one of these strings will be filtered out of all error reports.
"params_filters": ["credit_card_number", "password", "ssn"],
# We mark stacktrace lines as inProject if they come from files inside root:
# "project_root": "/path/to/your/project",
# Useful if you are wrapping bugsnag.notify() in with your own library, to ensure errors group properly.
# "traceback_exclude_module": [myapp.custom_logging],
}
| 30.161017 | 140 | 0.730261 |
dc5c675c60d4765e97dddfa07d7825520fe1db3b
| 540 |
py
|
Python
|
Python-Data-structure/WEEK 6/ex_10.py
|
mhmdreda99/Python-for-everybody-specializaion
|
9a520111bc4837b92709e0ffe54094bc184642d7
|
[
"MIT"
] | null | null | null |
Python-Data-structure/WEEK 6/ex_10.py
|
mhmdreda99/Python-for-everybody-specializaion
|
9a520111bc4837b92709e0ffe54094bc184642d7
|
[
"MIT"
] | null | null | null |
Python-Data-structure/WEEK 6/ex_10.py
|
mhmdreda99/Python-for-everybody-specializaion
|
9a520111bc4837b92709e0ffe54094bc184642d7
|
[
"MIT"
] | null | null | null |
fname = input('Enter File: ')
if len(fname) < 1:
fname = 'clown.txt'
hand = open(fname)
di = dict()
for lin in hand:
lin = lin.rstrip()
wds = lin.split()
for w in wds:
di[w] = di.get(w,0) + 1
#print(di)
# new code starts from here
#x = sorted(di.items())
#print(x[:5])
#flip for sorting
tmp = list()
for k,v in di.items():
print(k,v)
newt = (v,k)
tmp.append(newt)
#print('Flipped',tmp)
tmp = sorted(tmp, reverse=True)
#print('Sorted',tmp[:5])
#flip back to key
for v,k in tmp[:5]:
print(k,v)
| 15.882353 | 31 | 0.57037 |
7dff31518f5735d3790d4eb73fd163eef67d9e5e
| 1,317 |
py
|
Python
|
mindspore/ops/_op_impl/tbe/tanh.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 3,200 |
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
mindspore/ops/_op_impl/tbe/tanh.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 176 |
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
mindspore/ops/_op_impl/tbe/tanh.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 621 |
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tanh op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
tanh_op_info = TBERegOp("Tanh") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("tanh.so") \
.compute_cost(10) \
.kernel_name("tanh") \
.partial_flag(True) \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("formatAgnostic") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(tanh_op_info)
def _tanh_tbe():
"""Tanh TBE register"""
return
| 34.657895 | 79 | 0.668185 |
53c38d1d8a98cd22445ad182e4767bcadbedf8e4
| 12,030 |
py
|
Python
|
evolution/uLoadCvode.py
|
really-lilly/oldEv
|
64ce05d991f96fdca9a3170837e5e5e17f584cac
|
[
"MIT"
] | 1 |
2021-09-28T19:57:07.000Z
|
2021-09-28T19:57:07.000Z
|
evolution/uLoadCvode.py
|
really-lilly/oldEv
|
64ce05d991f96fdca9a3170837e5e5e17f584cac
|
[
"MIT"
] | 1 |
2022-02-17T21:19:53.000Z
|
2022-02-17T21:19:53.000Z
|
evolution/uLoadCvode.py
|
really-lilly/oldEv
|
64ce05d991f96fdca9a3170837e5e5e17f584cac
|
[
"MIT"
] | null | null | null |
'''
Author: Ciaran Welsh & Lillian Tatka
The purpose of this script is to load the SUNDIALS CVODE shared libraries via ctypes and provide a thin Python interface to them.
'''
import ctypes as ct
from typing import List
from distro import id
import numpy as np
from os.path import exists, join, dirname, isdir
from sys import platform
from pathlib import Path
CV_BDF = 2
CV_ADAMS = 1
CV_NORMAL = 1
CV_SUCCESS = 0
# sundials is a submodule.
parent_dir = Path(dirname(__file__)) # top level root directory
PROJ_ROOT = parent_dir.parent.absolute()
print(f" Root: {PROJ_ROOT}")
SUNDIALS_SRC = join(PROJ_ROOT, "sundials") # sundials source directory
SUNDIALS_INSTALL_PREFIX = join(PROJ_ROOT, f"sundials-install-{platform}")
print(SUNDIALS_INSTALL_PREFIX)
#print(f"Install prefix: {SUNDIALS_INSTALL_PREFIX}")
if not isdir(SUNDIALS_INSTALL_PREFIX):
raise ValueError("""
You need to install sundials using cmake. Use:
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=../sundials-install-{platform} ..
cmake --build . --target install --config Release -j 12
where platform is the output from sys.platform in Python.
""")
# If things fail because a library couldn't be found, it is probably because of something
# in the following code block. Here we are telling the computer where to look for the sundials
# libraries. The path will be different depending on the platform. This wasn't tested on every platform,
# so it may be necessary to add another "elif" if it is not working on your machine.
PLATFORM_SHARED_LIBRARY_EXTENSION = None  # Shared-library file extension on this platform, e.g. dll / so / dylib
PLATFORM_SHARED_LIBRARY_PREFIX = None  # Prefix of the library file names, e.g. the "lib" in libsundials_cvode.so
if platform == "win32":
SUNDIALS_LIB_DIR = join(SUNDIALS_INSTALL_PREFIX, "lib")
PLATFORM_SHARED_LIBRARY_EXTENSION = "dll"
PLATFORM_SHARED_LIBRARY_PREFIX = ""
elif platform == "linux":
PLATFORM_SHARED_LIBRARY_EXTENSION = "so"
distribution = id()
print(f"distribution is {distribution}")
if distribution == "centos":
PLATFORM_SHARED_LIBRARY_PREFIX = "lib64"
SUNDIALS_LIB_DIR = join(SUNDIALS_INSTALL_PREFIX, "lib")
elif distribution == "ubuntu":
PLATFORM_SHARED_LIBRARY_PREFIX = "lib"
SUNDIALS_LIB_DIR = join(SUNDIALS_INSTALL_PREFIX, "lib")
else: # I guess this is the one that hyak uses...
PLATFORM_SHARED_LIBRARY_PREFIX = "lib"
SUNDIALS_LIB_DIR = join(SUNDIALS_INSTALL_PREFIX, "lib64")
elif platform == "darwin":
SUNDIALS_LIB_DIR = join(SUNDIALS_INSTALL_PREFIX, "lib")
PLATFORM_SHARED_LIBRARY_EXTENSION = "dylib"
PLATFORM_SHARED_LIBRARY_PREFIX = "lib"
else:
raise ValueError("Unsupported platform")
def sundialsLibraries():
"""Return a dict of string to cvode binary filepath mappings.
:return:
"""
sundialsLibs = dict(
sundials_cvode=join(SUNDIALS_LIB_DIR, f"{PLATFORM_SHARED_LIBRARY_PREFIX}sundials_cvode.{PLATFORM_SHARED_LIBRARY_EXTENSION}"),
sundials_nvecserial=join(SUNDIALS_LIB_DIR, f"{PLATFORM_SHARED_LIBRARY_PREFIX}sundials_nvecserial.{PLATFORM_SHARED_LIBRARY_EXTENSION}")
)
for k, v in sundialsLibs.items():
if not exists(v):
raise ValueError(f"Sundials library \"{k}\" was not found at \"{v}\"")
return sundialsLibs
def loadSundialsLibrary(libName:str) -> ct.CDLL:
"""Load a sundials library into Python using ctypes
:param libName: the name of the library to load. Available names are keys of the dict returned by sundialsLibraries
"""
lib = None
if platform == "win32":
lib = ct.WinDLL(sundialsLibraries()[libName])
else:
lib = ct.CDLL(sundialsLibraries()[libName])
if not lib:
raise ValueError("Cannot find library named \""+ libName +"\"")
if not lib:
raise ValueError(f"Library \"{libName}\" not loaded into Python")
return lib
def loadSundialsFunc(lib:ct.CDLL, funcname: str, argtypes: List, restype) -> ct.CDLL._FuncPtr:
"""load a sundials function from the binary using ctypes
:param funcname: The name of the function to load
:param argtypes: The arguments types of the function to load, i.e. the C signature
:param restype: The return type of the function
:return:
"""
func = lib.__getattr__(funcname)
func.restype = restype
func.argtypes = argtypes
return func
def setUserData(n):
return ct.c_double * n
PARAMETER_ARRAY = setUserData(2)
class cvodeuserdata(ct.Structure):
_fields_ = [('k', PARAMETER_ARRAY)]
# The ode function has the form f(t, y, ydot, user_data)
callback_type = ct.CFUNCTYPE(ct.c_int, ct.c_double, ct.c_void_p, ct.c_void_p, ct.c_void_p)
error_callback_type = ct.CFUNCTYPE(None, ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_void_p)
Jac_callback_type = ct.CFUNCTYPE(ct.c_int, ct.c_double, ct.c_void_p, ct.c_void_p,
ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_void_p)
class TCvode:
def __init__(self, algType=CV_BDF, ignoreErrors=True):
self.ignoreErrors = ignoreErrors
self.cvode_mem = None
self.dllcvode = None
self.dllnvector = None
self.dllcvode = loadSundialsLibrary("sundials_cvode")
self.dllnvector = loadSundialsLibrary("sundials_nvecserial")
self.dllcvode.CVodeCreate.restype = ct.c_void_p
self.dllcvode.CVodeCreate.argTypes = [ct.c_int]
self.dllcvode.CVodeInit.restype = ct.c_int
self.dllcvode.CVodeInit.argType = [ct.c_void_p, callback_type, ct.c_double, ct.c_void_p]
self.dllcvode.CVodeSetMinStep.argTypes = [ct.c_void_p, ct.c_double]
self.dllcvode.CVodeSetMinStep.restype = ct.c_longlong
self.dllcvode.CVodeSetUserData.argTypes = [ct.c_void_p, ct.c_void_p]
self.dllnvector.N_VGetArrayPointer_Serial.argTypes = [ct.c_void_p]
self.dllnvector.N_VGetArrayPointer_Serial.restype = ct.POINTER(ct.c_double)
self.dllnvector.N_VNew_Serial.restype = ct.c_void_p
self.dllnvector.N_VNew_Serial.argTypes = [ct.c_longlong]
self.dllcvode.CVodeSStolerances.restype = ct.c_int
self.dllcvode.CVodeSStolerances.argTypes = [ct.c_void_p, ct.c_double, ct.c_void_p]
self.dllcvode.SUNDenseMatrix.restype = ct.c_void_p
self.dllcvode.SUNDenseMatrix.argTypes = [ct.c_int, ct.c_int]
self.dllcvode.SUNLinSol_Dense.restype = ct.c_void_p
self.dllcvode.SUNLinSol_Dense.argTypes = [ct.c_void_p, ct.c_void_p]
self.dllcvode.CVodeSetLinearSolver.restype = ct.c_int
self.dllcvode.CVodeSetLinearSolver.argTypes = [ct.c_void_p, ct.c_void_p, ct.c_void_p]
self.dllcvode.CVode.restype = ct.c_int
self.dllcvode.CVode.argTypes = [ct.c_void_p, ct.c_double,
ct.c_void_p, ct.POINTER(ct.c_double), ct.c_int64]
self.dllcvode.CVodeSetErrHandlerFn.restype = ct.c_int
self.dllcvode.CVodeSetErrHandlerFn.argType = [ct.c_void_p, error_callback_type, ct.c_void_p]
self.cvode_mem = ct.c_void_p(self.dllcvode.CVodeCreate(algType))
def setIgnoreErrors(self, ignoreErrors):
self.ignoreErrors = ignoreErrors
def errorHandler(self, errorCode, module, function, msg, eg_data):
if not self.ignoreErrors:
print("Bad model ----------- " + str(errorCode), end='')
def setModel(self, fcn, JacFcn=None):
self.fcn = fcn
if JacFcn is not None:
self.JacFcn = JacFcn
self.Jac_callback_func = Jac_callback_type(JacFcn)
else:
self.JacFcn = None
self.callback_func = callback_type(fcn)
self.error_callback_func = error_callback_type(self.errorHandler)
self.dllcvode.CVodeSetErrHandlerFn(self.cvode_mem, self.error_callback_func, None)
def setVectorValue(self, v, index, value):
py = self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(v))
py[index] = value
def getVectorValue(self, v, index):
py = self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(v))
return py[index]
def getVectorArray(self, v):
return self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(v))
def setTolerances(self, reltol=1E-6, abstol=1E-16):
_abstol = self.dllnvector.N_VNew_Serial(self.NEQ)
pabstol = self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(_abstol))
for i in range(self.NEQ):
pabstol[i] = abstol
self.dllcvode.CVodeSStolerances(self.cvode_mem, ct.c_double(reltol), ct.c_void_p(_abstol))
def initialize(self, n, y0, userData=None):
self.NEQ = n
self.init_y0 = self.dllnvector.N_VNew_Serial(self.NEQ)
self.init_y0_ptr = self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(self.init_y0))
for i in range(self.NEQ):
self.init_y0_ptr[i] = y0[i]
flag = self.dllcvode.CVodeSetUserData(self.cvode_mem, None)
t0 = ct.c_double(0.0)
flag = self.dllcvode.CVodeInit(self.cvode_mem, self.callback_func, t0, ct.c_void_p(self.init_y0))
if flag != 0:
print("Error in calling CVodeInit: ", flag)
# Create dense SUNMatrix for use in linear solves
A = self.dllcvode.SUNDenseMatrix(self.NEQ, self.NEQ)
# if(check_retval((void *)A, "SUNDenseMatrix", 0)) return(1)
# Create dense SUNLinearSolver object for use by CVode
LS = self.dllcvode.SUNLinSol_Dense(ct.c_void_p(self.init_y0), ct.c_void_p(A))
# if(check_retval((void *)LS, "SUNLinSol_Dense", 0)) return(1);
# Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode
flag = self.dllcvode.CVodeSetLinearSolver(self.cvode_mem, ct.c_void_p(LS), ct.c_void_p(A))
        if self.JacFcn is not None:
            # Register the ctypes-wrapped Jacobian callback, not the bare Python function
            flag = self.dllcvode.CVodeSetJacFn(self.cvode_mem, self.Jac_callback_func)
self.setTolerances()
def reset(self):
t0 = ct.c_double(0.0)
flag = self.dllcvode.CVodeInit(self.cvode_mem, self.callback_func, t0, ct.c_void_p(self.init_y0))
def oneStep(self, t, hstep):
new_t = ct.c_double(0)
tout = ct.c_double(t + hstep)
print("tout = ", tout.value)
yout = self.dllnvector.N_VNew_Serial(self.NEQ)
print("neq = ", self.NEQ)
ier = self.dllcvode.CVode(self.cvode_mem, tout,
ct.c_void_p(yout),
ct.byref(new_t), CV_NORMAL)
print("ier = ", ier)
py = self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(yout))
print("Ans = ", new_t.value, py[0])
y = []
for i in range(self.NEQ):
y.append(py[i])
return t + hstep, y
def simulate(self, startTime, endTime, numPoints):
yreturn = np.zeros([numPoints, self.NEQ + 1]) # plus one for time
yreturn[0, 0] = startTime
for i in range(self.NEQ):
yreturn[0, i + 1] = self.init_y0_ptr[i]
hstep = (endTime - startTime) / (numPoints - 1)
new_t = ct.c_double(0)
yout = self.dllnvector.N_VNew_Serial(self.NEQ)
tout = ct.c_double(startTime + hstep)
for i in range(numPoints - 1):
ier = self.dllcvode.CVode(self.cvode_mem, tout,
ct.c_void_p(yout),
ct.byref(new_t), CV_NORMAL)
if ier < 0:
raise Exception("Error: " + str(ier))
py = self.dllnvector.N_VGetArrayPointer_Serial(ct.c_void_p(yout))
# +1 because time = 0 is stored at i=0
yreturn[i + 1, 0] = new_t.value
for j in range(self.NEQ):
yreturn[i + 1, j + 1] = py[j]
tout.value = tout.value + hstep
return yreturn
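# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumptions: `callback_type` (defined above) wraps the CVODE right-hand-side
# signature (t, y, ydot, user_data) with the vectors passed as N_Vector handles,
# and loadSundialsLibrary() can resolve the SUNDIALS shared libraries. The model
# below (exponential decay y' = -y) and every name in this block are
# hypothetical, not part of the wrapper above.
if __name__ == "__main__":
    solver = TCvode()
    def decay_rhs(t, y, ydot, user_data):
        # Read the state and write the derivative through the N_Vector accessors
        state = solver.getVectorArray(y)
        deriv = solver.getVectorArray(ydot)
        deriv[0] = -state[0]
        return 0
    solver.setModel(decay_rhs)
    solver.initialize(1, [1.0])
    trajectory = solver.simulate(0.0, 5.0, 11)  # each row: [t, y0]
    print(trajectory)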
| 39.442623 | 143 | 0.655777 |
560c41e842c82019e482839c2885ec28eb1d9dcb
| 39,521 |
py
|
Python
|
neural-machine-translation/t/text_encoder.py
|
SunYanCN/NLP-Models-Tensorflow
|
0741216aa8235e1228b3de7903cc36d73f8f2b45
|
[
"MIT"
] | 1,705 |
2018-11-03T17:34:22.000Z
|
2022-03-29T04:30:01.000Z
|
neural-machine-translation/t/text_encoder.py
|
SunYanCN/NLP-Models-Tensorflow
|
0741216aa8235e1228b3de7903cc36d73f8f2b45
|
[
"MIT"
] | 26 |
2019-03-16T17:23:00.000Z
|
2021-10-08T08:06:09.000Z
|
neural-machine-translation/t/text_encoder.py
|
SunYanCN/NLP-Models-Tensorflow
|
0741216aa8235e1228b3de7903cc36d73f8f2b45
|
[
"MIT"
] | 705 |
2018-11-03T17:34:25.000Z
|
2022-03-24T02:29:14.000Z
|
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for text data.
* TextEncoder: base class
* ByteTextEncoder: for ascii text
* TokenTextEncoder: with user-supplied vocabulary file
* SubwordTextEncoder: invertible
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import math
import re
import tempfile
import time
import numpy as np
import six
from six.moves import range # pylint: disable=redefined-builtin
from . import tokenizer
import tensorflow.compat.v1 as tf
# Reserved tokens for things like padding and EOS symbols.
PAD = '<pad>'
EOS = '<EOS>'
RESERVED_TOKENS = [PAD, EOS]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, 'ascii'), bytes(EOS, 'ascii')]
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r'\\u|\\\\|\\([0-9]+);')
_ESCAPE_CHARS = set(u'\\_u;0123456789')
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
if is_unicode(s):
return s
try:
return to_unicode(s)
except UnicodeDecodeError:
res = to_unicode(s, ignore_errors = True)
tf.logging.info('Ignoring Unicode error, outputting: %s' % res)
return res
def unicode_to_native(s):
if six.PY2:
return s.encode('utf-8') if is_unicode(s) else s
else:
return s
def is_unicode(s):
return isinstance(s, six.text_type)
def to_unicode(s, ignore_errors = False):
if is_unicode(s):
return s
error_mode = 'ignore' if ignore_errors else 'strict'
return s.decode('utf-8', errors = error_mode)
def to_unicode_ignore_errors(s):
return to_unicode(s, ignore_errors = True)
def to_unicode_utf8(s):
return unicode(s, 'utf-8') if six.PY2 else s.decode('utf-8')
def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids
class TextEncoder(object):
"""Base class for converting from ints to/from human readable strings."""
def __init__(self, num_reserved_ids = NUM_RESERVED_TOKENS):
self._num_reserved_ids = num_reserved_ids
@property
def num_reserved_ids(self):
return self._num_reserved_ids
def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
"""
return [int(w) + self._num_reserved_ids for w in s.split()]
def decode(self, ids, strip_extraneous = False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return ' '.join(self.decode_list(ids))
def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
"""
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids]
@property
def vocab_size(self):
raise NotImplementedError()
class ByteTextEncoder(TextEncoder):
"""Encodes each byte to an id. For 8-bit strings only."""
def encode(self, s):
numres = self._num_reserved_ids
if six.PY2:
if isinstance(s, unicode):
s = s.encode('utf-8')
return [ord(c) + numres for c in s]
# Python3: explicitly convert to UTF-8
return [c + numres for c in s.encode('utf-8')]
def decode(self, ids, strip_extraneous = False):
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
numres = self._num_reserved_ids
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < numres:
decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
else:
decoded_ids.append(int2byte(id_ - numres))
if six.PY2:
return ''.join(decoded_ids)
# Python3: join byte arrays and then decode string
return b''.join(decoded_ids).decode('utf-8', 'replace')
def decode_list(self, ids):
numres = self._num_reserved_ids
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < numres:
decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
else:
decoded_ids.append(int2byte(id_ - numres))
# Python3: join byte arrays and then decode string
return decoded_ids
@property
def vocab_size(self):
return 2 ** 8 + self._num_reserved_ids
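# Illustration (added; not part of the original module): with the default two
# reserved ids (<pad> = 0, <EOS> = 1) every byte is shifted by 2, e.g.
#
#   ByteTextEncoder().encode('Hi')        # -> [74, 107]  (ord('H') = 72, ord('i') = 105)
#   ByteTextEncoder().decode([74, 107])   # -> 'Hi'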
class ClassLabelEncoder(TextEncoder):
"""Encoder for class labels."""
def __init__(self, class_labels = None, class_labels_fname = None):
super(ClassLabelEncoder, self).__init__(num_reserved_ids = 0)
if class_labels_fname:
with tf.gfile.Open(class_labels_fname) as f:
class_labels = [label.strip() for label in f.readlines()]
assert class_labels
self._class_labels = class_labels
def encode(self, s):
label_str = s
return self._class_labels.index(label_str)
def decode(self, ids, strip_extraneous = False):
del strip_extraneous
label_id = ids
if isinstance(label_id, list):
assert len(label_id) == 1
label_id, = label_id
if isinstance(label_id, np.ndarray):
label_id = np.squeeze(label_id)
return self._class_labels[label_id]
def decode_list(self, ids):
return [self._class_labels[i] for i in ids]
@property
def vocab_size(self):
return len(self._class_labels)
class OneHotClassLabelEncoder(ClassLabelEncoder):
"""One-hot encoder for class labels."""
def encode(
self, label_str, on_value = 1, off_value = 0
): # pylint: disable=arguments-differ
e = np.full(self.vocab_size, off_value, dtype = np.int32)
e[self._class_labels.index(label_str)] = on_value
return e.tolist()
def decode(self, ids, strip_extraneous = False):
del strip_extraneous
label_id = ids
if isinstance(label_id, np.ndarray):
label_id = np.squeeze(label_id).astype(np.int8).tolist()
assert isinstance(label_id, list)
assert len(label_id) == self.vocab_size
return self._class_labels[label_id.index(1)]
@property
def vocab_size(self):
return len(self._class_labels)
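# Illustration (added; not part of the original module): the class-label
# encoders map between label strings, integer ids and one-hot vectors, e.g.
#
#   enc = OneHotClassLabelEncoder(class_labels=['neg', 'pos'])
#   enc.encode('pos')        # -> [0, 1]
#   enc.decode([0, 1])       # -> 'pos'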
class TokenTextEncoder(TextEncoder):
"""Encoder based on a user-supplied vocabulary (file or list)."""
def __init__(
self,
vocab_filename,
reverse = False,
vocab_list = None,
replace_oov = None,
num_reserved_ids = NUM_RESERVED_TOKENS,
):
"""Initialize from a file or list, one token per line.
Handling of reserved tokens works as follows:
- When initializing from a list, we add reserved tokens to the vocab.
- When initializing from a file, we do not add reserved tokens to the vocab.
- When saving vocab files, we save reserved tokens to the file.
Args:
vocab_filename: If not None, the full filename to read vocab from. If this
is not None, then vocab_list should be None.
reverse: Boolean indicating if tokens should be reversed during encoding
and decoding.
vocab_list: If not None, a list of elements of the vocabulary. If this is
not None, then vocab_filename should be None.
replace_oov: If not None, every out-of-vocabulary token seen when
encoding will be replaced by this string (which must be in vocab).
num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.
"""
super(TokenTextEncoder, self).__init__(
num_reserved_ids = num_reserved_ids
)
self._reverse = reverse
self._replace_oov = replace_oov
if vocab_filename:
self._init_vocab_from_file(vocab_filename)
else:
assert vocab_list is not None
self._init_vocab_from_list(vocab_list)
def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids."""
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [
t if t in self._token_to_id else self._replace_oov
for t in tokens
]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret
def decode(self, ids, strip_extraneous = False):
return ' '.join(self.decode_list(ids))
def decode_list(self, ids):
seq = reversed(ids) if self._reverse else ids
return [self._safe_id_to_token(i) for i in seq]
@property
def vocab_size(self):
return len(self._id_to_token)
def _safe_id_to_token(self, idx):
return self._id_to_token.get(idx, 'ID_%d' % idx)
def _init_vocab_from_file(self, filename):
"""Load vocab from a file.
Args:
filename: The file to load vocabulary from.
"""
with tf.gfile.Open(filename) as f:
tokens = [token.strip() for token in f.readlines()]
def token_gen():
for token in tokens:
yield token
self._init_vocab(token_gen(), add_reserved_tokens = False)
def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
"""
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen())
def _init_vocab(self, token_generator, add_reserved_tokens = True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start = non_reserved_start_index)
)
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict(
(v, k) for k, v in six.iteritems(self._id_to_token)
)
def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with tf.gfile.Open(filename, 'w') as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + '\n')
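# Illustration (added; not part of the original module): a TokenTextEncoder
# built from a list places the reserved tokens first, so real tokens start at
# id 2, e.g.
#
#   enc = TokenTextEncoder(None, vocab_list=['the', 'cat'])
#   enc.encode('cat the')    # -> [3, 2]
#   enc.decode([3, 2])       # -> 'cat the'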
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError('Expected string type for token, got %s' % type(token))
token = token.replace(u'\\', u'\\\\').replace(u'_', u'\\u')
ret = [
c if c in alphabet and c != u'\n' else r'\%d;' % ord(c) for c in token
]
return u''.join(ret) + '_'
def _unescape_token(escaped_token):
"""Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return u'_' if m.group(0) == u'\\u' else u'\\'
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return u'\u3013' # Unicode for undefined character.
trimmed = (
escaped_token[:-1] if escaped_token.endswith('_') else escaped_token
)
return _UNESCAPE_REGEX.sub(match, trimmed)
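# Illustration (added; not part of the original module): the two helpers above
# are inverses on tokens drawn from the alphabet, e.g.
#
#   alphabet = set(u'abcdef') | _ESCAPE_CHARS
#   _escape_token(u'abc_def', alphabet)    # -> u'abc\\udef_'
#   _unescape_token(u'abc\\udef_')         # -> u'abc_def'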
class SubwordTextEncoder(TextEncoder):
"""Class for invertibly encoding text using a limited vocabulary.
Invertibly encodes a native string as a sequence of subtokens from a limited
vocabulary.
A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
the corpus), and stored to a file. See text_encoder_build_subword.py.
It can then be loaded and used to encode/decode any text.
Encoding has four phases:
1. Tokenize into a list of tokens. Each token is a unicode string of either
all alphanumeric characters or all non-alphanumeric characters. We drop
tokens consisting of a single space that are between two alphanumeric
tokens.
2. Escape each token. This escapes away special and out-of-vocabulary
characters, and makes sure that each token ends with an underscore, and
has no other underscores.
  3. Represent each escaped token as the concatenation of a list of subtokens
from the limited vocabulary. Subtoken selection is done greedily from
beginning to end. That is, we construct the list in order, always picking
the longest subtoken in our vocabulary that matches a prefix of the
remaining portion of the encoded token.
4. Concatenate these lists. This concatenation is invertible due to the
fact that the trailing underscores indicate when one list is finished.
"""
def __init__(self, filename = None):
"""Initialize and read from a file, if provided.
Args:
filename: filename from which to read vocab. If None, do not load a
vocab
"""
self._alphabet = set()
self.filename = filename
if filename is not None:
self._load_from_file(filename)
super(SubwordTextEncoder, self).__init__()
def encode(self, s):
"""Converts a native string to a list of subtoken ids.
Args:
s: a native string.
Returns:
a list of integers in the range [0, vocab_size)
"""
return self._tokens_to_subtoken_ids(
tokenizer.encode(native_to_unicode(s))
)
def encode_without_tokenizing(self, token_text):
"""Converts string to list of subtoken ids without calling tokenizer.
This treats `token_text` as a single token and directly converts it
to subtoken ids. This may be useful when the default tokenizer doesn't
do what we want (e.g., when encoding text with tokens composed of lots of
nonalphanumeric characters). It is then up to the caller to make sure that
raw text is consistently converted into tokens. Only use this if you are
sure that `encode` doesn't suit your needs.
Args:
token_text: A native string representation of a single token.
Returns:
A list of subword token ids; i.e., integers in the range [0, vocab_size).
"""
return self._tokens_to_subtoken_ids([native_to_unicode(token_text)])
def decode(self, ids, strip_extraneous = False):
"""Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids))
)
def decode_list(self, ids):
return [self._subtoken_id_to_subtoken_string(s) for s in ids]
@property
def vocab_size(self):
"""The subtoken vocabulary size."""
return len(self._all_subtoken_strings)
def _tokens_to_subtoken_ids(self, tokens):
"""Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
"""
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet)
)
self._cache[cache_location] = (token, ret)
return ret
def _subtoken_ids_to_tokens(self, subtokens):
"""Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
"""
concatenated = ''.join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens]
)
split = concatenated.split('_')
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + '_')
if unescaped:
ret.append(unescaped)
return ret
def _subtoken_id_to_subtoken_string(self, subtoken):
"""Converts a subtoken integer ID to a subtoken string."""
if 0 <= subtoken < self.vocab_size:
return self._all_subtoken_strings[subtoken]
return u''
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1
):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert (
False
), 'Token substring not found in subtoken vocabulary.'
return ret
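    # Illustration (added; not part of the original class): with a subtoken
    # vocabulary containing {'c', 'ca', 'cat_', 'a', 't', '_'}, the greedy
    # matcher above segments the escaped token u'cat_' as ['cat_'] (the longest
    # matching prefix wins) rather than ['ca', 't', '_'].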
def _escaped_token_to_subtoken_ids(self, escaped_token):
"""Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
"""
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(
escaped_token
)
]
@classmethod
def build_from_generator(
cls,
generator,
target_size,
max_subtoken_length = None,
reserved_tokens = None,
):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
"""
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size,
token_counts,
1,
1e3,
max_subtoken_length = max_subtoken_length,
reserved_tokens = reserved_tokens,
)
return encoder
@classmethod
def build_to_target_size(
cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length = None,
reserved_tokens = None,
num_iterations = 4,
):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError(
'Lower bound for the minimum token count '
'is greater than the upper bound.'
)
if target_size < 1:
raise ValueError('Target size must be positive.')
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
tf.logging.info('Trying min_count %d' % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts,
present_count,
num_iterations,
max_subtoken_length = max_subtoken_length,
reserved_tokens = reserved_tokens,
)
# Being within 1% of the target size is ok.
is_ok = (
abs(subtokenizer.vocab_size - target_size) * 100 < target_size
)
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if abs(other_subtokenizer.vocab_size - target_size) < abs(
subtokenizer.vocab_size - target_size
):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
def build_from_token_counts(
self,
token_counts,
min_count,
num_iterations = 4,
reserved_tokens = None,
max_subtoken_length = None,
):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
      num_iterations: an integer; how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
      ValueError: if `reserved_tokens` is supplied but the global
        `RESERVED_TOKENS` is not a prefix of it.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError(
'RESERVED_TOKENS must be a prefix of '
'reserved_tokens.'
)
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(
six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens],
)
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(
list(self._alphabet), reserved_tokens = reserved_tokens
)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info('Iteration {0}'.format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(
escaped_token
)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(
last_position, start + max_subtoken_length
)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(
u'Processing token [{0}] took {1} seconds, consider '
'setting Text2TextProblem.max_subtoken_length to a '
'smaller value.'.format(token, iter_time_secs)
)
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append(
(count, subtoken_string)
)
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend(
(subtoken_counts.get(a, 0), a) for a in self._alphabet
)
new_subtoken_strings.sort(reverse = True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [
subtoken for _, subtoken in new_subtoken_strings
]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = (
escaped_reserved_tokens + new_subtoken_strings
)
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info('vocab_size = %d' % self.vocab_size)
@property
def all_subtoken_strings(self):
return tuple(self._all_subtoken_strings)
def dump(self):
"""Debugging dump of the current subtoken vocabulary."""
subtoken_strings = [
(i, s) for s, i in six.iteritems(self._subtoken_string_to_id)
]
print(
u', '.join(
u"{0} : '{1}'".format(i, s) for i, s in sorted(subtoken_strings)
)
)
def _init_subtokens_from_list(
self, subtoken_strings, reserved_tokens = None
):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
      ValueError: not raised by this helper itself; `reserved_tokens` is
        validated by its caller (see `build_from_token_counts`).
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings)
if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
def _init_alphabet_from_tokens(self, tokens):
"""Initialize alphabet from an iterable of token or subtoken strings."""
# Include all characters from all tokens in the alphabet to guarantee that
# any token can be encoded. Additionally, include all escaping characters.
self._alphabet = {c for token in tokens for c in token}
self._alphabet |= _ESCAPE_CHARS
def _load_from_file_object(self, f):
"""Load from a file object.
Args:
f: File object to load vocabulary from
"""
subtoken_strings = []
for line in f:
s = line.rstrip()
# Some vocab files wrap words in single quotes, but others don't
if (s.startswith("'") and s.endswith("'")) or (
s.startswith('"') and s.endswith('"')
):
s = s[1:-1]
subtoken_strings.append(native_to_unicode(s))
self._init_subtokens_from_list(subtoken_strings)
self._init_alphabet_from_tokens(subtoken_strings)
def _load_from_file(self, filename):
"""Load from a vocab file."""
if not tf.gfile.Exists(filename):
raise ValueError('File %s not found' % filename)
with tf.gfile.Open(filename) as f:
self._load_from_file_object(f)
def store_to_file(self, filename, add_single_quotes = True):
with tf.gfile.Open(filename, 'w') as f:
for subtoken_string in self._all_subtoken_strings:
if add_single_quotes:
f.write("'" + unicode_to_native(subtoken_string) + "'\n")
else:
f.write(unicode_to_native(subtoken_string) + '\n')
class ImageEncoder(object):
"""Encoder class for saving and loading images."""
def __init__(
self, num_reserved_ids = 0, height = None, width = None, channels = 3
):
assert num_reserved_ids == 0
self._height = height
self._width = width
self._channels = channels
@property
def num_reserved_ids(self):
return 0
def encode(self, s):
"""Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
      ids: RGB pixel values as a numpy array (as returned by matplotlib imread).
"""
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
'Reading an image requires matplotlib to be installed: %s', e
)
raise NotImplementedError('Image reading not implemented.')
return im.imread(s)
def decode(self, ids, strip_extraneous = False):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp('_decode.png')
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError(
'Length of ids (%d) must be height (%d) x width (%d) x '
'channels (%d); %d != %d.\n Ids: %s'
% (
len(ids),
self._height,
self._width,
self._channels,
len(ids),
length,
' '.join([str(i) for i in ids]),
)
)
with tf.Graph().as_default():
raw = tf.constant(ids, dtype = tf.uint8)
if size is None:
img = tf.reshape(
raw, [self._height, self._width, self._channels]
)
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path
def decode_list(self, ids):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
Returns:
Singleton list: path to the temporary file where the image was saved.
"""
return [self.decode(ids)]
@property
def vocab_size(self):
return 256
class RealEncoder(object):
"""Encoder class for saving and loading float values."""
def encode(self, s):
"""Transform a string (space separated float values) into a float array.
Args:
s: space separated float values.
Returns:
Array of float values.
"""
return [float(w) for w in s.split()]
def decode(self, ids, strip_extraneous = False):
"""Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
String having space separated float values.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
return ' '.join([str(i) for i in ids])
| 34.455972 | 87 | 0.619696 |
1ce7d7f5a982b381c900935b7b85852a99891712
| 1,613 |
py
|
Python
|
observations/r/litters.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 199 |
2017-07-24T01:34:27.000Z
|
2022-01-29T00:50:55.000Z
|
observations/r/litters.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 46 |
2017-09-05T19:27:20.000Z
|
2019-01-07T09:47:26.000Z
|
observations/r/litters.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 45 |
2017-07-26T00:10:44.000Z
|
2022-03-16T20:44:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def litters(path):
"""Mouse Litters
Data on the body and brain weights of 20 mice, together with the size of
the litter. Two mice were taken from each litter size.
This data frame contains the following columns:
lsize
litter size
bodywt
body weight
brainwt
brain weight
Wainright P, Pelkman C and Wahlsten D 1989. The quantitative
relationship between nutritional effects on preweaning growth and
behavioral development in mice. Developmental Psychobiology 22: 183-193.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `litters.csv`.
Returns:
Tuple of np.ndarray `x_train` with 20 rows and 3 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'litters.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/litters.csv'
maybe_download_and_extract(path, url,
save_file_name='litters.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
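if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module). The
    # data directory below is hypothetical; the CSV is downloaded there if absent.
    x_train, metadata = litters("~/data")
    print(x_train.shape)          # expected: (20, 3)
    print(metadata["columns"])    # lsize, bodywt, brainwt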
| 26.442623 | 74 | 0.698078 |
81f8b6009b0f1bf979bc5394fc80662b1d06675f
| 1,516 |
py
|
Python
|
app.py
|
boweihan/NLPPlayground
|
17ad1bc989a6d98fcc11cede0ab728abd5d7df42
|
[
"MIT"
] | null | null | null |
app.py
|
boweihan/NLPPlayground
|
17ad1bc989a6d98fcc11cede0ab728abd5d7df42
|
[
"MIT"
] | 16 |
2021-01-06T08:10:37.000Z
|
2022-03-27T06:27:48.000Z
|
app.py
|
boweihan/NLPPlayground
|
17ad1bc989a6d98fcc11cede0ab728abd5d7df42
|
[
"MIT"
] | null | null | null |
import stanza
# packages are downloaded separately to minimize startup time for Heroku deployment
stanza.download("en", package="ewt")
stanza.download("en", package="OntoNotes")
from flask import Flask, request
from flask_cors import CORS, cross_origin
from twilio.twiml.messaging_response import MessagingResponse
from iterations.one import ExampleOne
import requests
# api configuration
app = Flask(__name__)
cors = CORS(app, resources={r"*": {"origins": "*"}})
"""
-- Query Examples by Category --
{number} example number
{category} input category (i.e. boolean, multiple, rating, name, numeric)
{input} query string for user input
returns string {response}
"""
@app.route("/example/<number>/<category>", methods=["GET"])
def example_one(number, category):
input = request.args.get("input")
if not input:
return "Please supply a valid query string [input]"
example = None
if number == "1":
example = ExampleOne()
else:
return "Please enter a valid example number"
if category == "boolean":
return example.parseBoolean(input)
elif category == "multiple":
return example.parseMultiple(input)
elif category == "rating":
return example.parseRating(input)
elif category == "name":
return example.parseName(input)
elif category == "numeric":
return example.parseNumber(input)
return "Please enter a valid category"
if __name__ == "__main__":
app.run(threaded=False, host="0.0.0.0", port=8081)
| 27.071429 | 83 | 0.693272 |
fbc8f4b5aaf0e54ec3c4b2f43e25e5069b3dd332
| 30,073 |
py
|
Python
|
adaptft/tft.py
|
alexberrian/AdapTFT
|
263a4d25669f6f87039d1dafd386cdc7838bde9e
|
[
"BSD-3-Clause"
] | null | null | null |
adaptft/tft.py
|
alexberrian/AdapTFT
|
263a4d25669f6f87039d1dafd386cdc7838bde9e
|
[
"BSD-3-Clause"
] | null | null | null |
adaptft/tft.py
|
alexberrian/AdapTFT
|
263a4d25669f6f87039d1dafd386cdc7838bde9e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from adaptft.audio_io import AudioSignal
from copy import deepcopy
# from typing import Callable
# from typing import Generator
TWOPI = np.pi * 2
class TFTransformer(object):
def __init__(self, filename):
self.AudioSignal = AudioSignal(filename)
self.param_dict = {}
self.exp_time_shift = None
self.jtfrt_memory = None
self.jtfrt_shape = None
self._initialize_default_params()
self._initialize_helpers_jtfrt()
def _initialize_default_params(self):
self.param_dict = {"hopsize": 256, # Test with 215
"windowfunc": np.hanning,
"windowmode": "single",
"windowsize": 4096, # Test with 4095
"fftsize": 4096,
"buffermode": "centered_analysis",
"sstsize": 6000, # Test with 6007
"realfft": True,
"compute_stft": True,
"compute_sst": False,
"compute_jtfrm": False,
"compute_qstft": False,
"eps_division": 1.0e-16,
"reassignment_mode": "magsq", # "magsq" or "complex"
}
def _initialize_helpers_jtfrt(self):
"""
Initialize helper arrays and variables for JTFRT
:return:
"""
windowsize = self.param_dict["windowsize"]
hopsize = self.param_dict["hopsize"]
sstsize = self.param_dict["sstsize"]
reassignment_mode = self.param_dict["reassignment_mode"]
reassignment_dtype: type = complex if reassignment_mode == "complex" else float
self.exp_time_shift = np.exp(-TWOPI * 1j * np.tile(np.arange(self.param_dict["fftsize"]),
[self.AudioSignal.channels, 1])
/ float(self.AudioSignal.samplerate))
jtfrt_memory_num_frames = windowsize // hopsize
channels = self.AudioSignal.channels
num_bins_up_to_nyquist = (sstsize // 2) + 1
self.jtfrt_shape = (channels, num_bins_up_to_nyquist)
self.jtfrt_memory = np.zeros([jtfrt_memory_num_frames, *self.jtfrt_shape], dtype=reassignment_dtype)
def _check_parameter_validity(self, transform):
windowmode = self.param_dict["windowmode"]
windowsize = self.param_dict["windowsize"]
hopsize = self.param_dict["hopsize"]
fftsize = self.param_dict["fftsize"]
buffermode = self.param_dict["buffermode"]
reassignment_mode = self.param_dict["reassignment_mode"]
# Validity checks for all transforms
if hopsize > windowsize:
raise ValueError("Not allowed to have hopsize {} larger than windowsize {} "
"because of the way SoundFile processes chunks".format(hopsize, windowsize))
if windowsize > fftsize:
raise ValueError("window size {} is larger than FFT size {}!".format(windowsize, fftsize))
if buffermode not in ["centered_analysis", "reconstruction", "valid_analysis"]:
raise ValueError("Invalid buffermode {}".format(buffermode))
elif buffermode == "reconstruction":
raise ValueError("Buffermode 'reconstruction' is not yet implemented")
# Transform-specific checks
if transform == "stft":
if windowmode != "single":
raise ValueError("windowmode must be 'single' for STFT, "
"instead it is {}".format(windowmode))
if windowsize < 2:
raise ValueError("windowsize {} must be at least 2 for STFT".format(windowsize))
elif transform in ["sst", "jtfrt"]:
if not self.param_dict["realfft"]:
raise ValueError("Must have realfft to compute SST/JTFRT, untested otherwise!")
if windowmode != "single":
raise ValueError("windowmode (currently) must be 'single' for SST/JTFRT, "
"instead it is {}. "
"Will support SST/JTFRT based on QSTFT later.".format(windowmode))
if windowsize < 4:
raise ValueError("windowsize {} must be at least 4 to deal with edge cases "
"for SST/JTFRT".format(windowsize))
if reassignment_mode not in ["magsq", "complex"]:
raise ValueError("Invalid reassignment mode {}".format(reassignment_mode))
else:
raise ValueError("Invalid transform {}".format(transform))
def compute_stft(self):
"""
Computes STFT and returns it as a generator with each STFT frame.
Allows for support of boundary frames.
TBD:
- Proper boundary treatment to ensure perfect reconstruction
- Option for stereo->mono for computation
- Testing for stereo signals
:yield: Generator, each instance is an STFT frame.
"""
self._check_parameter_validity("stft")
hopsize = self.param_dict["hopsize"]
windowsize = self.param_dict["windowsize"]
fftsize = self.param_dict["fftsize"]
windowfunc = self.param_dict["windowfunc"]
buffermode = self.param_dict["buffermode"]
overlap = windowsize - hopsize
window = windowfunc(windowsize)
# Just in case the audio signal has already been read out
self.AudioSignal.seek(frames=0)
# Compute the left boundary STFT frames
# Will refactor later when I put in the "reconstruction" buffering mode.
if buffermode == "centered_analysis":
initial_block = self.AudioSignal.read(frames=windowsize, always_2d=True)
initial_block = initial_block.T
# Pad the boundary with reflected audio frames, then yield the boundary STFT frame
frame0 = -(windowsize // 2) # if window is odd, this centers audio frame 0. reconstruction imperfect
while frame0 < 0:
reflect_block = self._pad_boundary_rows(initial_block[:, :frame0], windowsize, 'left')
yield self.wft(reflect_block, window, fftsize)
frame0 += hopsize
elif buffermode == "reconstruction":
pass # FILL THIS IN
frame0 = 0
elif buffermode == "valid_analysis":
frame0 = 0
else:
raise ValueError("Invalid buffermode {}".format(buffermode))
# Get the number of audio frames, and seek to the audio frame given by frame0
num_audio_frames = self.AudioSignal.get_num_frames_from_and_seek_start(start_frame=frame0)
# Now calculate the max number of FULL non-boundary STFT frames,
# considering hop size and window size.
num_full_stft_frames = 1 + ((num_audio_frames - windowsize) // hopsize)
# Convert that to the number of audio frames that you'll analyze for non-boundary STFT.
num_audio_frames_full_stft = (num_full_stft_frames - 1) * hopsize + windowsize
# Feed blocks to create the non-boundary STFT frames
blockreader = self.AudioSignal.blocks(blocksize=windowsize, overlap=overlap,
frames=num_audio_frames_full_stft, always_2d=True)
for block in blockreader:
block = block.T # First transpose to get each channel as a row
yield self.wft(block, window, fftsize)
frame0 += hopsize
# Compute the right boundary STFT frames
if buffermode == "centered_analysis":
# Need to read from frame0
self.AudioSignal.seek(frames=frame0)
final_block = self.AudioSignal.read(always_2d=True) # Read the rest of the file from there
final_block = final_block.T
final_block_num_frames = final_block.shape[1]
if final_block_num_frames >= windowsize:
raise ValueError("You shouldn't have final_block_num_frames {} "
"greater than windowsize {}".format(final_block_num_frames, windowsize))
# Pad the boundary with reflected audio frames,
# then add boundary STFT frames to the STFT
frame1 = 0
halfwindowsize = (windowsize + 1) // 2 # Edge case: odd windows, want final valid sample to be in middle
while final_block_num_frames - frame1 >= halfwindowsize:
reflect_block = self._pad_boundary_rows(final_block[:, frame1:], windowsize, 'right')
yield self.wft(reflect_block, window, fftsize)
frame1 += hopsize
elif buffermode == "reconstruction":
pass # FILL THIS IN
elif buffermode == "valid_analysis": # Do nothing at this point
pass
else:
raise ValueError("Invalid buffermode {}".format(buffermode))
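    # Usage sketch (added; not part of the original class): compute_stft() is a
    # generator of per-frame arrays of shape (channels, fftsize // 2 + 1); a
    # caller can stack the frames into a spectrogram. The filename below is
    # hypothetical.
    #
    #   tft = TFTransformer("example.wav")
    #   frames = list(tft.compute_stft())
    #   spectrogram = np.stack(frames, axis=-1)   # (channels, bins, num_frames)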
def compute_sst(self):
"""
TO DO:
- Investigate the non-realfft case
- Deal with mono vs. stereo etc.
- Deal with QSTFT case
:yield: synchrosqueezing transform of the given STFT frame
"""
self._check_parameter_validity("sst")
# windowmode = self.param_dict["windowmode"] # For later development
hopsize = self.param_dict["hopsize"]
windowsize = self.param_dict["windowsize"]
fftsize = self.param_dict["fftsize"]
sstsize = self.param_dict["sstsize"]
windowfunc = self.param_dict["windowfunc"]
buffermode = self.param_dict["buffermode"]
windowsize_p1 = windowsize + 1
overlap = windowsize_p1 - hopsize # For SST block procedure
window = windowfunc(windowsize)
reassignment_mode = self.param_dict["reassignment_mode"]
# Just in case the audio signal has already been read out
self.AudioSignal.seek(frames=0)
# Compute the left boundary SST frames
# Will refactor later when I put in the "reconstruction" buffering mode.
if buffermode == "centered_analysis":
initial_block = self.AudioSignal.read(frames=windowsize + 1, always_2d=True)
initial_block = initial_block.T
# Pad the boundary with reflected audio frames, then yield the boundary SST frames necessary
frame0 = -(windowsize // 2) # if window is odd, this centers audio frame 0. reconstruction imperfect
while frame0 < 0:
reflect_block = self._pad_boundary_rows(initial_block[:, :(frame0 + windowsize)], windowsize, 'left')
reflect_block_plus = self._pad_boundary_rows(initial_block[:, :(frame0 + windowsize_p1)],
windowsize, 'left')
yield self._reassign_sst(reflect_block, reflect_block_plus, window, fftsize, sstsize,
reassignment_mode)
frame0 += hopsize
elif buffermode == "reconstruction":
pass # FILL THIS IN
frame0 = 0
elif buffermode == "valid_analysis":
frame0 = 0
else:
raise ValueError("Invalid buffermode {}".format(buffermode))
# May refactor the following four non-comment code lines for full generality
# Get the number of audio frames, and seek to the audio frame given by frame0
num_audio_frames = self.AudioSignal.get_num_frames_from_and_seek_start(start_frame=frame0)
# Now calculate the max number of FULL non-boundary SST frames,
# considering hop size and window size. Have to modify because taking more frames than usual.
num_full_sst_frames = 1 + ((num_audio_frames - windowsize_p1) // hopsize)
# Convert that to the number of audio frames that you'll analyze for non-boundary SST.
num_audio_frames_full_sst = (num_full_sst_frames - 1) * hopsize + windowsize_p1
# Feed blocks to create the non-boundary SST frames, with
blockreader = self.AudioSignal.blocks(blocksize=windowsize_p1, overlap=overlap,
frames=num_audio_frames_full_sst, always_2d=True)
for block in blockreader:
block = block.T # First transpose to get each channel as a row
yield self._reassign_sst(block[:, :windowsize], block[:, 1:], window, fftsize, sstsize, reassignment_mode)
frame0 += hopsize
# Compute the right boundary SST frames
if buffermode == "centered_analysis":
# Need to read from frame0
self.AudioSignal.seek(frames=frame0)
# Read the rest of the file (length less than windowsize+1)
final_block = self.AudioSignal.read(always_2d=True)
final_block = final_block.T
final_block_num_frames = final_block.shape[1]
if final_block_num_frames >= windowsize_p1:
raise ValueError("You shouldn't have final_block_num_frames {} "
"greater than windowsize + 1 == {}".format(final_block_num_frames, windowsize_p1))
# Pad the boundary with reflected audio frames,
# then add boundary SST frames to the SST
frame1 = 0
halfwindowsize = (windowsize + 1) // 2 # Edge case: odd windows, want final valid sample to be in middle
while final_block_num_frames - frame1 >= halfwindowsize:
reflect_block = self._pad_boundary_rows(final_block[:, frame1:], windowsize, 'right')
reflect_block_plus = self._pad_boundary_rows(final_block[:, frame1 + 1:(frame1 + windowsize_p1)],
windowsize, 'right')
yield self._reassign_sst(reflect_block, reflect_block_plus, window, fftsize, sstsize,
reassignment_mode)
frame1 += hopsize
elif buffermode == "reconstruction":
pass # FILL THIS IN
elif buffermode == "valid_analysis": # Do nothing at this point
pass
else:
raise ValueError("Invalid buffermode {}".format(buffermode))
def compute_jtfrt(self):
"""
TO DO:
- Investigate the non-realfft case
- Deal with mono vs. stereo etc.
- Deal with QSTFT case
- Edge case: What happens at the right boundary? It's probably fine, but just check.
:yield: joint time-frequency reassignment transform of the given STFT frame
"""
self._check_parameter_validity("jtfrt")
self._initialize_helpers_jtfrt()
# windowmode = self.param_dict["windowmode"] # For later development
hopsize = self.param_dict["hopsize"]
windowsize = self.param_dict["windowsize"]
fftsize = self.param_dict["fftsize"]
sstsize = self.param_dict["sstsize"] # i.e., size of frequency axis. No option to change time axis yet
windowfunc = self.param_dict["windowfunc"]
buffermode = self.param_dict["buffermode"]
windowsize_p1 = windowsize + 1
overlap = windowsize_p1 - hopsize # For JTFRT block procedure
window = windowfunc(windowsize)
reassignment_mode = self.param_dict["reassignment_mode"]
reassignment_dtype: type = complex if reassignment_mode == "complex" else float
# Create a circular buffer of JTFRT frames of size windowsize // hopsize,
# This may not be enough if windowsize not divided evenly by hopsize, but forget that edge case
jtfrt_memory_num_frames = windowsize // hopsize
write_frame = 0
export_frame = -(jtfrt_memory_num_frames // 2) + 1
# Just in case the audio signal has already been read out
self.AudioSignal.seek(frames=0)
# Compute the left boundary JTFRT frames
# Will refactor later when I put in the "reconstruction" buffering mode.
if buffermode == "centered_analysis":
initial_block = self.AudioSignal.read(frames=windowsize + 1, always_2d=True)
initial_block = initial_block.T
# Pad the boundary with reflected audio frames, then yield the boundary JTFRT frames necessary
frame0 = -(windowsize // 2) # if window is odd, this centers audio frame 0. reconstruction imperfect
while frame0 < 0:
reflect_block = self._pad_boundary_rows(initial_block[:, :(frame0 + windowsize)], windowsize, 'left')
reflect_block_plus = self._pad_boundary_rows(initial_block[:, :(frame0 + windowsize_p1)],
windowsize, 'left')
self._reassign_jtfrt(write_frame, reflect_block, reflect_block_plus, window,
fftsize, sstsize, reassignment_mode)
frame0 += hopsize
write_frame += 1
write_frame %= jtfrt_memory_num_frames
if export_frame > -1:
# Export and reset this frame to zeros so it can be added to again
# You HAVE to yield a deepcopy or else it will yield a pointer to the memory array.
yield deepcopy(self.jtfrt_memory[export_frame])
self.jtfrt_memory[export_frame] = 0
export_frame += 1
export_frame %= jtfrt_memory_num_frames
else:
export_frame += 1
elif buffermode == "reconstruction":
pass # FILL THIS IN
frame0 = 0
elif buffermode == "valid_analysis":
frame0 = 0
else:
raise ValueError("Invalid buffermode {}".format(buffermode))
# May refactor the following four non-comment code lines for full generality
# Get the number of audio frames, and seek to the audio frame given by frame0
num_audio_frames = self.AudioSignal.get_num_frames_from_and_seek_start(start_frame=frame0)
# Now calculate the max number of FULL non-boundary JTFRT frames,
# considering hop size and window size. Have to modify because taking more frames than usual.
num_full_jtfrt_frames = 1 + ((num_audio_frames - windowsize_p1) // hopsize)
# Convert that to the number of audio frames that you'll analyze for non-boundary JTFRT.
num_audio_frames_full_jtfrt = (num_full_jtfrt_frames - 1) * hopsize + windowsize_p1
# Feed blocks to create the non-boundary JTFRT frames, with
blockreader = self.AudioSignal.blocks(blocksize=windowsize_p1, overlap=overlap,
frames=num_audio_frames_full_jtfrt, always_2d=True)
for block in blockreader:
block = block.T # First transpose to get each channel as a row
self._reassign_jtfrt(write_frame, block[:, :windowsize], block[:, 1:],
window, fftsize, sstsize, reassignment_mode)
frame0 += hopsize
write_frame += 1
write_frame %= jtfrt_memory_num_frames
if export_frame > -1:
# Export and reset this frame to zeros so it can be added to again
# You HAVE to yield a deepcopy or else it will yield a pointer to the memory array.
yield deepcopy(self.jtfrt_memory[export_frame])
self.jtfrt_memory[export_frame] = 0
export_frame += 1
export_frame %= jtfrt_memory_num_frames
else:
export_frame += 1
# Compute the right boundary JTFRT frames
if buffermode == "centered_analysis":
# Need to read from frame0
self.AudioSignal.seek(frames=frame0)
# Read the rest of the file (length less than windowsize+1)
final_block = self.AudioSignal.read(always_2d=True)
final_block = final_block.T
final_block_num_frames = final_block.shape[1]
if final_block_num_frames >= windowsize_p1:
raise ValueError("You shouldn't have final_block_num_frames {} "
"greater than windowsize + 1 == {}".format(final_block_num_frames, windowsize_p1))
# Pad the boundary with reflected audio frames,
# then add boundary JTFRT frames to the JTFRT
frame1 = 0
halfwindowsize = (windowsize + 1) // 2 # Edge case: odd windows, want final valid sample to be in middle
while final_block_num_frames - frame1 >= halfwindowsize:
reflect_block = self._pad_boundary_rows(final_block[:, frame1:], windowsize, 'right')
reflect_block_plus = self._pad_boundary_rows(final_block[:, frame1 + 1:(frame1 + windowsize_p1)],
windowsize, 'right')
self._reassign_jtfrt(write_frame, reflect_block, reflect_block_plus, window,
fftsize, sstsize, reassignment_mode)
frame1 += hopsize
write_frame += 1
write_frame %= jtfrt_memory_num_frames
if export_frame > -1:
# Export and reset this frame to zeros so it can be added to again
# You HAVE to yield a deepcopy or else it will yield a pointer to the memory array.
yield deepcopy(self.jtfrt_memory[export_frame])
self.jtfrt_memory[export_frame] = 0
export_frame += 1
export_frame %= jtfrt_memory_num_frames
else:
export_frame += 1
elif buffermode == "reconstruction":
pass # FILL THIS IN
elif buffermode == "valid_analysis": # Do nothing at this point
pass
else:
raise ValueError("Invalid buffermode {}".format(buffermode))
# Flush the remaining buffers
# No need to zero them out
while export_frame != write_frame:
yield deepcopy(self.jtfrt_memory[export_frame])
export_frame += 1
export_frame %= jtfrt_memory_num_frames
def wft(self, block: np.ndarray, window: np.ndarray, fftsize: int, fft_type="real") -> np.ndarray:
if fft_type == "real":
return np.fft.rfft(self._zeropad_rows(window * block, fftsize))
elif fft_type == "complex_short":
return np.fft.fft(self._zeropad_rows(window * block, fftsize))[:, :(1 + (fftsize // 2))]
elif fft_type == "complex_full": # For reconstruction
return np.fft.fft(self._zeropad_rows(window * block, fftsize))
else:
raise ValueError("Invalid fft_type {}, must use 'real', 'complex_short', "
"or 'complex_full'".format(fft_type))
@staticmethod
def _reassignment_value_map(x: np.ndarray, reassignment_mode: str) -> np.ndarray:
if reassignment_mode == "magsq":
return np.abs(x) ** 2.0
elif reassignment_mode == "complex":
return x
else:
raise ValueError("Invalid reassignment_mode {}".format(reassignment_mode))
def _reassign_sst(self, f: np.ndarray, f_plus: np.ndarray, window: np.ndarray,
fftsize: int, sstsize: int, reassignment_mode: str) -> np.ndarray:
channels = self.AudioSignal.channels
num_bins_up_to_nyquist = (sstsize // 2) + 1
sst_shape = (channels, num_bins_up_to_nyquist) # Bins above Nyquist generally irrelevant for SST purposes
wft = self.wft(f, window, fftsize)
wft_plus = self.wft(f_plus, window, fftsize)
rf = self._calculate_rf(wft, wft_plus) # Unit: Normalized frequency
out_of_bounds = np.where((rf < 0) | (rf > 0.5)) # For real valued signals rf > 0.5 is meaningless
wft[out_of_bounds] = 0
rf[out_of_bounds] = 0
sst_out = np.zeros(sst_shape, dtype=(complex if reassignment_mode == "complex" else float))
for channel in range(channels):
np.add.at(sst_out[channel], (rf[channel] * sstsize).astype(int),
self._reassignment_value_map(wft[channel], reassignment_mode))
return sst_out
def _reassign_jtfrt(self, write_frame: int, f: np.ndarray, f_plus: np.ndarray,
window: np.ndarray, fftsize: int, sstsize: int, reassignment_mode: str):
jtfrt_memory_num_frames = self.jtfrt_memory.shape[0]
jtfrt_memory_num_frames_half = jtfrt_memory_num_frames // 2
jtfrt_frames_back = jtfrt_memory_num_frames_half - 1
jtfrt_frames_front = jtfrt_memory_num_frames_half
channels = self.AudioSignal.channels
wft = self.wft(f, window, fftsize)
wft_plus_freq = self.wft(f_plus, window, fftsize)
# WARNING: For JTFRM based on QSTFT with multiple FFT sizes, OR FFT size changed by user,
# the line below will need to be modified.
try:
f_plus_time = f * self.exp_time_shift[:, :fftsize] # See warning above
except IndexError:
raise IndexError("self.exp_time_shift has dimensions {}, "
"but FFT size passed here is {}".format(self.exp_time_shift.shape, fftsize))
wft_plus_time = self.wft(f_plus_time, window, fftsize, fft_type="complex_short")
rf = self._calculate_rf(wft, wft_plus_freq) # Unit: Normalized frequency
rtdev = self._calculate_rtdev(wft, wft_plus_time)
rtdev = np.zeros(rtdev.shape, dtype=rtdev.dtype) # For debug, see if it works like SST does
out_of_bounds = np.where((rf < 0) | (rf > 0.5) | (rtdev > jtfrt_frames_back) | (rtdev < -jtfrt_frames_front))
wft[out_of_bounds] = 0
rf[out_of_bounds] = 0
# Change rf, rt to the appropriate location indices
rf = (rf * sstsize).astype(int)
rt = (np.round(write_frame - rtdev) % jtfrt_memory_num_frames).astype(int) # % because memory array is circular
rt[out_of_bounds] = 0
for channel in range(channels):
np.add.at(self.jtfrt_memory[:, channel, :], (rt[channel], rf[channel]),
self._reassignment_value_map(wft[channel], reassignment_mode))
def _calculate_rf(self, wft: np.ndarray, wft_plus: np.ndarray) -> np.ndarray:
eps_division = self.param_dict["eps_division"]
return np.angle(wft_plus / (wft + eps_division)) / TWOPI # Unit: Normalized frequency
def _calculate_rtdev(self, wft: np.ndarray, wft_plus: np.ndarray):
eps_division = self.param_dict["eps_division"]
hopsize = self.param_dict["hopsize"]
windowsize = self.param_dict["windowsize"]
samplerate = self.AudioSignal.samplerate
# Returned unit is in STFT frames from the center, current frame.
return np.angle(wft_plus / (wft + eps_division)) / TWOPI / hopsize * samplerate + windowsize/2/hopsize
@staticmethod
def _pad_boundary_rows(input_array: np.ndarray, finalsize: int, side: str) -> np.ndarray:
"""
Pad each channel of a buffer, where channels are assumed to be in rows.
Padding happens at the boundary, by even reflection.
:param input_array: array to be padded by reflection
:param finalsize: final size of the array (example: window size)
:param side: "left" or "right" to do the padding.
i.e., if "left", then padding is done to the left of the input array.
:return: output_array: reflection-padded array
"""
inputsize = input_array.shape[1]
if finalsize == inputsize:
return input_array
else:
padsize = finalsize - inputsize
if side == "left":
padsize_left = padsize
padsize_right = 0
elif side == "right":
padsize_left = 0
padsize_right = padsize
else:
raise ValueError("Pad side {} to pad_boundary_rows is invalid, "
"must be 'left' or 'right'".format(side))
if len(input_array.shape) == 2:
output_array = np.pad(input_array, ((0, 0), (padsize_left, padsize_right)), mode='reflect')
else:
raise ValueError("input array to pad_boundary_rows has dimensions {}, "
"which is not supported... must be 2D array even if mono".format(input_array.shape))
return output_array
@staticmethod
def _zeropad_rows(input_array: np.ndarray, finalsize: int) -> np.ndarray:
"""
Zeropad each channel of a buffer, where channels are assumed to be in rows.
Padding happens with the input array centered, and zeros padded equally on left and right,
unless finalsize minus inputsize is odd.
This is used for preparing a windowed array to be sent to an FFT.
:param input_array: array to be padded with zeros
:param finalsize: final size of the array (example: FFT size)
:return: output_array: zero-padded array
"""
inputsize = input_array.shape[1]
if inputsize == finalsize:
return input_array
else:
padsize = finalsize - inputsize
padsize_left = padsize // 2
padsize_right = padsize - padsize_left
if len(input_array.shape) == 2:
output_array = np.pad(input_array, ((0, 0), (padsize_left, padsize_right)), mode='constant')
else:
raise ValueError("input array to zeropad_rows has dimensions {}, "
"which is not supported... must be 2D array even if mono".format(input_array.shape))
return output_array
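# Usage sketch (not part of the original module): a minimal, standalone demonstration
# of the centered zero-padding performed by _zeropad_rows, assuming a 2D
# (channels x samples) frame as documented above; the sizes are made-up examples.
if __name__ == "__main__":
    import numpy as np
    frame = np.ones((2, 6))  # 2 channels, 6 samples
    fftsize = 8
    padsize = fftsize - frame.shape[1]
    padded = np.pad(frame, ((0, 0), (padsize // 2, padsize - padsize // 2)), mode='constant')
    print(padded.shape)  # (2, 8): the input is centered, zeros split between left and right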
| 52.119584 | 120 | 0.606757 |
f74674d3d04e992c9b38b8f5f7cfef8b4888cf75
| 1,290 |
py
|
Python
|
cycling_in_france/helper_func.py
|
nenb/cycling_in_france
|
6cddc433a2136f52be996719db0a1d876fcf5c59
|
[
"MIT"
] | null | null | null |
cycling_in_france/helper_func.py
|
nenb/cycling_in_france
|
6cddc433a2136f52be996719db0a1d876fcf5c59
|
[
"MIT"
] | null | null | null |
cycling_in_france/helper_func.py
|
nenb/cycling_in_france
|
6cddc433a2136f52be996719db0a1d876fcf5c59
|
[
"MIT"
] | null | null | null |
import regionmask
import numpy as np
import dask
def create_windmax_dict(u, v, names, borders, longitude, latitude):
"""Produce a dictionary of masked maximum wind speeds in units of mph."""
if u.units != "m s**-1":
raise ValueError("U field does not have units m/s")
if v.units != "m s**-1":
raise ValueError("V field does not have units m/s")
metre_to_mile = 3600.0 / 1609.3
speed = np.sqrt(u ** 2 + v ** 2) * metre_to_mile
windmax_dict = {}
for i, regname in enumerate(names):
# Modify index in case any entries have been dropped e.g. Corsica
idx = names.index[i]
# Create object from 'borders' for masking gridded data
regmask = regionmask.Regions(name=regname, outlines=list(borders[idx]))
# Apply mask to dataset coordinates
mask_zeros = regmask.mask(longitude, latitude)
# Replace zeros with ones for matrix multiplication
mask_ones = mask_zeros.where(np.isnan(mask_zeros.values), 1)
# Use Dask dataframes for lazy execution
mask_ones = dask.array.from_array(mask_ones)
speed_mask = speed * mask_ones
# Compute maximum over lat-lon grid
windmax_dict[regname] = speed_mask.max(dim=["longitude", "latitude"])
return windmax_dict
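# Usage sketch (illustrative only; the dataset and column names below are assumptions,
# not part of this module): u and v are xarray DataArrays carrying a units attribute of
# "m s**-1", while names and borders come from a GeoDataFrame of French regions.
#
#   windmax = create_windmax_dict(ds.u10, ds.v10, regions.nom, regions.geometry,
#                                 ds.longitude, ds.latitude)
#   windmax["Bretagne"].compute()  # lazily-evaluated maximum wind speed in mph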
| 40.3125 | 79 | 0.662791 |
c5de5f409109c1624a68f693d4f556e0e227cd13
| 4,542 |
py
|
Python
|
pyabc/sge/db.py
|
Gabriel-p/pyABC
|
a1c963203c9f9e3fa40793ccf214753fb689d27f
|
[
"BSD-3-Clause"
] | null | null | null |
pyabc/sge/db.py
|
Gabriel-p/pyABC
|
a1c963203c9f9e3fa40793ccf214753fb689d27f
|
[
"BSD-3-Clause"
] | null | null | null |
pyabc/sge/db.py
|
Gabriel-p/pyABC
|
a1c963203c9f9e3fa40793ccf214753fb689d27f
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sqlite3
import time
import redis
from .config import get_config
def within_time(job_start_time, max_run_time_h):
return time.time() - job_start_time < max_run_time_h * 1.1 * 3600
class SQLiteJobDB:
SQLITE_DB_TIMEOUT = 2000
def __init__(self, tmp_dir):
self.connection = sqlite3.connect(
os.path.join(tmp_dir, 'status.db'), timeout=self.SQLITE_DB_TIMEOUT
)
def clean_up(self):
pass
def create(self, nr_jobs):
# create database for job information
with self.connection:
self.connection.execute(
"CREATE TABLE IF NOT EXISTS "
"status(ID INTEGER, status TEXT, time REAL)"
)
def start(self, ID):
with self.connection:
self.connection.execute(
"INSERT INTO status VALUES(?,?,?)",
(ID, 'started', time.time()),
)
def finish(self, ID):
with self.connection:
self.connection.execute(
"INSERT INTO status VALUES(?,?,?)",
(ID, 'finished', time.time()),
)
def wait_for_job(self, ID, max_run_time_h):
"""
Return true if we should still wait for the job.
Return false otherwise
"""
# TODO Possible SQL injection error should be fixed, e.g. via
# pre-calculated expressions
with self.connection:
results = self.connection.execute(
"SELECT status, time from status WHERE ID=" # noqa: S608, B608
+ str(ID)
).fetchall()
nr_rows = len(results)
if nr_rows == 0:  # job not yet started
return True
if nr_rows == 1: # job already started
job_start_time = results[0][1]
# job took too long
if not within_time(job_start_time, max_run_time_h):
print('Job ' + str(ID) + ' timed out.') # noqa: T001
return False # job took too long
else: # still time left
return True
if nr_rows == 2: # job finished
return False
# something not caught here
raise Exception('Something went wrong. nr_rows={}'.format(nr_rows))
class RedisJobDB:
FINISHED_STATE = "finished"
STARTED_STATE = "started"
@classmethod
def server_online(cls):
try:
redis.Redis(get_config()["REDIS"]["HOST"]).get(None)  # same host lookup as __init__
except redis.ConnectionError:
return False
else:
return True
def __init__(self, tmp_dir):
config = get_config()
self.HOST = config["REDIS"]["HOST"]
self.job_name = os.path.basename(tmp_dir)
self.connection = redis.Redis(host=self.HOST, decode_responses=True)
def key(self, ID):
return self.job_name + ":" + str(ID)
def clean_up(self):
IDs = map(int, self.connection.lrange(self.job_name, 0, -1))
pipeline = self.connection.pipeline()
for ID in IDs:
pipeline.delete(self.key(ID))
pipeline.delete(self.job_name)
pipeline.execute()
def create(self, nr_jobs):
pipeline = self.connection.pipeline()
for ID in range(1, nr_jobs + 1):
pipeline.rpush(self.job_name, ID)
pipeline.execute()
def start(self, ID):
self.connection.hmset(
self.key(ID), {"status": self.STARTED_STATE, "time": time.time()}
)
def finish(self, ID):
self.connection.hmset(
self.key(ID), {"status": self.FINISHED_STATE, "time": time.time()}
)
def wait_for_job(self, ID, max_run_time_h):
values = self.connection.hgetall(self.key(ID))
if len(values) == 0: # not yet set, job not yet started
return True
status = values["status"]
time_stamp = float(values["time"])
if status == self.FINISHED_STATE:
return False
if status == self.STARTED_STATE:
if within_time(time_stamp, max_run_time_h):
return True
return False
raise Exception('Something went wrong.')
def job_db_factory(tmp_path):
"""
Returns
-------
SQLite or redis db depending on availability
"""
config = get_config()
if config["BROKER"]["TYPE"] == "REDIS":
return RedisJobDB(tmp_path)
if config["BROKER"]["TYPE"] == "SQLITE":
return SQLiteJobDB(tmp_path)
raise Exception("Unknown broker: {}".format(config["BROKER"]["TYPE"]))
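# Usage sketch (illustrative only, not part of the original module; runnable via
# `python -m pyabc.sge.db` assuming the package's dependencies are installed):
# within_time() treats a job as still running while less than
# max_run_time_h * 1.1 hours have passed since its recorded start time.
if __name__ == "__main__":
    started_recently = time.time() - 30 * 60       # started 30 minutes ago
    print(within_time(started_recently, 1))         # True: under the 1.1 h budget for 1 h jobs
    print(within_time(time.time() - 2 * 3600, 1))   # False: 2 h exceeds the budget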
| 29.303226 | 79 | 0.570233 |
70f451775205a648d2ab827293e6eb5db45200eb
| 890 |
py
|
Python
|
app/context_processors.py
|
Axiacore/knowledge-base
|
edf78e0bc1e0e4334f77b5785749408ba426627d
|
[
"MIT"
] | 9 |
2016-06-10T21:24:15.000Z
|
2018-03-05T15:43:20.000Z
|
app/context_processors.py
|
Axiacore/knowledge-base
|
edf78e0bc1e0e4334f77b5785749408ba426627d
|
[
"MIT"
] | 3 |
2016-06-14T19:23:38.000Z
|
2016-10-12T16:33:27.000Z
|
app/context_processors.py
|
Axiacore/knowledge-base
|
edf78e0bc1e0e4334f77b5785749408ba426627d
|
[
"MIT"
] | 5 |
2016-06-14T13:02:26.000Z
|
2019-09-19T21:08:14.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: AxiaCore S.A.S. http://axiacore.com
from django.conf import settings
from django.utils.safestring import mark_safe
from string import Template
TRACKING_CODE = """
<script>
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};
ga.l=+new Date;
ga('create', '$ga_code', 'auto');
ga('send', 'pageview');
</script>
<script async src='https://www.google-analytics.com/analytics.js'></script>
"""
def logo_company(request):
return {'LOGO_COMPANY': settings.LOGO_COMPANY}
def analytics(request):
"""
Enable analytics script if debug is False
"""
script = ''
if not settings.DEBUG:
template = Template(TRACKING_CODE)
script = mark_safe(
template.substitute(
ga_code=settings.GOOGLE_ANALYTICS_CODE,
)
)
return {'ANALYTICS': script}
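# Usage sketch (illustrative only; the tracking ID is a made-up example, and running
# this directly assumes Django is importable): how the $ga_code placeholder in
# TRACKING_CODE is filled in by string.Template.
if __name__ == "__main__":
    rendered = Template(TRACKING_CODE).substitute(ga_code='UA-000000-0')
    print('UA-000000-0' in rendered)  # True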
| 23.421053 | 75 | 0.644944 |
b056012ca7196a9f722aab62488de68928721578
| 159 |
py
|
Python
|
itam/it_asset_management/doctype/itam_asset/test_itam_asset.py
|
Res-IT-Solutions/ITAM
|
70a5ab7da17395d8c2b7adb7954b104dd9f03c91
|
[
"MIT"
] | null | null | null |
itam/it_asset_management/doctype/itam_asset/test_itam_asset.py
|
Res-IT-Solutions/ITAM
|
70a5ab7da17395d8c2b7adb7954b104dd9f03c91
|
[
"MIT"
] | null | null | null |
itam/it_asset_management/doctype/itam_asset/test_itam_asset.py
|
Res-IT-Solutions/ITAM
|
70a5ab7da17395d8c2b7adb7954b104dd9f03c91
|
[
"MIT"
] | 1 |
2022-03-23T12:33:13.000Z
|
2022-03-23T12:33:13.000Z
|
# Copyright (c) 2021, Res-IT Solutions Ltd. and Contributors
# See license.txt
# import frappe
import unittest
class TestITAMAsset(unittest.TestCase):
pass
| 17.666667 | 60 | 0.773585 |
06c7edf28f4a3fffbf0dd90b353486c2e6414743
| 8,365 |
py
|
Python
|
WarpDrive/WarpDriveLib/Tools/DrawTool.py
|
simonoxen/SlicerNetstim
|
52e3abf486fa67f853e5197ace755398cdbdbb7e
|
[
"BSD-3-Clause"
] | null | null | null |
WarpDrive/WarpDriveLib/Tools/DrawTool.py
|
simonoxen/SlicerNetstim
|
52e3abf486fa67f853e5197ace755398cdbdbb7e
|
[
"BSD-3-Clause"
] | null | null | null |
WarpDrive/WarpDriveLib/Tools/DrawTool.py
|
simonoxen/SlicerNetstim
|
52e3abf486fa67f853e5197ace755398cdbdbb7e
|
[
"BSD-3-Clause"
] | null | null | null |
import qt, vtk, slicer
import numpy as np
from ..Widgets.ToolWidget import AbstractToolWidget
from ..Effects.DrawEffect import AbstractDrawEffect
from ..Helpers import GridNodeHelper, WarpDriveUtil
class DrawToolWidget(AbstractToolWidget):
def __init__(self):
toolTip = ''
AbstractToolWidget.__init__(self, 'Draw', toolTip)
# set up menu
nearestModelAction = qt.QAction('To Nearest Model', self.effectButton)
nearestModelAction.setCheckable(True)
nearestModelAction.setChecked(True)
twoLinesAction = qt.QAction('To Following Line', self.effectButton)
twoLinesAction.setCheckable(True)
actionsGroup = qt.QActionGroup(self.effectButton)
actionsGroup.addAction(nearestModelAction)
actionsGroup.addAction(twoLinesAction)
menu = qt.QMenu(self.effectButton)
menu.addActions(actionsGroup.actions())
self.effectButton.setMenu(menu)
self.effectButton.setPopupMode(self.effectButton.DelayedPopup)
class DrawToolEffect(AbstractDrawEffect):
def __init__(self, sliceWidget):
AbstractDrawEffect.__init__(self, sliceWidget)
self.sourceFiducial = None
def processEvent(self, caller=None, event=None):
AbstractDrawEffect.processEvent(self, caller, event)
if event == 'LeftButtonReleaseEvent':
if self.sourceFiducial is None: # no previous drawing
self.sourceFiducial = self.getFiducialFromDrawing()
if self.sourceFiducial is None:
self.resetPolyData()
return
if self.parameterNode.GetParameter("DrawMode") == 'To Nearest Model': # get target fiducial from nearest model
targetFiducial = self.getFiducialFromSlicedModel()
elif self.parameterNode.GetParameter("DrawMode") == 'To Following Line': # return and wait for following drawing
self.sourceFiducial.GetDisplayNode().SetVisibility(1)
self.resetPolyData()
return
else: # use new drawing as target fiducial
targetFiducial = self.getFiducialFromDrawing(nPoints = self.sourceFiducial.GetNumberOfControlPoints())
if targetFiducial is None:
slicer.mrmlScene.RemoveNode(self.sourceFiducial)
self.sourceFiducial = None
self.resetPolyData()
return
self.sourceFiducial.ApplyTransform(self.parameterNode.GetNodeReference("OutputGridTransform").GetTransformFromParent()) # undo current
WarpDriveUtil.addCorrection(self.sourceFiducial, targetFiducial,
spread=int(round(float(self.parameterNode.GetParameter("Spread")))),
referenceNode = self.parameterNode.GetNodeReference("InputNode"))
self.parameterNode.SetParameter("Update","true")
self.sourceFiducial = None
self.resetPolyData()
elif event == 'RightButtonPressEvent' or (event == 'KeyPressEvent' and self.interactor.GetKeySym()=='Escape'):
slicer.mrmlScene.RemoveNode(self.sourceFiducial)
self.sourceFiducial = None
def getFiducialFromDrawing(self, sampleDistance = 1, nPoints = None):
# create curve from drawing and resample
sourceCurve = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsCurveNode')
sourceCurve.SetControlPointPositionsWorld(self.rasPoints)
sourceCurve.ResampleCurveWorld(sampleDistance)
if nPoints is not None: # resample to get specified number of points
sourceCurve.ResampleCurveWorld(sourceCurve.GetCurveLengthWorld() / max((nPoints - 1), 1))
# get resampled points
resampledPoints = vtk.vtkPoints()
sourceCurve.GetControlPointPositionsWorld(resampledPoints)
sourceFiducial = self.curveToFiducial(sourceCurve)
slicer.mrmlScene.RemoveNode(sourceCurve)
if sourceFiducial.GetNumberOfControlPoints() <= 1:
slicer.mrmlScene.RemoveNode(sourceFiducial)
return None
else:
return sourceFiducial
def getFiducialFromSlicedModel(self, sampleDistance = 1):
# get source fiducial points
resampledPoints = vtk.vtkPoints()
self.sourceFiducial.GetControlPointPositionsWorld(resampledPoints)
# get closest model sliced
slicedModel, originalModel = self.sliceClosestModel(resampledPoints.GetPoint(0))
if not slicedModel:
return None
# resample sourceCurve in sliced model with same amount of points
targetCurve = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsCurveNode')
targetCurve.GetDisplayNode().SetVisibility(0)
targetCurve.SetControlPointPositionsWorld(resampledPoints)
targetCurve.SetCurveTypeToShortestDistanceOnSurface(slicedModel)
targetCurve.ResampleCurveSurface(sampleDistance, slicer.vtkMRMLModelNode().SafeDownCast(slicedModel), 0.0025)
targetCurve.ResampleCurveWorld(targetCurve.GetCurveLengthWorld() / max((resampledPoints.GetNumberOfPoints() - 1), 1))
# curve to fiducial
targetFiducial = self.curveToFiducial(targetCurve)
# set name
shNode = slicer.mrmlScene.GetSubjectHierarchyNode()
modelParentName = shNode.GetItemName(shNode.GetItemParent(shNode.GetItemByDataNode(originalModel)))
targetFiducial.SetName(modelParentName + '_' + originalModel.GetName())
# remove
slicer.mrmlScene.RemoveNode(targetCurve)
slicer.mrmlScene.RemoveNode(slicedModel)
return targetFiducial
def copyControlPoints(self, sourceNode, targetNode):
if targetNode.GetNumberOfControlPoints() == 0:
label = '1'
else:
label = str( int(targetNode.GetNthFiducialLabel(targetNode.GetNumberOfControlPoints()-1)) + 1 )
p = [0]*3
for i in range(sourceNode.GetNumberOfControlPoints()):
sourceNode.GetNthControlPointPosition(i,p)
targetNode.AddFiducialFromArray(p, label)
def curveToFiducial(self, curve):
fiducial = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsFiducialNode')
fiducial.GetDisplayNode().SetVisibility(0)
fiducial.GetDisplayNode().SetTextScale(0)
points = vtk.vtkPoints()
curve.GetControlPointPositionsWorld(points)
fiducial.SetControlPointPositionsWorld(points)
return fiducial
def sliceClosestModel(self, point):
originalModel = None
# set up plane
normal = np.array([float(self.sliceLogic.GetSliceNode().GetName()==name) for name in ['Yellow','Green','Red']])
plane = vtk.vtkPlane()
plane.SetOrigin(point) # point in plane
plane.SetNormal(normal)
# set up cutter
cutter = vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetGenerateCutScalars(0)
cutter.Update()
# init point locator and output
pointsLocator = vtk.vtkPointLocator()
globalMinDistance = 1000
outPolyData = vtk.vtkPolyData()
# iterate over models in scene
nModels = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLModelNode')
for i in range(nModels):
model = slicer.mrmlScene.GetNthNodeByClass(i, 'vtkMRMLModelNode')
polyData = model.GetPolyData()
if model.GetDisplayNode() and model.GetDisplayNode().GetVisibility() and polyData.GetNumberOfCells() > 1 and model.GetName() != 'auxSphereModel': # model visible and cells available
cutter.SetInputData(polyData)
cutter.Update()
cutterOutput = cutter.GetOutput()
if cutterOutput.GetNumberOfCells(): # model intersects with plane
# get distance from input point to closest point in model
pointsLocator.SetDataSet(cutterOutput)
pointsLocator.BuildLocator()
closestPoint = cutterOutput.GetPoint(pointsLocator.FindClosestPoint(point))
localMinDistance = vtk.vtkMath().Distance2BetweenPoints(closestPoint, point)
if localMinDistance < globalMinDistance: # new min
outPolyData.DeepCopy(cutterOutput)
globalMinDistance = localMinDistance
originalModel = model
# return in case no model found
if not originalModel:
return False, False
# generate output
triangulator = vtk.vtkContourTriangulator()
triangulator.SetInputData(outPolyData)
triangulator.Update()
slicedModel = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelNode')
slicedModel.SetAndObservePolyData(triangulator.GetOutput())
slicedModel.CreateDefaultDisplayNodes()
slicedModel.GetDisplayNode().SetVisibility(0)
return slicedModel, originalModel
def cleanup(self):
slicer.mrmlScene.RemoveNode(self.sourceFiducial)
self.sourceFiducial = None
AbstractDrawEffect.cleanup(self)
| 39.2723 | 186 | 0.736402 |
125224bd8e29332a724348f8b627a69969e63866
| 649 |
py
|
Python
|
price.py
|
indiBlockDev/CryptoIndiaBot
|
d6b7b9c1e797426688db0d1c05975d086d4095d6
|
[
"MIT"
] | null | null | null |
price.py
|
indiBlockDev/CryptoIndiaBot
|
d6b7b9c1e797426688db0d1c05975d086d4095d6
|
[
"MIT"
] | null | null | null |
price.py
|
indiBlockDev/CryptoIndiaBot
|
d6b7b9c1e797426688db0d1c05975d086d4095d6
|
[
"MIT"
] | null | null | null |
import requests
import time
price = ""
def store(name,url):
global price
data = requests.get(url).json()
if "Coinsecure" in name:
bid = data["bid"]
ask = data["ask"]
else:
bid = data["buy"]
ask = data["sell"]
if("Coinsecure" in name):
price = "Coinsecure : \nBid : %s Ask : %s\n" %(bid/100,ask/100)
else:
price = price + "%s :\nBid : %s Ask : %s\n" %(name,bid,ask)
def getPrice():
while True:
store("Coinsecure","https://api.coinsecure.in/v0/noauth/newticker")
store("Unocoin","https://www.unocoin.com/trade?all")
store("Zebpay","https://api.zebpay.com/api/v1/ticker?currencyCode=INR")
time.sleep(300)
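# Usage sketch (illustrative only, not part of the original module): getPrice() blocks
# forever, so a caller would typically run it on a daemon thread and read the
# module-level `price` string from elsewhere, e.g.
#
#   import threading, price
#   threading.Thread(target=price.getPrice, daemon=True).start()
#   ...
#   print(price.price)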
| 19.088235 | 73 | 0.625578 |
1719c62da08f708b0ba25f21d9cb3e56faa43376
| 3,237 |
py
|
Python
|
improver/utilities/cube_constraints.py
|
cgsandford/improver
|
3cfbf3323d3a693a9c61fec13350295b85d03676
|
[
"BSD-3-Clause"
] | 1 |
2021-08-02T21:17:18.000Z
|
2021-08-02T21:17:18.000Z
|
improver/utilities/cube_constraints.py
|
NMC-DAVE/improver
|
b56379f8bd236ddf2bab31ef64af8345de856cc1
|
[
"BSD-3-Clause"
] | 4 |
2018-01-24T11:29:15.000Z
|
2021-01-11T15:16:21.000Z
|
improver/utilities/cube_constraints.py
|
cgsandford/improver
|
3cfbf3323d3a693a9c61fec13350295b85d03676
|
[
"BSD-3-Clause"
] | 1 |
2021-08-02T21:17:19.000Z
|
2021-08-02T21:17:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module for helping to create Iris constraints."""
import iris
def create_sorted_lambda_constraint(coord_name, values, tolerance=1.0e-7):
"""
Create a lambda constraint for a range. This formulation of specifying
a lambda constraint has the benefit of not needing to hardcode the name
for the coordinate, so that this can be determined at runtime.
The created function uses float values. As a result, a small tolerance is
used to spread the ends of the ranges to help with float equality
matching. Note that the relative tolerance will not affect values of zero.
Adding/subtracting an absolute tolerance is not done due to the
difficulty of selecting an appropriate value given the very small values
of precipitation rates expressed in m s-1.
Args:
coord_name (str):
Name of the coordinate.
values (list):
A list of two values that represent the inclusive end points
of a range.
tolerance (float):
A relative tolerance value to ensure equivalence matching when
using float32 values. Values of zero will be unchanged.
Returns:
iris.Constraint:
Constraint representative of a range of values.
"""
values = [float(i) for i in values]
values = sorted(values)
values[0] = (1.0 - tolerance) * values[0]
values[1] = (1.0 + tolerance) * values[1]
constr = iris.Constraint(
coord_values={coord_name: lambda cell: values[0] <= cell <= values[1]}
)
return constr
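if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; the coordinate name and values are
    # made-up examples, and running it assumes iris is installed).  The constraint
    # accepts cells whose 'precipitation_rate' lies inside the slightly widened
    # range [1e-5, 2e-5].
    constr = create_sorted_lambda_constraint("precipitation_rate", ["2e-5", "1e-5"])
    print(constr)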
| 44.342466 | 79 | 0.711152 |
48e64c9cf1705560716a3683378fe07828ec08d7
| 6,374 |
py
|
Python
|
test/unit/pulled_search/process_files.py
|
mjpernot/pulled-search
|
2c8f4dee420f0c2fc12867b2a642e3a530d2f24c
|
[
"MIT"
] | null | null | null |
test/unit/pulled_search/process_files.py
|
mjpernot/pulled-search
|
2c8f4dee420f0c2fc12867b2a642e3a530d2f24c
|
[
"MIT"
] | null | null | null |
test/unit/pulled_search/process_files.py
|
mjpernot/pulled-search
|
2c8f4dee420f0c2fc12867b2a642e3a530d2f24c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Classification (U)
"""Program: process_files.py
Description: Unit testing of process_files in pulled_search.py.
Usage:
test/unit/pulled_search/process_files.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import pulled_search
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialize testing environment.
test_with_preamble -> Test with pre-amble subject.
test_with_no_mail -> Test with no mail setup.
test_nonprocessed_files -> Test with nonprocessed files.
test_no_log_files -> Test with no log files detected.
test_with_mail -> Test with mail setup.
test_with_data -> Test with successful log file check.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
class CfgTest(object):
"""Class: CfgTest
Description: Class which is a representation of a cfg module.
Methods:
__init__ -> Initialize configuration environment.
"""
def __init__(self):
"""Method: __init__
Description: Initialization instance of the CfgTest class.
Arguments:
"""
self.file_regex = "*_docid.json"
self.doc_dir = "/dir_path/doc_dir"
self.error_dir = "/dir/path/error_dir"
self.archive_dir = "/dir/path/archive_dir"
self.cfg = CfgTest()
self.log_files = ["/path/logfile1", "/path/logfile2"]
self.args_array = {"-t": "name@domain"}
self.args_array2 = {}
self.args_array3 = {"-t": "name@domain", "-s": "Pre-amble: "}
@mock.patch("pulled_search.gen_class.setup_mail",
mock.Mock(return_value=True))
@mock.patch("pulled_search.cleanup_files", mock.Mock(return_value=[]))
@mock.patch("pulled_search.process_list", mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_libs.filename_search",
mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_class.Logger")
def test_with_preamble(self, mock_log):
"""Function: test_with_preamble
Description: Test with pre-amble subject.
Arguments:
"""
mock_log.return_value = True
self.assertFalse(pulled_search.process_files(self.args_array3,
self.cfg, mock_log))
@mock.patch("pulled_search.gen_class.setup_mail",
mock.Mock(return_value=True))
@mock.patch("pulled_search.cleanup_files", mock.Mock(return_value=[]))
@mock.patch("pulled_search.process_list", mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_libs.filename_search",
mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_class.Logger")
def test_with_no_mail(self, mock_log):
"""Function: test_with_no_mail
Description: Test with no mail setup.
Arguments:
"""
mock_log.return_value = True
self.assertFalse(pulled_search.process_files(self.args_array2,
self.cfg, mock_log))
@mock.patch("pulled_search.gen_class.setup_mail",
mock.Mock(return_value=True))
@mock.patch("pulled_search.cleanup_files", mock.Mock(return_value=[]))
@mock.patch("pulled_search.process_list", mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_libs.filename_search",
mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_class.Logger")
def test_nonprocessed_files(self, mock_log):
"""Function: test_nonprocessed_files
Description: Test with nonprocessed files.
Arguments:
"""
self.assertFalse(pulled_search.process_files({}, self.cfg, mock_log))
@mock.patch("pulled_search.gen_class.setup_mail",
mock.Mock(return_value=True))
@mock.patch("pulled_search.cleanup_files", mock.Mock(return_value=[]))
@mock.patch("pulled_search.process_list", mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_libs.filename_search",
mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_class.Logger")
def test_no_log_files(self, mock_log):
"""Function: test_no_log_files
Description: Test with no log files detected.
Arguments:
"""
self.assertFalse(pulled_search.process_files({}, self.cfg, mock_log))
@mock.patch("pulled_search.gen_class.setup_mail",
mock.Mock(return_value=True))
@mock.patch("pulled_search.cleanup_files", mock.Mock(return_value=[]))
@mock.patch("pulled_search.process_list", mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_libs.filename_search",
mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_class.Logger")
def test_with_mail(self, mock_log):
"""Function: test_with_mail
Description: Test with mail setup.
Arguments:
"""
mock_log.return_value = True
self.assertFalse(pulled_search.process_files(self.args_array, self.cfg,
mock_log))
@mock.patch("pulled_search.gen_class.setup_mail",
mock.Mock(return_value=True))
@mock.patch("pulled_search.cleanup_files", mock.Mock(return_value=[]))
@mock.patch("pulled_search.process_list", mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_libs.filename_search",
mock.Mock(return_value=[]))
@mock.patch("pulled_search.gen_class.Logger")
def test_with_data(self, mock_log):
"""Function: test_with_data
Description: Test with successful log file check.
Arguments:
"""
self.assertFalse(pulled_search.process_files({}, self.cfg, mock_log))
if __name__ == "__main__":
unittest.main()
| 28.711712 | 79 | 0.630844 |
ceeb572f340af7dc5d5a346305ec18b7f45c2ed7
| 922 |
py
|
Python
|
bims/management/commands/update_long_lat_locationsite.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | null | null | null |
bims/management/commands/update_long_lat_locationsite.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | null | null | null |
bims/management/commands/update_long_lat_locationsite.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from django.contrib.gis.db import models
from django.db.models import Q
from bims.models.location_site import (
location_site_post_save_handler,
LocationSite
)
class Command(BaseCommand):
"""Update taxa.
"""
def handle(self, *args, **options):
# Update lon lat
location_sites = LocationSite.objects.filter(
Q(longitude__isnull=True) |
Q(latitude__isnull=True)
)
print('Updating: %s' % len(location_sites))
models.signals.post_save.disconnect(
location_site_post_save_handler,
)
for site in location_sites:
center = site.get_centroid()
site.longitude = center.x
site.latitude = center.y
site.save()
models.signals.post_save.connect(
location_site_post_save_handler,
)
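# Usage sketch (illustrative only): invoked like any Django management command,
#
#   python manage.py update_long_lat_locationsite
#
# which back-fills missing longitude/latitude from each site's centroid while the
# post_save handler is temporarily disconnected.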
| 25.611111 | 53 | 0.625813 |
bef41b9281dc6ce244afb61b34be82fcd526250f
| 365 |
py
|
Python
|
sleuth_crawler/scraper/scraper/customcontext.py
|
ubclaunchpad/sleuth
|
7b7be0b7097a26169e17037f4220fd0ce039bde1
|
[
"MIT"
] | 12 |
2017-09-17T02:14:35.000Z
|
2022-01-09T10:14:59.000Z
|
sleuth_crawler/scraper/scraper/customcontext.py
|
ubclaunchpad/sleuth
|
7b7be0b7097a26169e17037f4220fd0ce039bde1
|
[
"MIT"
] | 92 |
2017-09-16T23:50:45.000Z
|
2018-01-02T01:56:33.000Z
|
sleuth_crawler/scraper/scraper/customcontext.py
|
ubclaunchpad/sleuth
|
7b7be0b7097a26169e17037f4220fd0ce039bde1
|
[
"MIT"
] | 5 |
2017-12-26T01:47:36.000Z
|
2021-12-31T11:15:07.000Z
|
from OpenSSL import SSL
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
class CustomContextFactory(ScrapyClientContextFactory):
"""
Custom context factory that allows SSL negotiation.
"""
def __init__(self, method):
# Use SSLv23_METHOD so we can use protocol negotiation
self.method = SSL.SSLv23_METHOD
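# Usage sketch (illustrative only; the dotted path depends on the project layout):
# enable the factory in the Scrapy project's settings module, e.g.
#
#   DOWNLOADER_CLIENTCONTEXTFACTORY = 'scraper.customcontext.CustomContextFactory'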
| 33.181818 | 76 | 0.758904 |
955bdf46af50519c1d2655aebc3253aa81ff1999
| 2,024 |
py
|
Python
|
src/tests/ftest/util/GeneralUtils.py
|
phylcrandall/daos
|
7a6adf9733f12538afcea44cced450b0fd4ad31d
|
[
"Apache-2.0"
] | null | null | null |
src/tests/ftest/util/GeneralUtils.py
|
phylcrandall/daos
|
7a6adf9733f12538afcea44cced450b0fd4ad31d
|
[
"Apache-2.0"
] | null | null | null |
src/tests/ftest/util/GeneralUtils.py
|
phylcrandall/daos
|
7a6adf9733f12538afcea44cced450b0fd4ad31d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
'''
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
import os
import json
from pathlib import Path
from errno import ENOENT
def get_file_path(bin_name, dir_path=""):
"""
Find the path of a binary inside the daos_m build tree and return the list of matching paths.
args:
bin_name: binary file to search for.
dir_path: Directory location on top of daos_m to find the
bin.
return:
list: list of the paths for bin_name file
Raises:
OSError: If failed to find the bin_name file
"""
with open('../../../.build_vars.json') as json_file:
build_paths = json.load(json_file)
basepath = os.path.normpath(build_paths['PREFIX'] + "/../{0}"
.format(dir_path))
file_path = list(Path(basepath).glob('**/{0}'.format(bin_name)))
if not file_path:
raise OSError(ENOENT, "File {0} not found inside {1} Directory"
.format(bin_name, basepath))
else:
return file_path
class DaosTestError(Exception):
"""
DAOS API exception class
"""
| 36.142857 | 79 | 0.682806 |
e852edc72bf5e25c45467cefec859744216113e1
| 43,517 |
py
|
Python
|
program/views.py
|
Dumbaz/autoradio-pv
|
8aae293e58b2e79a05956c535bb109f74edc89c3
|
[
"BSD-3-Clause"
] | null | null | null |
program/views.py
|
Dumbaz/autoradio-pv
|
8aae293e58b2e79a05956c535bb109f74edc89c3
|
[
"BSD-3-Clause"
] | null | null | null |
program/views.py
|
Dumbaz/autoradio-pv
|
8aae293e58b2e79a05956c535bb109f74edc89c3
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from datetime import date, datetime, time, timedelta
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.forms.models import model_to_dict
from rest_framework import permissions, serializers, status, viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.pagination import LimitOffsetPagination
from program.models import Type, MusicFocus, Language, Note, Show, Category, RTRCategory, Topic, TimeSlot, Host, Schedule, RRule
from program.serializers import TypeSerializer, LanguageSerializer, MusicFocusSerializer, NoteSerializer, ShowSerializer, ScheduleSerializer, CategorySerializer, RTRCategorySerializer, TopicSerializer, TimeSlotSerializer, HostSerializer, UserSerializer
from program.utils import tofirstdayinisoweek, get_cached_shows
# Deprecated
class CalendarView(TemplateView):
template_name = 'calendar.html'
# Deprecated
class HostListView(ListView):
context_object_name = 'host_list'
queryset = Host.objects.filter(Q(is_active=True) | Q(shows__schedules__until__gt=datetime.now())).distinct()
template_name = 'host_list.html'
# Deprecated
class HostDetailView(DetailView):
context_object_name = 'host'
queryset = Host.objects.all()
template_name = 'host_detail.html'
# Deprecated
class ShowListView(ListView):
context_object_name = 'show_list'
template_name = 'show_list.html'
def get_queryset(self):
queryset = Show.objects.filter(schedules__until__gt=date.today()).exclude(id=1).distinct()
if 'type' in self.request.GET:
type = get_object_or_404(Type, slug=self.request.GET['type'])
queryset = queryset.filter(type=type)
elif 'musicfocus' in self.request.GET:
musicfocus = get_object_or_404(MusicFocus, slug=self.request.GET['musicfocus'])
queryset = queryset.filter(musicfocus=musicfocus)
elif 'category' in self.request.GET:
category = get_object_or_404(Category, slug=self.request.GET['category'])
queryset = queryset.filter(category=category)
elif 'topic' in self.request.GET:
topic = get_object_or_404(Topic, slug=self.request.GET['topic'])
queryset = queryset.filter(topic=topic)
elif 'rtrcategory' in self.request.GET:
rtrcategory = get_object_or_404(RTRCategory, slug=self.request.GET['rtrcategory'])
queryset = queryset.filter(rtrcategory=rtrcategory)
return queryset
# Deprecated
class ShowDetailView(DetailView):
queryset = Show.objects.all().exclude(id=1)
template_name = 'show_detail.html'
# Deprecated
class TimeSlotDetailView(DetailView):
queryset = TimeSlot.objects.all()
template_name = 'timeslot_detail.html'
# Deprecated
class RecommendationsListView(ListView):
context_object_name = 'recommendation_list'
template_name = 'recommendation_list.html'
now = datetime.now()
end = now + timedelta(weeks=1)
queryset = TimeSlot.objects.filter(Q(note__isnull=False, note__status=1,
start__range=(now, end)) |
Q(show__type__slug='sondersendung',
start__range=(now, end))).order_by('start')[:20]
# Deprecated
class RecommendationsBoxView(RecommendationsListView):
template_name = 'boxes/recommendation.html'
# Deprecated
class DayScheduleView(TemplateView):
template_name = 'day_schedule.html'
def get_context_data(self, **kwargs):
year = self.kwargs.get('year', None)
month = self.kwargs.get('month', None)
day = self.kwargs.get('day', None)
if year is None and month is None and day is None:
today = datetime.combine(date.today(), time(6, 0))
else:
today = datetime.strptime('%s__%s__%s__06__00' % (year, month, day), '%Y__%m__%d__%H__%M')
tomorrow = today + timedelta(days=1)
context = super(DayScheduleView, self).get_context_data(**kwargs)
context['day'] = today
context['recommendations'] = Note.objects.filter(status=1, timeslot__start__range=(today, tomorrow))
context['default_show'] = Show.objects.get(pk=1)
timeslots = TimeSlot.objects.get_day_timeslots(today)
if 'type' in self.request.GET:
type = get_object_or_404(Type, slug=self.request.GET['type'])
context['timeslots'] = timeslots.filter(show__type=type)
elif 'musicfocus' in self.request.GET:
musicfocus = get_object_or_404(MusicFocus, slug=self.request.GET['musicfocus'])
context['timeslots'] = timeslots.filter(show__musicfocus=musicfocus)
elif 'category' in self.request.GET:
category = get_object_or_404(Category, slug=self.request.GET['category'])
context['timeslots'] = timeslots.filter(show__category=category)
elif 'topic' in self.request.GET:
topic = get_object_or_404(Topic, slug=self.request.GET['topic'])
context['topic'] = timeslots.filter(show__topic=topic)
else:
context['timeslots'] = timeslots
return context
# Deprecated
class CurrentShowBoxView(TemplateView):
context_object_name = 'recommendation_list'
template_name = 'boxes/current.html'
def get_context_data(self, **kwargs):
current_timeslot = TimeSlot.objects.get_or_create_current()
previous_timeslot = current_timeslot.get_previous_by_start()
next_timeslot = current_timeslot.get_next_by_start()
after_next_timeslot = next_timeslot.get_next_by_start()
context = super(CurrentShowBoxView, self).get_context_data(**kwargs)
context['current_timeslot'] = current_timeslot
context['previous_timeslot'] = previous_timeslot
context['next_timeslot'] = next_timeslot
context['after_next_timeslot'] = after_next_timeslot
return context
# Deprecated
class WeekScheduleView(TemplateView):
template_name = 'week_schedule.html'
def get_context_data(self, **kwargs):
year = self.kwargs.get('year', None)
week = self.kwargs.get('week', None)
if year is None and week is None:
year, week = datetime.now().strftime('%G__%V').split('__')
monday = tofirstdayinisoweek(int(year), int(week))
tuesday = monday + timedelta(days=1)
wednesday = monday + timedelta(days=2)
thursday = monday + timedelta(days=3)
friday = monday + timedelta(days=4)
saturday = monday + timedelta(days=5)
sunday = monday + timedelta(days=6)
context = super(WeekScheduleView, self).get_context_data()
context['monday'] = monday
context['tuesday'] = tuesday
context['wednesday'] = wednesday
context['thursday'] = thursday
context['friday'] = friday
context['saturday'] = saturday
context['sunday'] = sunday
context['default_show'] = Show.objects.get(pk=1)
context['monday_timeslots'] = TimeSlot.objects.get_day_timeslots(monday)
context['tuesday_timeslots'] = TimeSlot.objects.get_day_timeslots(tuesday)
context['wednesday_timeslots'] = TimeSlot.objects.get_day_timeslots(wednesday)
context['thursday_timeslots'] = TimeSlot.objects.get_day_timeslots(thursday)
context['friday_timeslots'] = TimeSlot.objects.get_day_timeslots(friday)
context['saturday_timeslots'] = TimeSlot.objects.get_day_timeslots(saturday)
context['sunday_timeslots'] = TimeSlot.objects.get_day_timeslots(sunday)
context['last_w'] = datetime.strftime(monday - timedelta(days=7), '%G/%V')
context['cur_w'] = datetime.strftime(monday, '%G/%V')
context['next_w1'] = datetime.strftime(monday + timedelta(days=7), '%G/%V')
context['next_w2'] = datetime.strftime(monday + timedelta(days=14), '%G/%V')
context['next_w3'] = datetime.strftime(monday + timedelta(days=21), '%G/%V')
context['next_w4'] = datetime.strftime(monday + timedelta(days=28), '%G/%V')
return context
class StylesView(TemplateView):
template_name = 'styles.css'
content_type = 'text/css'
def get_context_data(self, **kwargs):
context = super(StylesView, self).get_context_data(**kwargs)
context['types'] = Type.objects.filter(is_active=True)
context['musicfocus'] = MusicFocus.objects.all()
context['category'] = Category.objects.all()
context['topic'] = Topic.objects.all()
return context
# Deprecated
def json_day_schedule(request, year=None, month=None, day=None):
if year is None and month is None and day is None:
today = datetime.combine(date.today(), time(0, 0))
else:
today = datetime.strptime('%s__%s__%s__00__00' % (year, month, day), '%Y__%m__%d__%H__%M')
timeslots = TimeSlot.objects.get_24h_timeslots(today).select_related('schedule').select_related('show')
schedule = []
for ts in timeslots:
entry = {
'start': ts.start.strftime('%Y-%m-%d_%H:%M:%S'),
'end': ts.end.strftime('%Y-%m-%d_%H:%M:%S'),
'title': ts.show.name,
'id': ts.show.id,
'automation-id': -1
}
if ts.schedule.automation_id:
entry['automation-id'] = ts.schedule.automation_id
schedule.append(entry)
return HttpResponse(json.dumps(schedule, ensure_ascii=False).encode('utf8'),
content_type="application/json; charset=utf-8")
def json_playout(request):
"""
Called by
- engine (playout) to retrieve timeslots within a given timerange
Expects GET variables 'start' (date) and 'end' (date).
If start not given, it will be today
- internal calendar to retrieve all timeslots for a week
Expects GET variable 'start' (date), otherwise start will be today
If end not given, it returns all timeslots of the next 7 days
"""
if request.GET.get('start') == None:
start = datetime.combine(date.today(), time(0, 0))
else:
start = datetime.combine( datetime.strptime(request.GET.get('start'), '%Y-%m-%d').date(), time(0, 0))
if request.GET.get('end') == None:
# If no end was given, return the next week
timeslots = TimeSlot.objects.get_7d_timeslots(start).select_related('schedule').select_related('show')
else:
# Otherwise return the given timerange
end = datetime.combine( datetime.strptime(request.GET.get('end'), '%Y-%m-%d').date(), time(23, 59))
timeslots = TimeSlot.objects.get_timerange_timeslots(start, end).select_related('schedule').select_related('show')
schedule = []
for ts in timeslots:
is_repetition = ' ' + _('REP') if ts.schedule.is_repetition == 1 else ''
hosts = ', '.join(ts.show.hosts.values_list('name', flat=True))
categories = ', '.join(ts.show.category.values_list('category', flat=True))
topics = ', '.join(ts.show.topic.values_list('topic', flat=True))
musicfocus = ', '.join(ts.show.musicfocus.values_list('focus', flat=True))
languages = ', '.join(ts.show.language.values_list('name', flat=True))
rtrcategory = RTRCategory.objects.get(pk=ts.show.rtrcategory_id)
type = Type.objects.get(pk=ts.show.type_id)
classname = 'default'
if ts.playlist_id is None or ts.playlist_id == 0:
classname = 'danger'
entry = {
'id': ts.id,
'start': ts.start.strftime('%Y-%m-%dT%H:%M:%S'),
'end': ts.end.strftime('%Y-%m-%dT%H:%M:%S'),
'title': ts.show.name + is_repetition, # For JS Calendar
'automation-id': -1,
'schedule_id': ts.schedule.id,
'is_repetition': ts.is_repetition,
'playlist_id': ts.playlist_id,
'schedule_fallback_id': ts.schedule.fallback_id, # The schedule's fallback
'show_fallback_id': ts.show.fallback_id, # The show's fallback
'show_id': ts.show.id,
'show_name': ts.show.name + is_repetition,
'show_hosts': hosts,
'show_type': type.type,
'show_categories': categories,
'show_topics': topics,
'show_musicfocus': musicfocus,
'show_languages': languages,
'show_rtrcategory': rtrcategory.rtrcategory,
'station_fallback_id': 0, # TODO: The station's global fallback (might change)
'memo': ts.memo,
'className': classname,
}
if ts.schedule.automation_id:
entry['automation-id'] = ts.schedule.automation_id
schedule.append(entry)
return HttpResponse(json.dumps(schedule, ensure_ascii=False).encode('utf8'),
content_type="application/json; charset=utf-8")
def json_timeslots_specials(request):
specials = {}
shows = get_cached_shows()['shows']
for show in shows:
show['pv_id'] = -1
if show['type'] == 's':
specials[show['id']] = show
for ts in TimeSlot.objects.filter(end__gt=datetime.now(),
                                      schedule__automation_id__in=specials.keys()).select_related('show'):
automation_id = ts.schedule.automation_id
start = ts.start.strftime('%Y-%m-%d_%H:%M:%S')
end = ts.end.strftime('%Y-%m-%d_%H:%M:%S')
if specials[automation_id]['pv_id'] != -1:
if specials[automation_id]['pv_start'] < start:
continue
specials[automation_id]['pv_id'] = int(ts.show.id)
specials[automation_id]['pv_name'] = ts.show.name
specials[automation_id]['pv_start'] = start
specials[automation_id]['pv_end'] = end
return HttpResponse(json.dumps(specials, ensure_ascii=False).encode('utf8'),
content_type="application/json; charset=utf-8")
####################################################################
# REST API View Sets
####################################################################
class APIUserViewSet(viewsets.ModelViewSet):
"""
/api/v1/users Returns oneself - Superusers see all users. Only superusers may create a user (GET, POST)
/api/v1/users/1 Used for retrieving or updating a single user. Non-superusers may only update certain fields. (GET, PUT) - DELETE is prohibited for everyone
Superusers may access and update all users
"""
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
serializer_class = UserSerializer
queryset = User.objects.none()
required_scopes = ['users']
def get_queryset(self):
"""Constrain access to oneself except for superusers"""
if self.request.user.is_superuser:
return User.objects.all()
return User.objects.filter(pk=self.request.user.id)
def list(self, request):
users = self.get_queryset()
serializer = UserSerializer(users, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
"""Returns a single user"""
# Common users only see themselves
if not request.user.is_superuser and int(pk) != request.user.id:
return Response(status=status.HTTP_401_UNAUTHORIZED)
user = get_object_or_404(User, pk=pk)
serializer = UserSerializer(user)
return Response(serializer.data)
def create(self, request, pk=None):
"""
Create a User
Only superusers may create a user
"""
if not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def update(self, request, pk=None):
# Common users may only edit themselves
if not request.user.is_superuser and int(pk) != request.user.id:
return Response(status=status.HTTP_401_UNAUTHORIZED)
user = get_object_or_404(User, pk=pk)
serializer = UserSerializer(user, data=request.data, context={ 'user': request.user })
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk=None):
"""Deleting users is prohibited: Set 'is_active' to False instead"""
return Response(status=status.HTTP_401_UNAUTHORIZED)
class APIShowViewSet(viewsets.ModelViewSet):
"""
/api/v1/shows/ Returns all shows (GET, POST)
/api/v1/shows/?active=true Returns all active shows (GET)
/api/v1/shows/?host=1 Returns shows assigned to a given host (GET)
/api/v1/shows/?owner=1 Returns shows of a given owner (GET)
/api/v1/shows/1 Used for retrieving a single show or update (if owned) (GET, PUT, DELETE)
/api/v1/shows/1/notes Returns all notes to the show (GET) - POST not allowed at this level, use /shows/1/schedules/1/timeslots/1/note instead
/api/v1/shows/1/notes/1 Returns the note of the show by its ID (GET) - PUT/DELETE not allowed at this level, use /shows/1/schedules/1/timeslots/1/note/1/ instead
/api/v1/shows/1/schedules Returns all schedules of the show (GET, POST)
/api/v1/shows/1/schedules/1 Returns the schedule of the show by its ID (GET) - POST not allowed at this level, use /shows/1/schedules/ instead
/api/v1/shows/1/timeslots Returns all timeslots of the show (GET) - Timeslots may only be added by creating/updating a schedule
/api/v1/shows/1/timeslots/1 Returns the timeslot of the show (GET) - Timeslots may only be added by creating/updating a schedule
/api/v1/shows/1/timeslots?start=2017-01-01&end=2017-12-31 Returns all timeslots of the show within the given timerange (GET)
/api/v1/shows/1/timeslots/1/note Returns a note to the timeslot (one at max) (GET) - POST not allowed at this level, use /shows/1/schedules/1/timelots/1/note/ instead
/api/v1/shows/1/timeslots/1/note/1 Returns the note of the show's timeslot by its ID (GET) - PUT/DELETE not allowed at this level, use /shows/1/schedules/1/timeslots/1/note/1/ instead
Only superusers may add and delete shows
"""
queryset = Show.objects.none()
serializer_class = ShowSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
pagination_class = LimitOffsetPagination
required_scopes = ['shows']
def get_queryset(self):
shows = Show.objects.all()
'''Filters'''
if self.request.GET.get('active') == 'true':
'''Filter currently running shows'''
# Get currently running schedules to filter by first
# For single dates we test if there'll be one in the future (and ignore the until date)
# TODO: Really consider dstart? (=currently active, not just upcoming ones)
# Add limit for future?
schedules = Schedule.objects.filter( Q(rrule_id__gt=1,dstart__lte=date.today(),until__gte=date.today()) |
Q(rrule_id=1,dstart__gte=date.today())
).distinct().values_list('show_id', flat=True)
shows = Show.objects.filter(id__in=schedules)
if self.request.GET.get('owner') != None:
'''Filter shows by owner'''
shows = shows.filter(owners__in=[int(self.request.GET.get('owner'))])
if self.request.GET.get('host') != None:
'''Filter shows by host'''
shows = shows.filter(hosts__in=[int(self.request.GET.get('host'))])
return shows
def create(self, request, pk=None):
"""
Create a show
Only superusers may create a show
"""
if not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = ShowSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
"""Returns a single show"""
show = get_object_or_404(Show, pk=pk)
serializer = ShowSerializer(show)
return Response(serializer.data)
def update(self, request, pk=None):
"""
Update a show
Common users may only update shows they own
"""
if not Show.is_editable(self, pk):
return Response(status=status.HTTP_401_UNAUTHORIZED)
show = get_object_or_404(Show, pk=pk)
serializer = ShowSerializer(show, data=request.data, context={ 'user': request.user })
if serializer.is_valid():
# Common users mustn't edit the show's name
if not request.user.is_superuser:
serializer.validated_data['name'] = show.name
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk=None):
"""
Delete a show
Only superusers may delete shows
"""
if not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
show = get_object_or_404(Show, pk=pk)
Show.objects.get(pk=pk).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class APIScheduleViewSet(viewsets.ModelViewSet):
"""
/api/v1/schedules/ Returns schedules (GET) - POST not allowed at this level
/api/v1/schedules/1 Returns the given schedule (GET) - POST not allowed at this level
/api/v1/shows/1/schedules Returns schedules of the show (GET, POST)
/api/v1/shows/1/schedules/1 Returns schedules by its ID (GET, PUT, DELETE)
Only superusers may create and update schedules
"""
queryset = Schedule.objects.none()
serializer_class = ScheduleSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['schedules']
def get_queryset(self):
show_pk = self.kwargs['show_pk'] if 'show_pk' in self.kwargs else None
if show_pk != None:
return Schedule.objects.filter(show=show_pk)
return Schedule.objects.all()
def list(self, request, show_pk=None, pk=None):
"""List Schedules of a show"""
schedules = self.get_queryset()
serializer = ScheduleSerializer(schedules, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None, show_pk=None):
if show_pk != None:
schedule = get_object_or_404(Schedule, pk=pk, show=show_pk)
else:
schedule = get_object_or_404(Schedule, pk=pk)
serializer = ScheduleSerializer(schedule)
return Response(serializer.data)
def create(self, request, pk=None, show_pk=None):
"""
Create a schedule, generate timeslots, test for collisions and resolve them including notes
Only superusers may add schedules
TODO: Perhaps directly insert into database if no conflicts found
"""
# Only allow creating when calling /shows/1/schedules/
if show_pk == None or not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
# The schedule dict is mandatory
if not 'schedule' in request.data:
return Response(status=status.HTTP_400_BAD_REQUEST)
# First create submit -> return projected timeslots and collisions
if not 'solutions' in request.data:
return Response(Schedule.make_conflicts(request.data['schedule'], pk, show_pk))
# Otherwise try to resolve
resolution = Schedule.resolve_conflicts(request.data, pk, show_pk)
# If resolution went well
if not 'projected' in resolution:
return Response(resolution, status=status.HTTP_201_CREATED)
# Otherwise return conflicts
return Response(resolution)
def update(self, request, pk=None, show_pk=None):
"""
Update a schedule, generate timeslots, test for collisions and resolve them including notes
Only superusers may update schedules
"""
# Only allow updating when calling /shows/1/schedules/1
if show_pk == None or not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
schedule = get_object_or_404(Schedule, pk=pk, show=show_pk)
# The schedule dict is mandatory
if not 'schedule' in request.data:
return Response(status=status.HTTP_400_BAD_REQUEST)
# First update submit -> return projected timeslots and collisions
if not 'solutions' in request.data:
# TODO: If nothing else than fallback_id, automation_id or is_repetition changed -> just save and don't do anything
return Response(Schedule.make_conflicts(request.data['schedule'], pk, show_pk))
# Otherwise try to resolve
resolution = Schedule.resolve_conflicts(request.data, pk, show_pk)
# If resolution went well
if not 'projected' in resolution:
return Response(resolution, status=status.HTTP_200_OK)
# Otherwise return conflicts
return Response(resolution)
def destroy(self, request, pk=None, show_pk=None):
"""
Delete a schedule
Only superusers may delete schedules
"""
# Only allow deleting when calling /shows/1/schedules/1
if show_pk == None or not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
schedule = get_object_or_404(Schedule, pk=pk)
        schedule.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
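# --- Added illustration (not part of the original module) ---------------------
# A minimal client-side sketch of the two-step create/resolve flow implemented
# above: the first POST carries only the mandatory 'schedule' dict and returns
# projected timeslots plus any conflicts; the second POST re-submits with a
# 'solutions' mapping. The base URL, auth header and the schedule payload fields
# are assumptions; only the 'schedule' and 'solutions' keys come from the view
# code itself.
def _example_schedule_conflict_flow():
    import requests  # hypothetical client-side dependency

    base = 'http://localhost:8000/api/v1/shows/1/schedules/'  # assumed host
    headers = {'Authorization': 'Bearer <token>'}  # assumed auth scheme
    schedule = {'dstart': '2017-01-01', 'tstart': '10:00', 'tend': '11:00'}  # hypothetical fields

    # First submit: only 'schedule' -> projected timeslots and conflicts
    first = requests.post(base, json={'schedule': schedule}, headers=headers).json()

    # Second submit: keep 'schedule', add 'solutions' so conflicts can be resolved
    payload = dict(first, schedule=schedule, solutions={})  # solution shape assumed
    return requests.post(base, json=payload, headers=headers).status_code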
class APITimeSlotViewSet(viewsets.ModelViewSet):
"""
/api/v1/timeslots Returns timeslots of the next 60 days (GET) - Timeslots may only be added by creating/updating a schedule
/api/v1/timeslots/1 Returns the given timeslot (GET) - PUT/DELETE not allowed at this level
/api/v1/timeslots/?start=2017-01-01&end=2017-02-01 Returns timeslots within the given timerange (GET)
/api/v1/shows/1/timeslots Returns timeslots of the show (GET, POST)
/api/v1/shows/1/timeslots/1 Returns a timeslots by its ID (GET, PUT, DELETE)
/api/v1/shows/1/timeslots?start=2017-01-01&end=2017-02-01 Returns timeslots of the show within the given timerange
/api/v1/shows/1/schedules/1/timeslots Returns all timeslots of the schedule (GET, POST)
/api/v1/shows/1/schedules/1/timeslots/1 Returns a timeslot by its ID (GET, PUT, DELETE)
/api/v1/shows/1/schedules/1/timeslots?start=2017-01-01&end=2017-02-01 Returns all timeslots of the schedule within the given timerange
"""
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
serializer_class = TimeSlotSerializer
pagination_class = LimitOffsetPagination
queryset = TimeSlot.objects.none()
required_scopes = ['timeslots']
def get_queryset(self):
show_pk = self.kwargs['show_pk'] if 'show_pk' in self.kwargs else None
schedule_pk = self.kwargs['schedule_pk'] if 'schedule_pk' in self.kwargs else None
'''Filters'''
# Return next 60 days by default
start = datetime.combine(date.today(), time(0, 0))
end = start + timedelta(days=60)
if self.request.GET.get('start') and self.request.GET.get('end'):
start = datetime.combine( datetime.strptime(self.request.GET.get('start'), '%Y-%m-%d').date(), time(0, 0))
end = datetime.combine( datetime.strptime(self.request.GET.get('end'), '%Y-%m-%d').date(), time(23, 59))
'''Endpoints'''
#
# /shows/1/schedules/1/timeslots/
#
# Returns timeslots of the given show and schedule
#
if show_pk != None and schedule_pk != None:
return TimeSlot.objects.filter(show=show_pk, schedule=schedule_pk, start__gte=start, end__lte=end).order_by('start')
#
# /shows/1/timeslots/
#
# Returns timeslots of the show
#
elif show_pk != None and schedule_pk == None:
return TimeSlot.objects.filter(show=show_pk, start__gte=start, end__lte=end).order_by('start')
#
# /timeslots/
#
# Returns all timeslots
#
else:
return TimeSlot.objects.filter(start__gte=start, end__lte=end).order_by('start')
def retrieve(self, request, pk=None, schedule_pk=None, show_pk=None):
if show_pk != None:
timeslot = get_object_or_404(TimeSlot, pk=pk, show=show_pk)
else:
timeslot = get_object_or_404(TimeSlot, pk=pk)
serializer = TimeSlotSerializer(timeslot)
return Response(serializer.data)
def create(self, request):
"""
Timeslots may only be created by adding/updating schedules
TODO: Adding single timeslot which fits to schedule?
"""
        return Response(status=status.HTTP_401_UNAUTHORIZED)
def update(self, request, pk=None, schedule_pk=None, show_pk=None):
"""Link a playlist_id to a timeslot"""
timeslot = get_object_or_404(TimeSlot, pk=pk, schedule=schedule_pk, show=show_pk)
# Update is only allowed when calling /shows/1/schedules/1/timeslots/1 and if user owns the show
if schedule_pk == None or show_pk == None or not Show.is_editable(self, timeslot.show_id):
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = TimeSlotSerializer(timeslot, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk=None, schedule_pk=None, show_pk=None):
"""
Delete a timeslot
Only superusers may delete timeslots
"""
# Only allow when calling endpoint starting with /shows/1/...
if show_pk == None:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not request.user.is_superuser:
return Response(status=status.HTTP_401_UNAUTHORIZED)
timeslot = get_object_or_404(TimeSlot, pk=pk)
        timeslot.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
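# --- Added illustration (not part of the original module) ---------------------
# Sketch of querying timeslots for an explicit window instead of the default
# 60-day range computed in get_queryset() above. Host and auth handling are
# assumptions; the 'start'/'end' query parameters and their YYYY-MM-DD format
# come from the view code itself.
def _example_timeslot_range_query():
    import requests  # hypothetical client-side dependency

    url = 'http://localhost:8000/api/v1/shows/1/timeslots/'  # assumed host
    params = {'start': '2017-01-01', 'end': '2017-02-01'}
    return requests.get(url, params=params).json()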
class APINoteViewSet(viewsets.ModelViewSet):
"""
/api/v1/notes/ Returns all notes (GET) - POST not allowed at this level
    /api/v1/notes/1                                  Returns a single note (if owned) (GET) - PUT/DELETE not allowed at this level
/api/v1/notes/?ids=1,2,3,4,5 Returns given notes (if owned) (GET)
/api/v1/notes/?host=1 Returns notes assigned to a given host (GET)
/api/v1/notes/?owner=1 Returns notes editable by a given user (GET)
/api/v1/notes/?user=1 Returns notes created by a given user (GET)
/api/v1/shows/1/notes Returns all notes of a show (GET) - POST not allowed at this level
/api/v1/shows/1/notes/1 Returns a note by its ID (GET) - PUT/DELETE not allowed at this level
/api/v1/shows/1/timeslots/1/note/ Returns a note of the timeslot (GET) - POST not allowed at this level
/api/v1/shows/1/timeslots/1/note/1 Returns a note by its ID (GET) - PUT/DELETE not allowed at this level
/api/v1/shows/1/schedules/1/timeslots/1/note Returns a note to the timeslot (GET, POST) - Only one note allowed per timeslot
/api/v1/shows/1/schedules/1/timeslots/1/note/1 Returns a note by its ID (GET, PUT, DELETE)
Superusers may access and update all notes
"""
queryset = Note.objects.none()
serializer_class = NoteSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
pagination_class = LimitOffsetPagination
required_scopes = ['notes']
def get_queryset(self):
pk = self.kwargs['pk'] if 'pk' in self.kwargs else None
timeslot_pk = self.kwargs['timeslot_pk'] if 'timeslot_pk' in self.kwargs else None
show_pk = self.kwargs['show_pk'] if 'show_pk' in self.kwargs else None
'''Endpoints'''
#
# /shows/1/schedules/1/timeslots/1/note
# /shows/1/timeslots/1/note
#
# Return a note to the timeslot
#
if show_pk != None and timeslot_pk != None:
notes = Note.objects.filter(show=show_pk, timeslot=timeslot_pk)
#
# /shows/1/notes
#
# Returns notes to the show
#
elif show_pk != None and timeslot_pk == None:
notes = Note.objects.filter(show=show_pk)
#
# /notes
#
# Returns all notes
#
else:
notes = Note.objects.all()
'''Filters'''
if self.request.GET.get('ids') != None:
'''Filter notes by their IDs'''
note_ids = self.request.GET.get('ids').split(',')
notes = notes.filter(id__in=note_ids)
if self.request.GET.get('host') != None:
'''Filter notes by host'''
notes = notes.filter(host=int(self.request.GET.get('host')))
if self.request.GET.get('owner') != None:
'''Filter notes by show owner: all notes the user may edit'''
shows = Show.objects.filter(owners=int(self.request.GET.get('owner')))
notes = notes.filter(show__in=shows)
if self.request.GET.get('user') != None:
'''Filter notes by their creator'''
notes = notes.filter(user=int(self.request.GET.get('user')))
return notes
def create(self, request, pk=None, timeslot_pk=None, schedule_pk=None, show_pk=None):
"""Create a note"""
# Only create a note if show_id, timeslot_id and schedule_id is given
if show_pk == None or schedule_pk == None or timeslot_pk == None:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not Show.is_editable(self, show_pk):
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = NoteSerializer(data=request.data, context={ 'user_id': request.user.id })
if serializer.is_valid():
# Don't assign a host the user mustn't edit
if not Host.is_editable(self, request.data['host']) or request.data['host'] == None:
serializer.validated_data['host'] = None
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None, timeslot_pk=None, schedule_pk=None, show_pk=None):
"""
Returns a single note
Called by:
/notes/1
/shows/1/notes/1
/shows/1/timeslots/1/note/1
/shows/1/schedules/1/timeslots/1/note/1
"""
#
# /shows/1/notes/1
#
# Returns a note to a show
#
if show_pk != None and timeslot_pk == None and schedule_pk == None:
note = get_object_or_404(Note, pk=pk, show=show_pk)
#
# /shows/1/timeslots/1/note/1
# /shows/1/schedules/1/timeslots/1/note/1
#
# Return a note to a timeslot
#
elif show_pk != None and timeslot_pk != None:
note = get_object_or_404(Note, pk=pk, show=show_pk, timeslot=timeslot_pk)
#
# /notes/1
#
# Returns the given note
#
else:
note = get_object_or_404(Note, pk=pk)
serializer = NoteSerializer(note)
return Response(serializer.data)
def update(self, request, pk=None, show_pk=None, schedule_pk=None, timeslot_pk=None):
# Allow PUT only when calling /shows/1/schedules/1/timeslots/1/note/1
if show_pk == None or schedule_pk == None or timeslot_pk == None:
return Response(status=status.HTTP_400_BAD_REQUEST)
note = get_object_or_404(Note, pk=pk, timeslot=timeslot_pk, show=show_pk)
        # Common users may only edit notes of shows they own
        if not Note.is_editable(self, note.id):
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = NoteSerializer(note, data=request.data)
if serializer.is_valid():
# Don't assign a host the user mustn't edit. Reassign the original value instead
if not Host.is_editable(self, request.data['host']) and request.data['host'] != None:
serializer.validated_data['host'] = Host.objects.filter(pk=note.host_id)[0]
            serializer.save()
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk=None):
note = get_object_or_404(Note, pk=pk)
if Note.is_editable(self, note.id):
            note.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class APICategoryViewSet(viewsets.ModelViewSet):
"""
/api/v1/categories/ Returns all categories (GET, POST)
/api/v1/categories/?active=true Returns all active categories (GET)
/api/v1/categories/1 Returns a category by its ID (GET, PUT, DELETE)
"""
queryset = Category.objects.all()
serializer_class = CategorySerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['categories']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return Category.objects.filter(is_active=True)
return Category.objects.all()
class APITypeViewSet(viewsets.ModelViewSet):
"""
/api/v1/types/ Returns all types (GET, POST)
/api/v1/types/?active=true Returns all active types (GET)
/api/v1/types/1 Returns a type by its ID (GET, PUT, DELETE)
"""
queryset = Type.objects.all()
serializer_class = TypeSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['types']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return Type.objects.filter(is_active=True)
return Type.objects.all()
class APITopicViewSet(viewsets.ModelViewSet):
"""
/api/v1/topics/ Returns all topics (GET, POST)
/api/v1/topics/?active=true Returns all active topics (GET)
/api/v1/topics/1 Returns a topic by its ID (GET, PUT, DELETE)
"""
queryset = Topic.objects.all()
serializer_class = TopicSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['topics']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return Topic.objects.filter(is_active=True)
return Topic.objects.all()
class APIMusicFocusViewSet(viewsets.ModelViewSet):
"""
/api/v1/musicfocus/ Returns all musicfocuses (GET, POST)
/api/v1/musicfocus/?active=true Returns all active musicfocuses (GET)
/api/v1/musicfocus/1 Returns a musicfocus by its ID (GET, PUT, DELETE)
"""
queryset = MusicFocus.objects.all()
serializer_class = MusicFocusSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['musicfocus']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return MusicFocus.objects.filter(is_active=True)
return MusicFocus.objects.all()
class APIRTRCategoryViewSet(viewsets.ModelViewSet):
"""
/api/v1/rtrcategories/ Returns all rtrcategories (GET, POST)
/api/v1/rtrcategories/?active=true Returns all active rtrcategories (GET)
/api/v1/rtrcategories/1 Returns a rtrcategory by its ID (GET, PUT, DELETE)
"""
queryset = RTRCategory.objects.all()
serializer_class = RTRCategorySerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['rtrcategories']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return RTRCategory.objects.filter(is_active=True)
return RTRCategory.objects.all()
class APILanguageViewSet(viewsets.ModelViewSet):
"""
/api/v1/languages/ Returns all languages (GET, POST)
/api/v1/languages/?active=true Returns all active languages (GET)
/api/v1/languages/1 Returns a language by its ID (GET, PUT, DELETE)
"""
queryset = Language.objects.all()
serializer_class = LanguageSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['languages']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return Language.objects.filter(is_active=True)
return Language.objects.all()
class APIHostViewSet(viewsets.ModelViewSet):
"""
/api/v1/hosts/ Returns all hosts (GET, POST)
/api/v1/hosts/?active=true Returns all active hosts (GET)
/api/v1/hosts/1 Returns a host by its ID (GET, PUT, DELETE)
"""
queryset = Host.objects.all()
serializer_class = HostSerializer
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
required_scopes = ['hosts']
def get_queryset(self):
'''Filters'''
if self.request.GET.get('active') == 'true':
return Host.objects.filter(is_active=True)
return Host.objects.all()
| 38.647425 | 252 | 0.636165 |
9b0414f20ac30c39c640b31c79da4965694083fa
| 1,117 |
py
|
Python
|
news_pipeline/queue_helper.py
|
yanyang729/news-recommendation
|
0cd2e2e84f94507a339077753e367cf8bef9e36e
|
[
"MIT"
] | 4 |
2017-11-16T15:00:23.000Z
|
2018-03-08T16:28:26.000Z
|
news_pipeline/queue_helper.py
|
yanyang729/news-recommendation
|
0cd2e2e84f94507a339077753e367cf8bef9e36e
|
[
"MIT"
] | null | null | null |
news_pipeline/queue_helper.py
|
yanyang729/news-recommendation
|
0cd2e2e84f94507a339077753e367cf8bef9e36e
|
[
"MIT"
] | null | null | null |
import os
import sys
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import news_api_client
from cloudAMQP_client import CloundAMQPClient as CloudAMQPClient
DEDUPE_NEWS_TASK_QUEUE_URL = "amqp://rcsdmawz:[email protected]/rcsdmawz"
DEDUPE_NEWS_TASK_QUEUE_NAME = "news-deduper"
SCRAPE_NEWS_TASK_QUEUE_URL = "amqp://holajbod:[email protected]/holajbod"
SCRAPE_NEWS_TASK_QUEUE_NAME = "news-task"
def clearQueue(queue_url, queue_name):
scrape_news_queue_client = CloudAMQPClient(queue_url, queue_name)
num_of_messages = 0
while True:
if scrape_news_queue_client is not None:
msg = scrape_news_queue_client.getMessage()
if msg is None:
print "Cleared %d messages." % num_of_messages
return
num_of_messages += 1
if __name__ == "__main__":
clearQueue(SCRAPE_NEWS_TASK_QUEUE_URL, SCRAPE_NEWS_TASK_QUEUE_NAME)
clearQueue(DEDUPE_NEWS_TASK_QUEUE_URL, DEDUPE_NEWS_TASK_QUEUE_NAME)
| 36.032258 | 112 | 0.761862 |
47fd8afe68f42294edde0122b590d7c87b71ea92
| 831 |
py
|
Python
|
gan/progan/auxiliary.py
|
valentingol/GANJax
|
ebcb8f4412277da2d9bda80282c2842d111bf393
|
[
"MIT"
] | 9 |
2021-11-20T18:25:37.000Z
|
2021-12-13T23:32:35.000Z
|
gan/progan/auxiliary.py
|
valentingol/GANJax
|
ebcb8f4412277da2d9bda80282c2842d111bf393
|
[
"MIT"
] | 4 |
2021-12-04T15:30:58.000Z
|
2022-01-20T13:13:32.000Z
|
gan/progan/auxiliary.py
|
valentingol/GANJax
|
ebcb8f4412277da2d9bda80282c2842d111bf393
|
[
"MIT"
] | 3 |
2022-01-18T00:02:30.000Z
|
2022-03-10T09:22:43.000Z
|
from jax import jit, numpy as jnp, random
def upsample_2d(x, scale):
x = jnp.repeat(x, scale, axis=1)
x = jnp.repeat(x, scale, axis=2)
return x
@jit
def add_batch_std(x):
shape = x.shape
mean = jnp.mean(x, axis=0, keepdims=True)
# Variance over the batch:
var = jnp.mean(jnp.square(x - mean), axis=0, keepdims=True) + 1e-8
# Mean of std across the channels and pixels:
mean_std = jnp.mean(jnp.sqrt(var))
mean_std = jnp.tile(mean_std, (shape[0], shape[1], shape[2], 1))
x = jnp.concatenate((x, mean_std), axis=-1)
return x
@jit
def pixel_norm(x):
x_2_mean = jnp.mean(jnp.square(x), axis=-1, keepdims=True) + 1e-8
norm = jnp.sqrt(x_2_mean)
x = x / norm
return x
if __name__ == '__main__':
key = random.PRNGKey(0)
X = random.normal(key, (32, 10, 10, 3))
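    # Added illustration (not in the original file): exercise the helpers above
    # and check the shapes they produce.
    X_up = upsample_2d(X, scale=2)   # (32, 20, 20, 3): every pixel repeated 2x2
    X_std = add_batch_std(X)         # (32, 10, 10, 4): extra channel holding the mean batch std
    X_norm = pixel_norm(X)           # (32, 10, 10, 3): each pixel divided by its RMS over channels
    print(X_up.shape, X_std.shape, X_norm.shape)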
| 24.441176 | 70 | 0.619735 |
c7ddda2f0a5ae68def9e2f41f0b6e65027780863
| 386 |
py
|
Python
|
account/migrations/0007_auto_20190809_2257.py
|
jongwooo/TruckWassup
|
553a940062c1aa13bc26bb99b8357470cc21dfdb
|
[
"MIT"
] | 1 |
2020-07-23T03:22:47.000Z
|
2020-07-23T03:22:47.000Z
|
account/migrations/0007_auto_20190809_2257.py
|
jongwooo/TruckWassup
|
553a940062c1aa13bc26bb99b8357470cc21dfdb
|
[
"MIT"
] | 7 |
2020-06-05T22:47:48.000Z
|
2022-03-11T23:55:47.000Z
|
account/migrations/0007_auto_20190809_2257.py
|
jongwooo/TruckWassup
|
553a940062c1aa13bc26bb99b8357470cc21dfdb
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-08-09 13:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0006_profile_company'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='company',
field=models.CharField(max_length=255),
),
]
| 20.315789 | 51 | 0.598446 |
eeada57e5fe392e65475d6cc827875665a1715ef
| 2,522 |
py
|
Python
|
src/tf_transformers/layers/mask/cross_attention_mask.py
|
s4sarath/tf-transformers
|
361f7b01c7816034ddfc8661f8b6a967835bc1de
|
[
"Apache-2.0"
] | 2 |
2021-03-31T17:48:16.000Z
|
2021-08-22T11:52:19.000Z
|
src/tf_transformers/layers/mask/cross_attention_mask.py
|
Vibha111094/tf-transformers
|
f26d440a4de0557e0e481279bfd70a732aaa8825
|
[
"Apache-2.0"
] | null | null | null |
src/tf_transformers/layers/mask/cross_attention_mask.py
|
Vibha111094/tf-transformers
|
f26d440a4de0557e0e481279bfd70a732aaa8825
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layer that creates a self-attention mask."""
# from __future__ import google_type_annotations
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tf_transformers.utils import tf_utils
@tf.keras.utils.register_keras_serializable(package="Text")
class CrossAttentionMask(tf.keras.layers.Layer):
"""Create 3D attention mask from a 2D tensor mask.
inputs[0]: from_tensor: 2D or 3D Tensor of shape
[batch_size, from_seq_length, ...].
inputs[1]: to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
def __init__(self, **kwargs):
# We need to have a default dtype of float32, since the inputs (which Keras
# usually uses to infer the dtype) will always be int32.
if "dtype" not in kwargs:
kwargs["dtype"] = "float32"
super(CrossAttentionMask, self).__init__(**kwargs)
self._dtype = kwargs["dtype"]
def call(self, inputs):
to_mask = inputs[1]
batch_size, from_seq_length = tf_utils.get_shape_list(inputs[0])
_, to_seq_length = tf_utils.get_shape_list(inputs[1])
to_mask = tf.cast(tf.reshape(to_mask, [batch_size, 1, to_seq_length]), dtype=self._dtype)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(shape=[batch_size, from_seq_length, 1], dtype=self._dtype)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
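# Added illustration (not part of the original file): a minimal sketch of calling
# the layer. This implementation unpacks the first input into
# [batch_size, from_seq_length], so a 2D tensor is used here; the values are made up.
if __name__ == "__main__":
    from_tensor = tf.zeros([2, 5])                       # [batch, from_seq]
    to_mask = tf.constant([[1, 1, 1, 0], [1, 1, 0, 0]])  # [batch, to_seq]
    mask = CrossAttentionMask()([from_tensor, to_mask])
    print(mask.shape)  # (2, 5, 4)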
| 39.40625 | 97 | 0.681205 |
341e245b9afcbc71f451b7d438c82e51c93ecd88
| 441 |
py
|
Python
|
api/model/crud/crud.py
|
PatrickVuscan/React-Native-Fast-API-Pizzeria
|
9a0747b6db77ed7d5a2e9795e9a159da6e490ccf
|
[
"MIT"
] | 1 |
2020-12-20T20:40:38.000Z
|
2020-12-20T20:40:38.000Z
|
api/model/crud/crud.py
|
PatrickVuscan/React-Native-Fast-API-Pizzeria
|
9a0747b6db77ed7d5a2e9795e9a159da6e490ccf
|
[
"MIT"
] | null | null | null |
api/model/crud/crud.py
|
PatrickVuscan/React-Native-Fast-API-Pizzeria
|
9a0747b6db77ed7d5a2e9795e9a159da6e490ccf
|
[
"MIT"
] | null | null | null |
"""Aggregate all model CRUD operations."""
from .pizza_crud import SqlPizzaCRUD
from .customer_crud import SqlCustomerCRUD
from .drink_crud import SqlDrinkCRUD
from .topping_crud import SqlToppingCRUD
from .order_crud import SqlOrderCRUD
# pylint: disable=too-many-ancestors
class Crud(SqlPizzaCRUD, SqlCustomerCRUD, SqlDrinkCRUD, SqlToppingCRUD, SqlOrderCRUD):
"""A class containing all Crud operations for all models in database."""
| 36.75 | 86 | 0.814059 |
f46a022a88ff92d9e7d5aae48a4800a53a7d2295
| 713 |
py
|
Python
|
Snippets/20220126.py
|
DZL1943/geeknotes
|
2ea9f7c1048784dc4bf879105d0053bf2b8cd619
|
[
"MIT"
] | null | null | null |
Snippets/20220126.py
|
DZL1943/geeknotes
|
2ea9f7c1048784dc4bf879105d0053bf2b8cd619
|
[
"MIT"
] | null | null | null |
Snippets/20220126.py
|
DZL1943/geeknotes
|
2ea9f7c1048784dc4bf879105d0053bf2b8cd619
|
[
"MIT"
] | null | null | null |
# An example of processing command output with Python
# date: 20220126
import os
def _f():
r = os.popen("grep -o 'hs\.[a-zA-Z]*' ~/.hammerspoon/init.lua | sort|uniq")
lines = r.read().splitlines()
return lines
lines = [
'hs.alert',
'hs.application',
'hs.canvas',
'hs.configdir',
'hs.eventtap',
'hs.fnutils',
'hs.hints',
'hs.hotkey',
'hs.inspect',
'hs.loadSpoon',
'hs.logger',
'hs.mouse',
'hs.notify',
'hs.pathwatcher',
'hs.reload',
'hs.screen',
'hs.spoons',
'hs.timer'
]
# Goal: turn each entry into: local x = require('hs.x')
dlines = []
for line in lines:
dline = "local %s = require('%s')" %(line.split('.')[1], line)
dlines.append(dline)
print('\n'.join(dlines))
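# Added illustration (not in the original snippet): the first lines this prints
# for the list above look like
#   local alert = require('hs.alert')
#   local application = require('hs.application')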
| 17.825 | 79 | 0.54979 |
979621339ff06f4fc345b265c2a0b55cc585dbef
| 1,106 |
py
|
Python
|
public/code/saveMetaData.py
|
xrachitx/audioRecordJS
|
bc16d736a4ccb613b8032b927cea48b2a7185447
|
[
"Apache-2.0"
] | null | null | null |
public/code/saveMetaData.py
|
xrachitx/audioRecordJS
|
bc16d736a4ccb613b8032b927cea48b2a7185447
|
[
"Apache-2.0"
] | null | null | null |
public/code/saveMetaData.py
|
xrachitx/audioRecordJS
|
bc16d736a4ccb613b8032b927cea48b2a7185447
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("fileAddr",help = "File name for the recorded audio",type= str)
parser.add_argument("txt",help = "txt file details",type= str)
parser.add_argument("age",help = "Age",type= str)
parser.add_argument("gender",help = "Gender",type= str)
parser.add_argument("country",help = "nationality",type= str)
print("fsdkhujidksfirfjdsklm")
args = parser.parse_args()
f = args.fileAddr
txt = args.txt
age = args.age
gender = args.gender
country = args.country
path = os.getcwd()+ f
txtFile = open(path,"w")
txtFile.write(txt)
pathMetaData = os.getcwd()+"/public/uploads/metadata.json"
jsonFile = open(pathMetaData,"r+")
metaData = json.load(jsonFile)
filename = f.split("/")[-1]
metaData[filename[:-3]] = {}
print(path)
metaData[filename[:-3]]["age"] = age
metaData[filename[:-3]]["gender"] = gender
metaData[filename[:-3]]["country"] = country
# json.dump(metaData,jsonFile)
with open(pathMetaData, 'w') as fp:
json.dump(metaData, fp)
# # metaData.write("\n"+filename+","+age+","+gender+","+country)
# print("jatham")
| 29.105263 | 83 | 0.708861 |
804abb5e8b92340b5605f522e45d3b94474670ae
| 3,240 |
py
|
Python
|
orm_sqlite/model.py
|
yzhang-dev/ORM-SQLite
|
ec0e67490840601bc3ba4b5e5a694e919aac02be
|
[
"MIT"
] | null | null | null |
orm_sqlite/model.py
|
yzhang-dev/ORM-SQLite
|
ec0e67490840601bc3ba4b5e5a694e919aac02be
|
[
"MIT"
] | null | null | null |
orm_sqlite/model.py
|
yzhang-dev/ORM-SQLite
|
ec0e67490840601bc3ba4b5e5a694e919aac02be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .manager import Manager, classonlymethod
from .logger import child_logger
logger = child_logger('orm_sqlite.model')
class Field(object):
def __init__(self, name, type, default, primary_key):
self.name = name
self.type = type
self.default = default
self.primary_key = primary_key
def __str__(self):
return '<{}, {} {}>'.format(
self.__class__.__name__, self.name, self.type
)
# string data types
class StringField(Field):
def __init__(self, name=None, default=''):
super().__init__(name, 'TEXT', default, False)
# numeric data types
class IntegerField(Field):
def __init__(self, name=None, default=0, primary_key=False):
super().__init__(name, 'INTEGER', default, primary_key)
class FloatField(Field):
def __init__(self, name=None, default=0.0):
super().__init__(name, 'REAL', default, False)
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
if name == 'Model':
return super().__new__(cls, name, bases, attrs)
table = attrs.get('__table__', None) or name.lower()
logger.info('model: {} (table: {}) found'.format(name, table))
mappings = dict()
primary_key = None
fields = list()
columns = list()
for attr_name, attr_value in attrs.items():
if isinstance(attr_value, Field):
logger.info('mapping: {} ==> {} found'.format(attr_name, attr_value))
mappings[attr_name] = attr_value
if attr_value.primary_key:
if primary_key is not None:
raise RuntimeError('Duplicate primary key for field: {}'.format(attr_name))
primary_key = attr_name
else:
fields.append(attr_name)
columns.append('{} {}'.format(attr_name, attr_value.type))
if primary_key is None:
raise RuntimeError('Primary key not found.')
for attr_name in mappings.keys():
attrs.pop(attr_name)
attrs['__table__'] = table
attrs['__mappings__'] = mappings
attrs['__primary_key__'] = primary_key
attrs['__fields__'] = fields
attrs['__columns__'] = columns
attrs['__placeholders__'] = ['?' for _ in range(len(fields))]
new_cls = super().__new__(cls, name, bases, attrs)
manager = Manager()
manager.as_attribute(new_cls, name='objects')
return new_cls
class Model(dict, metaclass=ModelMetaclass):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classonlymethod
def exists(cls):
return cls.objects.table_exists()
@classonlymethod
def create(cls):
return cls.objects.create_table()
@classonlymethod
def drop(cls):
return cls.objects.drop_table()
def save(self):
if not self.__class__.exists():
self.__class__.create()
return self.__class__.objects.add(self)
def update(self):
return self.__class__.objects.update(self)
def delete(self):
return self.__class__.objects.remove(self)
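# Added illustration (not part of the original module): a minimal sketch of how a
# table is declared with the fields defined above. The 'Page' model and its
# attributes are made up; only Model, IntegerField and StringField come from this
# file, and the backing database still has to be wired up through the manager.
if __name__ == '__main__':
    class Page(Model):
        id = IntegerField(primary_key=True)
        title = StringField()
        views = IntegerField()

    print(Page.__table__, Page.__primary_key__, Page.__columns__)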
| 30.566038 | 99 | 0.60463 |
3c5379aa59e8b14bda047687f04505a16ea16754
| 1,117 |
py
|
Python
|
tests/components/august/test_camera.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023 |
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/august/test_camera.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 31,101 |
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/august/test_camera.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956 |
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The camera tests for the august platform."""
from http import HTTPStatus
from unittest.mock import patch
from homeassistant.const import STATE_IDLE
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_doorbell_from_fixture,
)
async def test_create_doorbell(hass, hass_client_no_auth):
"""Test creation of a doorbell."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
with patch.object(
doorbell_one, "async_get_doorbell_image", create=False, return_value="image"
):
await _create_august_with_devices(hass, [doorbell_one])
camera_k98gidt45gul_name_camera = hass.states.get(
"camera.k98gidt45gul_name_camera"
)
assert camera_k98gidt45gul_name_camera.state == STATE_IDLE
url = hass.states.get("camera.k98gidt45gul_name_camera").attributes[
"entity_picture"
]
client = await hass_client_no_auth()
resp = await client.get(url)
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "image"
| 30.189189 | 84 | 0.702775 |
5a33052c4472c357bdc4d6909727b7ba60d5047b
| 13,248 |
py
|
Python
|
tests/kafkatest/services/console_consumer.py
|
testadmin1-levelops/kafka
|
87734144c4f73dfa4838dbdefcf8b89a1c3b4f69
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/services/console_consumer.py
|
testadmin1-levelops/kafka
|
87734144c4f73dfa4838dbdefcf8b89a1c3b4f69
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/services/console_consumer.py
|
testadmin1-levelops/kafka
|
87734144c4f73dfa4838dbdefcf8b89a1c3b4f69
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import wait_until
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.services.kafka.directory import kafka_dir
from kafkatest.services.kafka.version import TRUNK, LATEST_0_8_2, LATEST_0_9, V_0_10_0_0
from kafkatest.services.monitor.jmx import JmxMixin
import itertools
import os
import subprocess
"""
0.8.2.1 ConsoleConsumer options
The console consumer is a tool that reads data from Kafka and outputs it to standard output.
Option Description
------ -----------
--blacklist <blacklist> Blacklist of topics to exclude from
consumption.
--consumer.config <config file> Consumer config properties file.
--csv-reporter-enabled If set, the CSV metrics reporter will
be enabled
--delete-consumer-offsets If specified, the consumer path in
zookeeper is deleted when starting up
--formatter <class> The name of a class to use for
formatting kafka messages for
display. (default: kafka.tools.
DefaultMessageFormatter)
--from-beginning If the consumer does not already have
an established offset to consume
from, start with the earliest
message present in the log rather
than the latest message.
--max-messages <Integer: num_messages> The maximum number of messages to
consume before exiting. If not set,
consumption is continual.
--metrics-dir <metrics dictory> If csv-reporter-enable is set, and
this parameter isset, the csv
metrics will be outputed here
--property <prop>
--skip-message-on-error If there is an error when processing a
message, skip it instead of halt.
--topic <topic> The topic id to consume on.
--whitelist <whitelist> Whitelist of topics to include for
consumption.
--zookeeper <urls> REQUIRED: The connection string for
the zookeeper connection in the form
host:port. Multiple URLS can be
given to allow fail-over.
"""
class ConsoleConsumer(JmxMixin, BackgroundThreadService):
# Root directory for persistent output
PERSISTENT_ROOT = "/mnt/console_consumer"
STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stdout")
STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stderr")
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "console_consumer.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "console_consumer.properties")
logs = {
"consumer_stdout": {
"path": STDOUT_CAPTURE,
"collect_default": False},
"consumer_stderr": {
"path": STDERR_CAPTURE,
"collect_default": False},
"consumer_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, kafka, topic, group_id="test-consumer-group", new_consumer=False,
message_validator=None, from_beginning=True, consumer_timeout_ms=None, version=TRUNK,
client_id="console-consumer", print_key=False, jmx_object_names=None, jmx_attributes=[],
enable_systest_events=False):
"""
Args:
context: standard context
num_nodes: number of nodes to use (this should be 1)
kafka: kafka service
topic: consume from this topic
new_consumer: use new Kafka consumer if True
message_validator: function which returns message or None
from_beginning: consume from beginning if True, else from the end
consumer_timeout_ms: corresponds to consumer.timeout.ms. consumer process ends if time between
successively consumed messages exceeds this timeout. Setting this and
waiting for the consumer to stop is a pretty good way to consume all messages
in a topic.
print_key if True, print each message's key in addition to its value
enable_systest_events if True, console consumer will print additional lifecycle-related information
only available in 0.10.0 and later.
"""
JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
BackgroundThreadService.__init__(self, context, num_nodes)
self.kafka = kafka
self.new_consumer = new_consumer
self.group_id = group_id
self.args = {
'topic': topic,
}
self.consumer_timeout_ms = consumer_timeout_ms
for node in self.nodes:
node.version = version
self.from_beginning = from_beginning
self.message_validator = message_validator
self.messages_consumed = {idx: [] for idx in range(1, num_nodes + 1)}
self.clean_shutdown_nodes = set()
self.client_id = client_id
self.print_key = print_key
self.log_level = "TRACE"
self.enable_systest_events = enable_systest_events
if self.enable_systest_events:
# Only available in 0.10.0 and up
assert version >= V_0_10_0_0
def prop_file(self, node):
"""Return a string which can be used to create a configuration file appropriate for the given node."""
# Process client configuration
prop_file = self.render('console_consumer.properties')
if hasattr(node, "version") and node.version <= LATEST_0_8_2:
# in 0.8.2.X and earlier, console consumer does not have --timeout-ms option
# instead, we have to pass it through the config file
prop_file += "\nconsumer.timeout.ms=%s\n" % str(self.consumer_timeout_ms)
# Add security properties to the config. If security protocol is not specified,
# use the default in the template properties.
self.security_config = self.kafka.security_config.client_config(prop_file)
prop_file += str(self.security_config)
return prop_file
def start_cmd(self, node):
"""Return the start command appropriate for the given node."""
args = self.args.copy()
args['zk_connect'] = self.kafka.zk.connect_setting()
args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE
args['stderr'] = ConsoleConsumer.STDERR_CAPTURE
args['log_dir'] = ConsoleConsumer.LOG_DIR
args['log4j_config'] = ConsoleConsumer.LOG4J_CONFIG
args['config_file'] = ConsoleConsumer.CONFIG_FILE
args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE
args['jmx_port'] = self.jmx_port
args['kafka_dir'] = kafka_dir(node)
args['broker_list'] = self.kafka.bootstrap_servers(self.security_config.security_protocol)
args['kafka_opts'] = self.security_config.kafka_opts
cmd = "export JMX_PORT=%(jmx_port)s; " \
"export LOG_DIR=%(log_dir)s; " \
"export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j_config)s\"; " \
"export KAFKA_OPTS=%(kafka_opts)s; " \
"/opt/%(kafka_dir)s/bin/kafka-console-consumer.sh " \
"--topic %(topic)s --consumer.config %(config_file)s" % args
if self.new_consumer:
cmd += " --new-consumer --bootstrap-server %(broker_list)s" % args
else:
cmd += " --zookeeper %(zk_connect)s" % args
if self.from_beginning:
cmd += " --from-beginning"
if self.consumer_timeout_ms is not None:
# version 0.8.X and below do not support --timeout-ms option
# This will be added in the properties file instead
if node.version > LATEST_0_8_2:
cmd += " --timeout-ms %s" % self.consumer_timeout_ms
if self.print_key:
cmd += " --property print.key=true"
# LoggingMessageFormatter was introduced after 0.9
if node.version > LATEST_0_9:
cmd += " --formatter kafka.tools.LoggingMessageFormatter"
if self.enable_systest_events:
# enable systest events is only available in 0.10.0 and later
# check the assertion here as well, in case node.version has been modified
assert node.version >= V_0_10_0_0
cmd += " --enable-systest-events"
cmd += " 2>> %(stderr)s | tee -a %(stdout)s &" % args
return cmd
def pids(self, node):
try:
cmd = "ps ax | grep -i console_consumer | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def alive(self, node):
return len(self.pids(node)) > 0
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % ConsoleConsumer.PERSISTENT_ROOT, allow_fail=False)
# Create and upload config file
self.logger.info("console_consumer.properties:")
prop_file = self.prop_file(node)
self.logger.info(prop_file)
node.account.create_file(ConsoleConsumer.CONFIG_FILE, prop_file)
self.security_config.setup_node(node)
# Create and upload log properties
log_config = self.render('tools_log4j.properties', log_file=ConsoleConsumer.LOG_FILE)
node.account.create_file(ConsoleConsumer.LOG4J_CONFIG, log_config)
# Run and capture output
cmd = self.start_cmd(node)
self.logger.debug("Console consumer %d command: %s", idx, cmd)
consumer_output = node.account.ssh_capture(cmd, allow_fail=False)
first_line = next(consumer_output, None)
if first_line is not None:
self.start_jmx_tool(idx, node)
for line in itertools.chain([first_line], consumer_output):
msg = line.strip()
if msg == "shutdown_complete":
# Note that we can only rely on shutdown_complete message if running 0.10.0 or greater
if node in self.clean_shutdown_nodes:
raise Exception("Unexpected shutdown event from consumer, already shutdown. Consumer index: %d" % idx)
self.clean_shutdown_nodes.add(node)
else:
if self.message_validator is not None:
msg = self.message_validator(msg)
if msg is not None:
self.messages_consumed[idx].append(msg)
self.read_jmx_output(idx, node)
def start_node(self, node):
BackgroundThreadService.start_node(self, node)
def stop_node(self, node):
node.account.kill_process("console_consumer", allow_fail=True)
wait_until(lambda: not self.alive(node), timeout_sec=10, backoff_sec=.2,
err_msg="Timed out waiting for consumer to stop.")
def clean_node(self, node):
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
JmxMixin.clean_node(self, node)
node.account.kill_process("java", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % ConsoleConsumer.PERSISTENT_ROOT, allow_fail=False)
self.security_config.clean_node(node)
| 48.527473 | 126 | 0.603563 |
4353094eba973b7e06a9368bf1bbf0aaeff99dc4
| 1,726 |
py
|
Python
|
raffle.py
|
codeburr/pyraffle
|
6ef6f0949a1e1fb09c71b9dc37c7b5c34494c95e
|
[
"MIT"
] | null | null | null |
raffle.py
|
codeburr/pyraffle
|
6ef6f0949a1e1fb09c71b9dc37c7b5c34494c95e
|
[
"MIT"
] | null | null | null |
raffle.py
|
codeburr/pyraffle
|
6ef6f0949a1e1fb09c71b9dc37c7b5c34494c95e
|
[
"MIT"
] | null | null | null |
from sys import exit
import os
import random
def clear():
os.system("cls" if os.name=="nt" else "clear")
return
def raffle(wins,shuffles=3):
names=[]
winners=[]
with open("./names.txt","r") as f:
lines=f.readlines()
if wins<0: wins=abs(wins)
if wins>len(lines): wins=len(lines)
names=[line.rstrip() for line in lines]
for i in range(0,wins):
for j in range(0,shuffles):
win=names[random.randint(0,len(names)-1)]
winners.append(win)
names.remove(win)
print(str(i+1)+"- "+win)
with open("./log.txt","w") as f:
output=""
for i in range(0,wins):
output+=winners[i]+(", " if i<wins-1 else "")
f.write("Winners: "+output)
return
clear()
proceed=False
with open("./names.txt","r") as f:
if len(f.readlines())==0:
print("'names.txt' is empty or inexistent.")
input()
exit(0)
print("How many winners?")
x=str(input()).strip()
while(not proceed):
if not x.isnumeric():
clear()
print("Invalid. Please, input a valid integer.")
x=str(input()).strip()
else:
if x=="0":
clear()
print("There's no need to raffle if there's no winners.")
input()
exit(0)
else: proceed=True
clear()
proceed=False
print("How many shuffles? (Optional. Keep it empty to ignore.)")
y=str(input()).strip()
while(not proceed):
if not y.isnumeric() and y!="":
clear()
print("Invalid. Please, input a valid integer or keep it empty.")
y=str(input()).strip()
else:
if y=="": y=3
proceed=True
clear()
raffle(int(x),int(y))
| 25.761194 | 73 | 0.542294 |
6d9b69e9a93158e746fd7b282467f40b50ac7f79
| 1,767 |
py
|
Python
|
nsofetch/fetch_uk.py
|
FullFact/nso-stats-fetcher
|
7d279b7e885951ba69cd34c0e0a45e5ec8333549
|
[
"MIT"
] | null | null | null |
nsofetch/fetch_uk.py
|
FullFact/nso-stats-fetcher
|
7d279b7e885951ba69cd34c0e0a45e5ec8333549
|
[
"MIT"
] | null | null | null |
nsofetch/fetch_uk.py
|
FullFact/nso-stats-fetcher
|
7d279b7e885951ba69cd34c0e0a45e5ec8333549
|
[
"MIT"
] | null | null | null |
import json
import csv
import requests
import filepaths
import utils
def month_name_to_num(month_name):
month_map = {
'January': '01', 'February': '02', 'March': '03', 'April': '04', 'May': '05', 'June': '06',
'July': '07', 'August': '08', 'September': '09', 'October': '10', 'November': '11', 'December': '12',
}
return month_map[month_name]
def fetch_uk_stat(url: str, output_filepath: str):
response = requests.get(url)
stats = json.loads(response.text)
csv_output = [['month', 'observation']]
for stat in stats['months']:
month_val = stat['year'] + '-' + month_name_to_num(stat['month'])
csv_output.append([month_val, stat['value']])
with open(output_filepath, "w") as file:
writer = csv.writer(file)
writer.writerows(csv_output)
def fetch_uk_inflation_cpi():
stats_metadata = utils.read_stats_metadata()
url = stats_metadata['UK']['inflation']['CPI']['url']
output_filepath = filepaths.DATA_DIR / stats_metadata['UK']['inflation']['CPI']['filename']
fetch_uk_stat(url, output_filepath)
def fetch_uk_inflation_cpih():
stats_metadata = utils.read_stats_metadata()
url = stats_metadata['UK']['inflation']['CPIH']['url']
output_filepath = filepaths.DATA_DIR / stats_metadata['UK']['inflation']['CPIH']['filename']
fetch_uk_stat(url, output_filepath)
def fetch_uk_inflation_rpi():
stats_metadata = utils.read_stats_metadata()
url = stats_metadata['UK']['inflation']['RPI']['url']
output_filepath = filepaths.DATA_DIR / stats_metadata['UK']['inflation']['RPI']['filename']
fetch_uk_stat(url, output_filepath)
if __name__ == '__main__':
fetch_uk_inflation_cpi()
fetch_uk_inflation_cpih()
fetch_uk_inflation_rpi()
| 31.553571 | 109 | 0.66893 |
453bc11ae253de1b8dd0222840c48eb08a6607c3
| 8,031 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013-2014, Christian Berendt <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apache2_module
author:
- Christian Berendt (@berendt)
- Ralf Hertel (@n0trax)
- Robin Roth (@robinro)
short_description: Enables/disables a module of the Apache2 webserver.
description:
- Enables or disables a specified module of the Apache2 webserver.
options:
name:
description:
- Name of the module to enable/disable as given to C(a2enmod/a2dismod).
required: true
identifier:
description:
- Identifier of the module as listed by C(apache2ctl -M).
This is optional and usually determined automatically by the common convention of
appending C(_module) to I(name) as well as custom exception for popular modules.
required: False
force:
description:
- Force disabling of default modules and override Debian warnings.
required: false
type: bool
default: False
state:
description:
- Desired state of the module.
choices: ['present', 'absent']
default: present
ignore_configcheck:
description:
- Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
type: bool
default: False
requirements: ["a2enmod","a2dismod"]
'''
EXAMPLES = '''
# enables the Apache2 module "wsgi"
- apache2_module:
state: present
name: wsgi
# disables the Apache2 module "wsgi"
- apache2_module:
state: absent
name: wsgi
# disable default modules for Debian
- apache2_module:
state: absent
name: autoindex
force: True
# disable mpm_worker and ignore warnings about missing mpm module
- apache2_module:
state: absent
name: mpm_worker
ignore_configcheck: True
# enable dump_io module, which is identified as dumpio_module inside apache2
- apache2_module:
state: present
name: dump_io
identifier: dumpio_module
'''
RETURN = '''
result:
description: message about action taken
returned: always
type: str
warnings:
description: list of warning messages
returned: when needed
type: list
rc:
description: return code of underlying command
returned: failed
type: int
stdout:
description: stdout of underlying command
returned: failed
type: str
stderr:
description: stderr of underlying command
returned: failed
type: str
'''
import re
# import module snippets
from ansible.module_utils.basic import AnsibleModule
def _run_threaded(module):
control_binary = _get_ctl_binary(module)
result, stdout, stderr = module.run_command("%s -V" % control_binary)
return bool(re.search(r'threaded:[ ]*yes', stdout))
def _get_ctl_binary(module):
for command in ['apache2ctl', 'apachectl']:
ctl_binary = module.get_bin_path(command)
if ctl_binary is not None:
return ctl_binary
module.fail_json(
msg="Neither of apache2ctl nor apachctl found."
" At least one apache control binary is necessary."
)
def _module_is_enabled(module):
control_binary = _get_ctl_binary(module)
result, stdout, stderr = module.run_command("%s -M" % control_binary)
if result != 0:
error_msg = "Error executing %s: %s" % (control_binary, stderr)
if module.params['ignore_configcheck']:
if 'AH00534' in stderr and 'mpm_' in module.params['name']:
module.warnings.append(
"No MPM module loaded! apache2 reload AND other module actions"
" will fail if no MPM module is loaded immediately."
)
else:
module.warnings.append(error_msg)
return False
else:
module.fail_json(msg=error_msg)
searchstring = ' ' + module.params['identifier']
return searchstring in stdout
def create_apache_identifier(name):
"""
By convention if a module is loaded via name, it appears in apache2ctl -M as
name_module.
Some modules don't follow this convention and we use replacements for those."""
# a2enmod name replacement to apache2ctl -M names
text_workarounds = [
('shib2', 'mod_shib'),
('evasive', 'evasive20_module'),
]
# re expressions to extract subparts of names
re_workarounds = [
('php', r'^(php\d)\.'),
]
for a2enmod_spelling, module_name in text_workarounds:
if a2enmod_spelling in name:
return module_name
for search, reexpr in re_workarounds:
if search in name:
try:
rematch = re.search(reexpr, name)
return rematch.group(1) + '_module'
except AttributeError:
pass
return name + '_module'
def _set_state(module, state):
name = module.params['name']
force = module.params['force']
want_enabled = state == 'present'
state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
success_msg = "Module %s %s" % (name, state_string)
if _module_is_enabled(module) != want_enabled:
if module.check_mode:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
        a2mod_binary_name = a2mod_binary
        a2mod_binary = module.get_bin_path(a2mod_binary_name)
        if a2mod_binary is None:
            module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary_name, a2mod_binary_name))
if not want_enabled and force:
# force exists only for a2dismod on debian
a2mod_binary += ' -f'
result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name))
if _module_is_enabled(module) == want_enabled:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
else:
msg = (
'Failed to set module {name} to {state}:\n'
'{stdout}\n'
'Maybe the module identifier ({identifier}) was guessed incorrectly.'
'Consider setting the "identifier" option.'
).format(
name=name,
state=state_string,
stdout=stdout,
identifier=module.params['identifier']
)
module.fail_json(msg=msg,
rc=result,
stdout=stdout,
stderr=stderr)
else:
module.exit_json(changed=False,
result=success_msg,
warnings=module.warnings)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
identifier=dict(required=False, type='str'),
force=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
ignore_configcheck=dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
module.warnings = []
name = module.params['name']
if name == 'cgi' and _run_threaded(module):
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
if not module.params['identifier']:
module.params['identifier'] = create_apache_identifier(module.params['name'])
if module.params['state'] in ['present', 'absent']:
_set_state(module, module.params['state'])
if __name__ == '__main__':
main()
| 30.420455 | 133 | 0.62321 |
0b4587c7e967242617f409d78d67f350571efe4e
| 790 |
py
|
Python
|
fslks/eval/accuracy.py
|
opscidia/mtft_zsl
|
2b3b524e9b50e27ed80ebf9e514fedb5eed9954f
|
[
"MIT"
] | 1 |
2020-12-08T04:28:50.000Z
|
2020-12-08T04:28:50.000Z
|
fslks/eval/accuracy.py
|
opscidia/mtft_zsl
|
2b3b524e9b50e27ed80ebf9e514fedb5eed9954f
|
[
"MIT"
] | null | null | null |
fslks/eval/accuracy.py
|
opscidia/mtft_zsl
|
2b3b524e9b50e27ed80ebf9e514fedb5eed9954f
|
[
"MIT"
] | 1 |
2021-08-25T11:01:37.000Z
|
2021-08-25T11:01:37.000Z
|
def sentence_accuracy(references, predictions):
"""Compute accuracy, each line contains a label."""
count = 0.0
match = 0.0
for label, pred in zip(references, predictions):
if label == pred:
match += 1
count += 1
return 100 * match / count
def word_accuracy(references, predictions):
"""Compute accuracy on per word basis."""
total_acc, total_count = 0., 0.
for labels, preds in zip(references, predictions):
match = 0.0
for pos in range(min(len(labels), len(preds))):
label = labels[pos]
pred = preds[pos]
if label == pred:
match += 1
total_acc += 100 * match / max(len(labels), len(preds))
total_count += 1
return total_acc / total_count
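# Added illustration (not part of the original module): toy usage of the two
# metrics above; sentence_accuracy compares whole lines, word_accuracy compares
# token lists position by position.
if __name__ == "__main__":
    refs = ["the cat sat", "hello world"]
    preds = ["the cat sat", "hello there"]
    print(sentence_accuracy(refs, preds))  # 50.0: one of two lines matches exactly
    print(word_accuracy([r.split() for r in refs],
                        [p.split() for p in preds]))  # 75.0: (100 + 50) / 2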
| 31.6 | 63 | 0.579747 |
8d58b29ff8551227e0820a24a80b4dff2e606808
| 48,392 |
py
|
Python
|
sdk/storage/azure-storage-file-share/tests/test_share.py
|
chhtw/azure-sdk-for-python
|
51c5ade919cb946fbe66faf6e4119b2e528331d4
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-file-share/tests/test_share.py
|
chhtw/azure-sdk-for-python
|
51c5ade919cb946fbe66faf6e4119b2e528331d4
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-file-share/tests/test_share.py
|
chhtw/azure-sdk-for-python
|
51c5ade919cb946fbe66faf6e4119b2e528331d4
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import time
import unittest
from datetime import datetime, timedelta
import pytest
import requests
from azure.core.pipeline.transport import RequestsTransport
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ResourceExistsError)
from azure.storage.fileshare import (
AccessPolicy,
ShareSasPermissions,
ShareAccessTier,
ShareServiceClient,
ShareDirectoryClient,
ShareFileClient,
ShareClient,
generate_share_sas,
ShareRootSquash, ShareProtocols)
from devtools_testutils.storage import StorageTestCase, LogCaptured
from settings.testcase import FileSharePreparer
# ------------------------------------------------------------------------------
TEST_SHARE_PREFIX = 'share'
# ------------------------------------------------------------------------------
class StorageShareTest(StorageTestCase):
def _setup(self, storage_account_name, storage_account_key):
file_url = self.account_url(storage_account_name, "file")
credentials = storage_account_key
self.fsc = ShareServiceClient(account_url=file_url, credential=credentials)
self.test_shares = []
def _teardown(self, FILE_PATH):
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
# --Helpers-----------------------------------------------------------------
def _get_share_reference(self, prefix=TEST_SHARE_PREFIX):
share_name = self.get_resource_name(prefix)
share = self.fsc.get_share_client(share_name)
self.test_shares.append(share_name)
return share
def _create_share(self, prefix=TEST_SHARE_PREFIX, **kwargs):
share_client = self._get_share_reference(prefix)
try:
share_client.create_share(**kwargs)
except:
pass
return share_client
def _create_if_not_exists(self, prefix=TEST_SHARE_PREFIX, **kwargs):
share_client = self._get_share_reference(prefix)
return share_client.create_if_not_exists(**kwargs)
def _delete_shares(self, prefix=TEST_SHARE_PREFIX):
for l in self.fsc.list_shares(include_snapshots=True):
try:
self.fsc.delete_share(l.name, delete_snapshots=True)
except:
pass
# --Test cases for shares -----------------------------------------
def test_create_share_client(self):
share_client = ShareClient.from_share_url("http://127.0.0.1:11002/account/customized/path/share?snapshot=baz&", credential={"account_name": "myaccount", "account_key": "key"})
self.assertEqual(share_client.share_name, "share")
self.assertEqual(share_client.snapshot, "baz")
share_client = ShareClient.from_share_url("http://127.0.0.1:11002/account/share?snapshot=baz&", credential="credential")
self.assertEqual(share_client.share_name, "share")
self.assertEqual(share_client.snapshot, "baz")
@FileSharePreparer()
def test_create_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
created = self._create_share()
# Assert
self.assertTrue(created)
self._delete_shares(share.share_name)
@FileSharePreparer()
def test_create_share_if_not_exists_without_existing_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
created = self._create_if_not_exists()
# Assert
self.assertTrue(created)
self._delete_shares(share.share_name)
@FileSharePreparer()
def test_create_share_if_not_exists_with_existing_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
self._create_share()
created = self._create_if_not_exists()
# Assert
self.assertIsNone(created)
self._delete_shares(share.share_name)
@FileSharePreparer()
def test_create_share_snapshot(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
created = share.create_share()
snapshot = share.create_snapshot()
# Assert
self.assertTrue(created)
self.assertIsNotNone(snapshot['snapshot'])
self.assertIsNotNone(snapshot['etag'])
self.assertIsNotNone(snapshot['last_modified'])
self._delete_shares(share.share_name)
@FileSharePreparer()
def test_create_snapshot_with_metadata(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
metadata = {"test1": "foo", "test2": "bar"}
metadata2 = {"test100": "foo100", "test200": "bar200"}
# Act
created = share.create_share(metadata=metadata)
snapshot = share.create_snapshot(metadata=metadata2)
share_props = share.get_share_properties()
snapshot_client = ShareClient(
self.account_url(storage_account_name, "file"),
share_name=share.share_name,
snapshot=snapshot,
credential=storage_account_key
)
snapshot_props = snapshot_client.get_share_properties()
# Assert
self.assertTrue(created)
self.assertIsNotNone(snapshot['snapshot'])
self.assertIsNotNone(snapshot['etag'])
self.assertIsNotNone(snapshot['last_modified'])
self.assertEqual(share_props.metadata, metadata)
self.assertEqual(snapshot_props.metadata, metadata2)
self._delete_shares(share.share_name)
@FileSharePreparer()
def test_delete_share_with_snapshots(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
snapshot = share.create_snapshot()
# Act
with self.assertRaises(HttpResponseError):
share.delete_share()
deleted = share.delete_share(delete_snapshots=True)
self.assertIsNone(deleted)
self._delete_shares()
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_undelete_share(self, storage_account_name, storage_account_key):
        # Share soft delete must be enabled via an SRP call or armclient, so make this test playback only.
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share(prefix="sharerestore")
# Act
share_client.delete_share()
        # to make sure the share was deleted
with self.assertRaises(ResourceNotFoundError):
share_client.get_share_properties()
share_list = list(self.fsc.list_shares(include_deleted=True, include_snapshots=True, include_metadata=True))
self.assertTrue(len(share_list) >= 1)
for share in share_list:
# find the deleted share and restore it
if share.deleted and share.name == share_client.share_name:
if self.is_live:
time.sleep(60)
restored_share_client = self.fsc.undelete_share(share.name, share.version)
# to make sure the deleted share is restored
props = restored_share_client.get_share_properties()
self.assertIsNotNone(props)
@FileSharePreparer()
def test_lease_share_acquire_and_release(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
# Act
lease = share_client.acquire_lease()
lease.release()
# Assert
@FileSharePreparer()
def test_acquire_lease_on_sharesnapshot(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference("testshare1")
# Act
share.create_share()
snapshot = share.create_snapshot()
snapshot_client = ShareClient(
self.account_url(storage_account_name, "file"),
share_name=share.share_name,
snapshot=snapshot,
credential=storage_account_key
)
share_lease = share.acquire_lease()
share_snapshot_lease = snapshot_client.acquire_lease()
# Assert
with self.assertRaises(HttpResponseError):
share.get_share_properties(lease=share_snapshot_lease)
with self.assertRaises(HttpResponseError):
snapshot_client.get_share_properties(lease=share_lease)
self.assertIsNotNone(snapshot['snapshot'])
self.assertIsNotNone(snapshot['etag'])
self.assertIsNotNone(snapshot['last_modified'])
self.assertIsNotNone(share_lease)
self.assertIsNotNone(share_snapshot_lease)
self.assertNotEqual(share_lease, share_snapshot_lease)
share_snapshot_lease.release()
share_lease.release()
self._delete_shares(share.share_name)
@FileSharePreparer()
def test_lease_share_renew(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
lease = share_client.acquire_lease(lease_duration=15)
self.sleep(10)
lease_id_start = lease.id
# Act
lease.renew()
# Assert
self.assertEqual(lease.id, lease_id_start)
self.sleep(5)
with self.assertRaises(HttpResponseError):
share_client.delete_share()
self.sleep(10)
share_client.delete_share()
@FileSharePreparer()
def test_lease_share_with_duration(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
# Act
lease = share_client.acquire_lease(lease_duration=15)
# Assert
with self.assertRaises(HttpResponseError):
share_client.acquire_lease()
self.sleep(15)
share_client.acquire_lease()
@FileSharePreparer()
def test_lease_share_twice(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
# Act
lease = share_client.acquire_lease(lease_duration=15)
# Assert
lease2 = share_client.acquire_lease(lease_id=lease.id)
self.assertEqual(lease.id, lease2.id)
@FileSharePreparer()
def test_lease_share_with_proposed_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
# Act
proposed_lease_id = '55e97f64-73e8-4390-838d-d9e84a374321'
lease = share_client.acquire_lease(lease_id=proposed_lease_id)
# Assert
self.assertEqual(proposed_lease_id, lease.id)
@FileSharePreparer()
def test_lease_share_change_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
# Act
lease_id = '29e0b239-ecda-4f69-bfa3-95f6af91464c'
lease = share_client.acquire_lease()
lease_id1 = lease.id
lease.change(proposed_lease_id=lease_id)
lease.renew()
lease_id2 = lease.id
# Assert
self.assertIsNotNone(lease_id1)
self.assertIsNotNone(lease_id2)
self.assertNotEqual(lease_id1, lease_id)
self.assertEqual(lease_id2, lease_id)
@FileSharePreparer()
def test_set_share_metadata_with_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
metadata = {'hello': 'world', 'number': '43'}
lease_id = share_client.acquire_lease()
# Act
share_client.set_share_metadata(metadata, lease=lease_id)
# Assert
md = share_client.get_share_properties().metadata
self.assertDictEqual(md, metadata)
@FileSharePreparer()
def test_get_share_metadata_with_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
metadata = {'hello': 'world', 'number': '43'}
share_client.set_share_metadata(metadata)
lease_id = share_client.acquire_lease()
# Act
md = share_client.get_share_properties(lease=lease_id).metadata
# Assert
self.assertDictEqual(md, metadata)
@FileSharePreparer()
def test_get_share_properties_with_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
metadata = {'hello': 'world', 'number': '43'}
share_client.set_share_metadata(metadata)
lease_id = share_client.acquire_lease()
# Act
props = share_client.get_share_properties(lease=lease_id)
lease_id.break_lease()
# Assert
self.assertIsNotNone(props)
self.assertDictEqual(props.metadata, metadata)
self.assertEqual(props.lease.duration, 'infinite')
self.assertEqual(props.lease.state, 'leased')
self.assertEqual(props.lease.status, 'locked')
@FileSharePreparer()
def test_get_share_acl_with_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
lease_id = share_client.acquire_lease()
# Act
acl = share_client.get_share_access_policy(lease=lease_id)
# Assert
self.assertIsNotNone(acl)
self.assertIsNone(acl.get('public_access'))
@FileSharePreparer()
def test_set_share_acl_with_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
lease_id = share_client.acquire_lease()
# Act
access_policy = AccessPolicy(permission=ShareSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow())
signed_identifiers = {'testid': access_policy}
share_client.set_share_access_policy(signed_identifiers, lease=lease_id)
# Assert
acl = share_client.get_share_access_policy()
self.assertIsNotNone(acl)
self.assertIsNone(acl.get('public_access'))
@FileSharePreparer()
def test_lease_share_break_period(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
# Act
lease = share_client.acquire_lease(lease_duration=15)
# Assert
lease.break_lease(lease_break_period=5)
self.sleep(6)
with self.assertRaises(HttpResponseError):
share_client.delete_share(lease=lease)
@FileSharePreparer()
def test_delete_share_with_lease_id(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_client = self._create_share('test')
lease = share_client.acquire_lease(lease_duration=15)
# Assert
with self.assertRaises(HttpResponseError):
share_client.delete_share()
# Act
deleted = share_client.delete_share(lease=lease)
# Assert
self.assertIsNone(deleted)
with self.assertRaises(ResourceNotFoundError):
share_client.get_share_properties()
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_restore_to_existing_share(self, storage_account_name, storage_account_key):
        # Share soft delete must be enabled via an SRP call or armclient, so make this test playback only.
self._setup(storage_account_name, storage_account_key)
# Act
share_client = self._create_share()
share_client.delete_share()
        # to make sure the share was deleted
with self.assertRaises(ResourceNotFoundError):
share_client.get_share_properties()
# create a share with the same name as the deleted one
if self.is_live:
time.sleep(30)
share_client.create_share()
share_list = list(self.fsc.list_shares(include_deleted=True))
self.assertTrue(len(share_list) >= 1)
for share in share_list:
# find the deleted share and restore it
if share.deleted and share.name == share_client.share_name:
with self.assertRaises(HttpResponseError):
self.fsc.undelete_share(share.name, share.version)
@FileSharePreparer()
def test_delete_snapshot(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
snapshot = share.create_snapshot()
# Act
with self.assertRaises(HttpResponseError):
share.delete_share()
snapshot_client = ShareClient(
self.account_url(storage_account_name, "file"),
share_name=share.share_name,
snapshot=snapshot,
credential=storage_account_key
)
deleted = snapshot_client.delete_share()
self.assertIsNone(deleted)
self._delete_shares()
@FileSharePreparer()
def test_create_share_fail_on_exist(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
created = share.create_share()
# Assert
self.assertTrue(created)
self._delete_shares()
@FileSharePreparer()
def test_create_share_with_already_existing_share_fail_on_exist(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
created = share.create_share()
with self.assertRaises(HttpResponseError):
share.create_share()
# Assert
self.assertTrue(created)
self._delete_shares()
@FileSharePreparer()
def test_create_share_with_metadata(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
metadata = {'hello': 'world', 'number': '42'}
# Act
client = self._get_share_reference()
created = client.create_share(metadata=metadata)
# Assert
self.assertTrue(created)
md = client.get_share_properties().metadata
self.assertDictEqual(md, metadata)
self._delete_shares()
@FileSharePreparer()
def test_create_share_with_quota(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
# Act
client = self._get_share_reference()
created = client.create_share(quota=1)
# Assert
props = client.get_share_properties()
self.assertTrue(created)
self.assertEqual(props.quota, 1)
self._delete_shares()
@FileSharePreparer()
def test_create_share_with_access_tier(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
# Act
client = self._get_share_reference()
created = client.create_share(access_tier="Hot")
# Assert
props = client.get_share_properties()
self.assertTrue(created)
self.assertEqual(props.access_tier, "Hot")
self._delete_shares()
@FileSharePreparer()
def test_share_exists(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
# Act
exists = share.get_share_properties()
# Assert
self.assertTrue(exists)
self._delete_shares()
@FileSharePreparer()
def test_share_not_exists(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
# Act
with self.assertRaises(ResourceNotFoundError):
share.get_share_properties()
# Assert
self._delete_shares()
@FileSharePreparer()
def test_share_snapshot_exists(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
snapshot = share.create_snapshot()
# Act
snapshot_client = self.fsc.get_share_client(share.share_name, snapshot=snapshot)
exists = snapshot_client.get_share_properties()
# Assert
self.assertTrue(exists)
self._delete_shares()
@FileSharePreparer()
def test_share_snapshot_not_exists(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
made_up_snapshot = '2017-07-19T06:53:46.0000000Z'
# Act
snapshot_client = self.fsc.get_share_client(share.share_name, snapshot=made_up_snapshot)
with self.assertRaises(ResourceNotFoundError):
snapshot_client.get_share_properties()
# Assert
self._delete_shares()
@FileSharePreparer()
def test_unicode_create_share_unicode_name(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_name = u'啊齄丂狛狜'
# Act
with self.assertRaises(HttpResponseError):
# not supported - share name must be alphanumeric, lowercase
client = self.fsc.get_share_client(share_name)
client.create_share()
# Assert
self._delete_shares()
@FileSharePreparer()
def test_list_shares_no_options(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
# Act
shares = list(self.fsc.list_shares())
# Assert
self.assertIsNotNone(shares)
self.assertGreaterEqual(len(shares), 1)
self.assertIsNotNone(shares[0])
self.assertNamedItemInContainer(shares, share.share_name)
self._delete_shares()
@FileSharePreparer()
def test_list_shares_no_options_for_premium_account(self, premium_storage_file_account_name, premium_storage_file_account_key):
self._setup(premium_storage_file_account_name, premium_storage_file_account_key)
share = self._create_share()
# Act
shares = list(self.fsc.list_shares())
# Assert
self.assertIsNotNone(shares)
self.assertGreaterEqual(len(shares), 1)
self.assertIsNotNone(shares[0])
self.assertIsNotNone(shares[0].provisioned_iops)
self.assertIsNotNone(shares[0].provisioned_ingress_mbps)
self.assertIsNotNone(shares[0].provisioned_egress_mbps)
self.assertIsNotNone(shares[0].next_allowed_quota_downgrade_time)
self.assertIsNotNone(shares[0].provisioned_bandwidth)
self._delete_shares()
@FileSharePreparer()
def test_list_shares_leased_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share("test1")
# Act
lease = share.acquire_lease()
resp = list(self.fsc.list_shares())
# Assert
self.assertIsNotNone(resp)
self.assertGreaterEqual(len(resp), 1)
self.assertIsNotNone(resp[0])
self.assertEqual(resp[0].lease.duration, 'infinite')
self.assertEqual(resp[0].lease.status, 'locked')
self.assertEqual(resp[0].lease.state, 'leased')
lease.release()
self._delete_shares()
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_list_shares_with_snapshot(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
#share = self._get_share_reference()
share = self._create_share('random')
snapshot1 = share.create_snapshot()
snapshot2 = share.create_snapshot()
# Act
shares = self.fsc.list_shares(include_snapshots=True)
# Assert
self.assertIsNotNone(shares)
all_shares = list(shares)
self.assertEqual(len(all_shares), 3)
self.assertNamedItemInContainer(all_shares, share.share_name)
self.assertNamedItemInContainer(all_shares, snapshot1['snapshot'])
self.assertNamedItemInContainer(all_shares, snapshot2['snapshot'])
share.delete_share(delete_snapshots=True)
self._delete_shares()
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_list_shares_with_prefix(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
shares = list(self.fsc.list_shares(name_starts_with=share.share_name))
# Assert
self.assertEqual(len(shares), 1)
self.assertIsNotNone(shares[0])
self.assertEqual(shares[0].name, share.share_name)
self.assertIsNone(shares[0].metadata)
self._delete_shares()
@FileSharePreparer()
def test_list_shares_with_include_metadata(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
metadata = {'hello': 'world', 'number': '42'}
share = self._get_share_reference()
share.create_share(metadata=metadata)
# Act
shares = list(self.fsc.list_shares(share.share_name, include_metadata=True))
# Assert
self.assertIsNotNone(shares)
self.assertGreaterEqual(len(shares), 1)
self.assertIsNotNone(shares[0])
self.assertNamedItemInContainer(shares, share.share_name)
self.assertDictEqual(shares[0].metadata, metadata)
self._delete_shares()
@FileSharePreparer()
def test_list_shares_with_num_results_and_marker(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
prefix = 'listshare'
share_names = []
for i in range(0, 4):
share_names.append(self._create_share(prefix + str(i)).share_name)
#share_names.sort()
# Act
generator1 = self.fsc.list_shares(prefix, results_per_page=2).by_page()
shares1 = list(next(generator1))
generator2 = self.fsc.list_shares(
prefix, results_per_page=2).by_page(continuation_token=generator1.continuation_token)
shares2 = list(next(generator2))
# Assert
self.assertIsNotNone(shares1)
self.assertEqual(len(shares1), 2)
self.assertNamedItemInContainer(shares1, share_names[0])
self.assertNamedItemInContainer(shares1, share_names[1])
self.assertIsNotNone(shares2)
self.assertEqual(len(shares2), 2)
self.assertNamedItemInContainer(shares2, share_names[2])
self.assertNamedItemInContainer(shares2, share_names[3])
self._delete_shares()
@FileSharePreparer()
def test_set_share_metadata(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
metadata = {'hello': 'world', 'number': '42'}
# Act
share.set_share_metadata(metadata)
# Assert
md = share.get_share_properties().metadata
self.assertDictEqual(md, metadata)
self._delete_shares()
@FileSharePreparer()
def test_get_share_metadata(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
metadata = {'hello': 'world', 'number': '42'}
# Act
client = self._get_share_reference()
created = client.create_share(metadata=metadata)
# Assert
self.assertTrue(created)
md = client.get_share_properties().metadata
self.assertDictEqual(md, metadata)
self._delete_shares()
@FileSharePreparer()
def test_get_share_metadata_with_snapshot(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
metadata = {'hello': 'world', 'number': '42'}
# Act
client = self._get_share_reference()
created = client.create_share(metadata=metadata)
snapshot = client.create_snapshot()
snapshot_client = self.fsc.get_share_client(client.share_name, snapshot=snapshot)
# Assert
self.assertTrue(created)
md = snapshot_client.get_share_properties().metadata
self.assertDictEqual(md, metadata)
self._delete_shares()
@FileSharePreparer()
def test_set_share_properties(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share1 = self._create_share("share1")
share2 = self._create_share("share2")
share1.set_share_quota(3)
share1.set_share_properties(access_tier="Hot")
share2.set_share_properties(access_tier=ShareAccessTier("Cool"), quota=2)
# Act
props1 = share1.get_share_properties()
props2 = share2.get_share_properties()
share1_quota = props1.quota
share1_tier = props1.access_tier
share2_quota = props2.quota
share2_tier = props2.access_tier
# Assert
self.assertEqual(share1_quota, 3)
self.assertEqual(share1_tier, "Hot")
self.assertEqual(share2_quota, 2)
self.assertEqual(share2_tier, "Cool")
self._delete_shares()
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_create_share_with_protocol(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
# Act
share_client = self._get_share_reference("testshare2")
with self.assertRaises(ValueError):
share_client.create_share(protocols="SMB", root_squash=ShareRootSquash.all_squash)
share_client.create_share(protocols="NFS", root_squash=ShareRootSquash.root_squash)
share_enabled_protocol = share_client.get_share_properties().protocols
share_root_squash = share_client.get_share_properties().root_squash
# Assert
self.assertEqual(share_enabled_protocol, ["NFS"])
self.assertEqual(share_root_squash, ShareRootSquash.root_squash)
share_client.delete_share()
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_set_share_properties_with_root_squash(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share1 = self._create_share("share1", protocols=ShareProtocols.NFS)
share2 = self._create_share("share2", protocols=ShareProtocols.NFS)
share1.set_share_properties(root_squash="NoRootSquash")
share2.set_share_properties(root_squash=ShareRootSquash.root_squash)
# Act
share1_props = share1.get_share_properties()
share2_props = share2.get_share_properties()
# # Assert
self.assertEqual(share1_props.root_squash, ShareRootSquash.no_root_squash)
self.assertEqual(share1_props.protocols, ['NFS'])
self.assertEqual(share2_props.root_squash, ShareRootSquash.root_squash)
self.assertEqual(share2_props.protocols, ['NFS'])
@pytest.mark.playback_test_only
@FileSharePreparer()
def test_list_shares_with_root_squash_and_protocols(
self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
self._create_share(prefix="testshare1", protocols="NFS", root_squash=ShareRootSquash.all_squash)
self._create_share(prefix="testshare2", protocols=ShareProtocols.SMB)
# Act
shares = list(self.fsc.list_shares())
share1_props = shares[0]
share2_props = shares[1]
# Assert
self.assertIsNotNone(shares)
self.assertGreaterEqual(len(shares), 2)
self.assertEqual(share1_props.root_squash, ShareRootSquash.all_squash)
self.assertEqual(share1_props.protocols, ["NFS"])
self.assertEqual(share2_props.root_squash, None)
self.assertEqual(share2_props.protocols, ["SMB"])
self._delete_shares()
@FileSharePreparer()
def test_get_share_properties_for_premium_account(self, premium_storage_file_account_name, premium_storage_file_account_key):
self._setup(premium_storage_file_account_name, premium_storage_file_account_key)
share = self._create_share()
# Act
props = share.get_share_properties()
# Assert
self.assertIsNotNone(props)
self.assertIsNotNone(props.quota)
self.assertIsNotNone(props.provisioned_iops)
self.assertIsNotNone(props.provisioned_ingress_mbps)
self.assertIsNotNone(props.provisioned_egress_mbps)
self.assertIsNotNone(props.next_allowed_quota_downgrade_time)
self.assertIsNotNone(props.provisioned_bandwidth)
self._delete_shares()
@FileSharePreparer()
def test_delete_share_with_existing_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
deleted = share.delete_share()
# Assert
self.assertIsNone(deleted)
self._delete_shares()
@FileSharePreparer()
def test_delete_share_with_existing_share_fail_not_exist(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
client = self._get_share_reference()
# Act
with LogCaptured(self) as log_captured:
with self.assertRaises(HttpResponseError):
client.delete_share()
log_as_str = log_captured.getvalue()
self._delete_shares()
@FileSharePreparer()
def test_delete_share_with_non_existing_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
client = self._get_share_reference()
# Act
with LogCaptured(self) as log_captured:
with self.assertRaises(HttpResponseError):
deleted = client.delete_share()
log_as_str = log_captured.getvalue()
self.assertTrue('ERROR' not in log_as_str)
self._delete_shares()
@FileSharePreparer()
def test_delete_share_with_non_existing_share_fail_not_exist(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
client = self._get_share_reference()
# Act
with LogCaptured(self) as log_captured:
with self.assertRaises(HttpResponseError):
client.delete_share()
log_as_str = log_captured.getvalue()
self._delete_shares()
@FileSharePreparer()
def test_get_share_stats(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
share_usage = share.get_share_stats()
# Assert
self.assertEqual(share_usage, 0)
self._delete_shares()
@FileSharePreparer()
def test_set_share_acl(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
resp = share.set_share_access_policy(signed_identifiers=dict())
# Assert
acl = share.get_share_access_policy()
self.assertIsNotNone(acl)
self._delete_shares()
@FileSharePreparer()
def test_set_share_acl_with_empty_signed_identifiers(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
resp = share.set_share_access_policy(dict())
# Assert
acl = share.get_share_access_policy()
self.assertIsNotNone(acl)
self.assertEqual(len(acl.get('signed_identifiers')), 0)
self._delete_shares()
@FileSharePreparer()
def test_set_share_acl_with_signed_identifiers(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
identifiers = dict()
identifiers['testid'] = AccessPolicy(
permission=ShareSasPermissions(write=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow() - timedelta(minutes=1),
)
resp = share.set_share_access_policy(identifiers)
# Assert
acl = share.get_share_access_policy()
self.assertIsNotNone(acl)
self.assertEqual(len(acl['signed_identifiers']), 1)
self.assertEqual(acl['signed_identifiers'][0].id, 'testid')
self._delete_shares()
@FileSharePreparer()
def test_set_share_acl_too_many_ids(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._get_share_reference()
share.create_share()
# Act
identifiers = dict()
for i in range(0, 6):
identifiers['id{}'.format(i)] = AccessPolicy()
# Assert
with self.assertRaises(ValueError) as e:
share.set_share_access_policy(identifiers)
self.assertEqual(
str(e.exception),
'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
)
self._delete_shares()
@FileSharePreparer()
def test_list_directories_and_files(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
dir0 = share.get_directory_client()
dir0.upload_file('file1', 'data1')
dir1 = share.get_directory_client('dir1')
dir1.create_directory()
dir1.upload_file('file2', 'data2')
dir2 = share.get_directory_client('dir2')
dir2.create_directory()
# Act
resp = list(share.list_directories_and_files())
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 3)
self.assertIsNotNone(resp[0])
self.assertNamedItemInContainer(resp, 'dir1')
self.assertNamedItemInContainer(resp, 'dir2')
self.assertNamedItemInContainer(resp, 'file1')
self._delete_shares()
@FileSharePreparer()
def test_list_directories_and_files_with_snapshot(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_name = self._create_share()
dir1 = share_name.get_directory_client('dir1')
dir1.create_directory()
dir2 = share_name.get_directory_client('dir2')
dir2.create_directory()
snapshot1 = share_name.create_snapshot()
dir3 = share_name.get_directory_client('dir3')
dir3.create_directory()
file1 = share_name.get_file_client('file1')
file1.upload_file('data')
# Act
snapshot_client = self.fsc.get_share_client(share_name.share_name, snapshot=snapshot1)
resp = list(snapshot_client.list_directories_and_files())
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 2)
self.assertIsNotNone(resp[0])
self.assertNamedItemInContainer(resp, 'dir1')
self.assertNamedItemInContainer(resp, 'dir2')
self._delete_shares()
@FileSharePreparer()
def test_list_directories_and_files_with_num_results(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_name = self._create_share()
dir1 = share_name.create_directory('dir1')
root = share_name.get_directory_client()
root.upload_file('filea1', '1024')
root.upload_file('filea2', '1024')
root.upload_file('filea3', '1024')
root.upload_file('fileb1', '1024')
# Act
result = share_name.list_directories_and_files(results_per_page=2).by_page()
result = list(next(result))
# Assert
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
self.assertNamedItemInContainer(result, 'dir1')
self.assertNamedItemInContainer(result, 'filea1')
self._delete_shares()
@FileSharePreparer()
def test_list_directories_and_files_with_num_results_and_marker(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share_name = self._create_share()
dir1 = share_name.get_directory_client('dir1')
dir1.create_directory()
dir1.upload_file('filea1', '1024')
dir1.upload_file('filea2', '1024')
dir1.upload_file('filea3', '1024')
dir1.upload_file('fileb1', '1024')
# Act
generator1 = share_name.list_directories_and_files(
'dir1', results_per_page=2).by_page()
result1 = list(next(generator1))
generator2 = share_name.list_directories_and_files(
'dir1', results_per_page=2).by_page(continuation_token=generator1.continuation_token)
result2 = list(next(generator2))
# Assert
self.assertEqual(len(result1), 2)
self.assertEqual(len(result2), 2)
self.assertNamedItemInContainer(result1, 'filea1')
self.assertNamedItemInContainer(result1, 'filea2')
self.assertNamedItemInContainer(result2, 'filea3')
self.assertNamedItemInContainer(result2, 'fileb1')
self.assertEqual(generator2.continuation_token, None)
self._delete_shares()
@FileSharePreparer()
def test_list_directories_and_files_with_prefix(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
dir1 = share.create_directory('dir1')
share.create_directory('dir1/pref_dir3')
share.create_directory('dir2')
root = share.get_directory_client()
root.upload_file('file1', '1024')
dir1.upload_file('pref_file2', '1025')
dir1.upload_file('file3', '1025')
# Act
resp = list(share.list_directories_and_files('dir1', name_starts_with='pref'))
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 2)
self.assertIsNotNone(resp[0])
self.assertNamedItemInContainer(resp, 'pref_file2')
self.assertNamedItemInContainer(resp, 'pref_dir3')
self._delete_shares()
@FileSharePreparer()
def test_shared_access_share(self, storage_account_name, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
if not self.is_live:
return
self._setup(storage_account_name, storage_account_key)
file_name = 'file1'
dir_name = 'dir1'
data = b'hello world'
share = self._create_share()
dir1 = share.create_directory(dir_name)
dir1.upload_file(file_name, data)
token = generate_share_sas(
share.account_name,
share.share_name,
share.credential.account_key,
expiry=datetime.utcnow() + timedelta(hours=1),
permission=ShareSasPermissions(read=True),
)
sas_client = ShareFileClient(
self.account_url(storage_account_name, "file"),
share_name=share.share_name,
file_path=dir_name + '/' + file_name,
credential=token,
)
# Act
print(sas_client.url)
response = requests.get(sas_client.url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(data, response.content)
self._delete_shares()
@FileSharePreparer()
def test_create_permission_for_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
user_given_permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-" \
"1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;" \
"S-1-5-21-397955417-626881126-188441444-3053964)"
share_client = self._create_share()
permission_key = share_client.create_permission_for_share(user_given_permission)
self.assertIsNotNone(permission_key)
server_returned_permission = share_client.get_permission_for_share(permission_key)
self.assertIsNotNone(server_returned_permission)
permission_key2 = share_client.create_permission_for_share(server_returned_permission)
# the permission key obtained from user_given_permission should be the same as the permission key obtained from
# server returned permission
self.assertEqual(permission_key, permission_key2)
@FileSharePreparer()
def test_transport_closed_only_once(self, storage_account_name, storage_account_key):
if not self.is_live:
return
self._setup(storage_account_name, storage_account_key)
transport = RequestsTransport()
url = self.account_url(storage_account_name, "file")
credential = storage_account_key
prefix = TEST_SHARE_PREFIX
share_name = self.get_resource_name(prefix)
with ShareServiceClient(url, credential=credential, transport=transport) as fsc:
fsc.get_service_properties()
assert transport.session is not None
with fsc.get_share_client(share_name) as fc:
assert transport.session is not None
fsc.get_service_properties()
assert transport.session is not None
@FileSharePreparer()
def test_delete_directory_from_share(self, storage_account_name, storage_account_key):
self._setup(storage_account_name, storage_account_key)
share = self._create_share()
dir1 = share.create_directory('dir1')
share.create_directory('dir2')
share.create_directory('dir3')
# Act
resp = list(share.list_directories_and_files())
self.assertEqual(len(resp), 3)
share.delete_directory('dir3')
# Assert
resp = list(share.list_directories_and_files())
self.assertEqual(len(resp), 2)
self._delete_shares()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 37.80625 | 183 | 0.680174 |
b75490c5aeef981a36d32cac87599a542b550b99
| 20 |
py
|
Python
|
tests/__init__.py
|
nicolas-f/noisesensor
|
fc007fe5e03b0deca0863d987cb6776be1cd2bef
|
[
"BSD-3-Clause"
] | 2 |
2020-03-29T21:58:45.000Z
|
2021-09-21T12:43:15.000Z
|
tests/__init__.py
|
nicolas-f/noisesensor
|
fc007fe5e03b0deca0863d987cb6776be1cd2bef
|
[
"BSD-3-Clause"
] | null | null | null |
tests/__init__.py
|
nicolas-f/noisesensor
|
fc007fe5e03b0deca0863d987cb6776be1cd2bef
|
[
"BSD-3-Clause"
] | 1 |
2019-02-19T14:53:01.000Z
|
2019-02-19T14:53:01.000Z
|
import test_noisepy
| 10 | 19 | 0.9 |
defe1dac46625d7e611872fee52147eb750ce91e
| 8,477 |
py
|
Python
|
lib/reportlab/lib/textsplit.py
|
jwheare/digest
|
963a0f46862319aa499d4cbbfdbd6380287fc5a5
|
[
"BSD-3-Clause"
] | 5 |
2016-05-08T23:51:38.000Z
|
2021-05-02T10:09:29.000Z
|
lib/reportlab/lib/textsplit.py
|
jwheare/digest
|
963a0f46862319aa499d4cbbfdbd6380287fc5a5
|
[
"BSD-3-Clause"
] | null | null | null |
lib/reportlab/lib/textsplit.py
|
jwheare/digest
|
963a0f46862319aa499d4cbbfdbd6380287fc5a5
|
[
"BSD-3-Clause"
] | 1 |
2018-01-16T16:03:44.000Z
|
2018-01-16T16:03:44.000Z
|
#Copyright ReportLab Europe Ltd. 2000-2006
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/textsplit.py
"""Helpers for text wrapping, hyphenation, Asian text splitting and kinsoku shori.
How to split a 'big word' depends on the language and the writing system. This module
works on a Unicode string. It ought to grow by allowing more algorithms to be plugged
in based on possible knowledge of the language and desirable 'niceness' of the algorithm.
"""
__version__=''' $Id: textsplit.py 3239 2008-07-01 13:19:19Z rgbecker $ '''
from types import StringType, UnicodeType
import unicodedata
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.rl_config import _FUZZ
CANNOT_START_LINE = [
#strongly prohibited e.g. end brackets, stop, exclamation...
u'!\',.:;?!")]\u3001\u3002\u300d\u300f\u3011\u3015\uff3d\u3011\uff09',
#middle priority e.g. continuation small vowels - wrapped on two lines but one string...
u'\u3005\u2015\u3041\u3043\u3045\u3047\u3049\u3063\u3083\u3085\u3087\u308e\u30a1\u30a3'
u'\u30a5\u30a7\u30a9\u30c3\u30e3\u30e5\u30e7\u30ee\u30fc\u30f5\u30f6',
#weakly prohibited - continuations, celsius symbol etc.
u'\u309b\u309c\u30fb\u30fd\u30fe\u309d\u309e\u2015\u2010\xb0\u2032\u2033\u2103\uffe0\uff05\u2030'
]
ALL_CANNOT_START = u''.join(CANNOT_START_LINE)
CANNOT_END_LINE = [
#strongly prohibited
u'\u2018\u201c\uff08[{\uff08\u3014\uff3b\uff5b\u3008\u300a\u300c\u300e\u3010',
#weaker - currency symbols, hash, postcode - prefixes
u'$\u00a3@#\uffe5\uff04\uffe1\uff20\u3012\u00a7'
]
ALL_CANNOT_END = u''.join(CANNOT_END_LINE)
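# Illustrative only (not part of the original module): characters are tested for
# membership in these strings when deciding where a line may break, e.g.
#
#   u'\u3002' in ALL_CANNOT_START  # True - the ideographic full stop may not start a line
#   u'\u300c' in ALL_CANNOT_END    # True - an opening corner bracket may not end a line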
def getCharWidths(word, fontName, fontSize):
"""Returns a list of glyph widths. Should be easy to optimize in _rl_accel
>>> getCharWidths('Hello', 'Courier', 10)
[6.0, 6.0, 6.0, 6.0, 6.0]
>>> from reportlab.pdfbase.cidfonts import UnicodeCIDFont
>>> from reportlab.pdfbase.pdfmetrics import registerFont
>>> registerFont(UnicodeCIDFont('HeiseiMin-W3'))
>>> getCharWidths(u'\u6771\u4EAC', 'HeiseiMin-W3', 10) #most kanji are 100 ems
[10.0, 10.0]
"""
#character-level function call; the performance is going to SUCK
return [stringWidth(uChar, fontName, fontSize) for uChar in word]
def wordSplit(word, availWidth, fontName, fontSize, encoding='utf8'):
"""Attempts to break a word which lacks spaces into two parts, the first of which
fits in the remaining space. It is allowed to add hyphens or whatever it wishes.
This is intended as a wrapper for some language- and user-choice-specific splitting
algorithms. It should only be called after line breaking on spaces, which covers western
languages and is highly optimised already. It works on the 'last unsplit word'.
Presumably with further study one could write a Unicode splitting algorithm for text
    fragments which was much faster.
Courier characters should be 6 points wide.
>>> wordSplit('HelloWorld', 30, 'Courier', 10)
[[0.0, 'Hello'], [0.0, 'World']]
>>> wordSplit('HelloWorld', 31, 'Courier', 10)
[[1.0, 'Hello'], [1.0, 'World']]
"""
if type(word) is not UnicodeType:
uword = word.decode(encoding)
else:
uword = word
charWidths = getCharWidths(uword, fontName, fontSize)
lines = dumbSplit(uword, charWidths, availWidth)
if type(word) is not UnicodeType:
lines2 = []
#convert back
for (extraSpace, text) in lines:
lines2.append([extraSpace, text.encode(encoding)])
lines = lines2
return lines
def dumbSplit(word, widths, availWidth):
"""This function attempts to fit as many characters as possible into the available
space, cutting "like a knife" between characters. This would do for Chinese.
    It returns a list of (extraSpace, text) items where text is a Unicode string,
and extraSpace is the points of unused space available on the line. This is a
structure which is fairly easy to display, and supports 'backtracking' approaches
after the fact.
Test cases assume each character is ten points wide...
>>> dumbSplit(u'Hello', [10]*5, 60)
[[10.0, u'Hello']]
>>> dumbSplit(u'Hello', [10]*5, 50)
[[0.0, u'Hello']]
>>> dumbSplit(u'Hello', [10]*5, 40)
[[0.0, u'Hell'], [30, u'o']]
"""
_more = """
#>>> dumbSplit(u'Hello', [10]*5, 4) # less than one character
#(u'', u'Hello')
# this says 'Nihongo wa muzukashii desu ne!' (Japanese is difficult isn't it?) in 12 characters
>>> jtext = u'\u65e5\u672c\u8a9e\u306f\u96e3\u3057\u3044\u3067\u3059\u306d\uff01'
>>> dumbSplit(jtext, [10]*11, 30) #
(u'\u65e5\u672c\u8a9e', u'\u306f\u96e3\u3057\u3044\u3067\u3059\u306d\uff01')
"""
assert type(word) is UnicodeType
lines = []
widthUsed = 0.0
lineStartPos = 0
for (i, w) in enumerate(widths):
widthUsed += w
if widthUsed > availWidth + _FUZZ:
#used more than can fit...
#ping out with previous cut, then set up next line with one character
extraSpace = availWidth - widthUsed + w
#print 'ending a line; used %d, available %d' % (widthUsed, availWidth)
selected = word[lineStartPos:i]
#This is the most important of the Japanese typography rules.
#if next character cannot start a line, wrap it up to this line so it hangs
#in the right margin. We won't do two or more though - that's unlikely and
#would result in growing ugliness.
nextChar = word[i]
if nextChar in ALL_CANNOT_START:
#it's punctuation or a closing bracket of some kind. 'wrap up'
#so it stays on the line above, slightly exceeding our target width.
#print 'wrapping up', repr(nextChar)
selected += nextChar
extraSpace -= w
i += 1
lines.append([extraSpace, selected])
lineStartPos = i
widthUsed = w
i -= 1
#any characters left?
if widthUsed > 0:
extraSpace = availWidth - widthUsed
lines.append([extraSpace, word[lineStartPos:]])
return lines
def kinsokuShoriSplit(word, widths, availWidth):
#NOT USED OR FINISHED YET!
"""Split according to Japanese rules according to CJKV (Lunde).
Essentially look for "nice splits" so that we don't end a line
with an open bracket, or start one with a full stop, or stuff like
that. There is no attempt to try to split compound words into
constituent kanji. It currently uses wrap-down: packs as much
on a line as possible, then backtracks if needed
This returns a number of words each of which should just about fit
on a line. If you give it a whole paragraph at once, it will
do all the splits.
It's possible we might slightly step over the width limit
if we do hanging punctuation marks in future (e.g. dangle a Japanese
full stop in the right margin rather than using a whole character
    box).
"""
lines = []
assert len(word) == len(widths)
curWidth = 0.0
curLine = []
i = 0 #character index - we backtrack at times so cannot use for loop
while 1:
ch = word[i]
w = widths[i]
if curWidth + w < availWidth:
curLine.append(ch)
curWidth += w
else:
#end of line. check legality
if ch in CANNOT_END_LINE[0]:
pass
#to be completed
# This recipe refers:
#
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061
import re
rx=re.compile(u"([\u2e80-\uffff])", re.UNICODE)
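# The recipe below greedily wraps mixed CJK/Latin text: the regex above tags every
# CJK character (U+2E80 and up) with a NUL marker so each one becomes its own
# breakable "word"; reduce() then rebuilds the text, joining with '' after a CJK
# character, ' ' otherwise, and '\n' once the current line would reach `width`.
# (Python 2 idioms: the builtin `reduce` and `unicode` are assumed.)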
def cjkwrap(text, width, encoding="utf8"):
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
[' ','\n', ''][(len(line)-line.rfind('\n')-1
+ len(word.split('\n',1)[0] ) >= width) or
line[-1:] == '\0' and 2],
word),
rx.sub(r'\1\0 ', unicode(text,encoding)).split(' ')
).replace('\0', '').encode(encoding)
if __name__=='__main__':
import doctest, textsplit
doctest.testmod(textsplit)
| 41.553922 | 106 | 0.635012 |
87db260441ed7e615f0c65185e195febb06676b2
| 22,929 |
py
|
Python
|
release/nightly_tests/dataset/ray_sgd_training.py
|
iamhatesz/ray
|
ce78d6b9495bd71cb16117032793ae9f133efb68
|
[
"Apache-2.0"
] | 1 |
2021-07-12T11:16:14.000Z
|
2021-07-12T11:16:14.000Z
|
release/nightly_tests/dataset/ray_sgd_training.py
|
RomaKoks/ray
|
4ef0d4a37a42c529af98b0cfb31e505b51088395
|
[
"Apache-2.0"
] | 115 |
2021-01-19T04:40:50.000Z
|
2022-03-26T07:09:00.000Z
|
release/nightly_tests/dataset/ray_sgd_training.py
|
RomaKoks/ray
|
4ef0d4a37a42c529af98b0cfb31e505b51088395
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import collections
import json
import os
import sys
import time
from typing import Tuple
import boto3
import mlflow
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel
import ray
from ray import train
from ray.data.aggregate import Mean, Std
from ray.data.dataset_pipeline import DatasetPipeline
from ray.train import Trainer, TrainingCallback
from ray.train.callbacks import TBXLoggerCallback
# TODO(amogkam): Upstream this into Ray Train.
class MLflowCallback(TrainingCallback):
def __init__(self, config):
self.config = config
def handle_result(self, results, **info):
# For each result that's being reported by ``train.report()``,
# we get the result from the rank 0 worker (i.e. first worker) and
# report it to MLflow.
rank_zero_results = results[0]
mlflow.log_metrics(rank_zero_results)
# TODO: fix type hint for logdir
def start_training(self, logdir, **info):
mlflow.start_run(run_name=str(logdir.name))
        mlflow.log_params(self.config)
# TODO: Update TrainCallback to provide logdir in finish_training.
self.logdir = logdir
def finish_training(self, error: bool = False, **info):
# Save the Trainer checkpoints as artifacts to mlflow.
mlflow.log_artifacts(self.logdir)
def read_dataset(path: str) -> ray.data.Dataset:
print(f"reading data from {path}")
return ray.data.read_parquet(path, _spread_resource_prefix="node:") \
.random_shuffle(_spread_resource_prefix="node:")
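# Illustrative only: e.g. read_dataset("s3://cuj-big-data/data/"), matching the
# data_path used in the __main__ block below.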
class DataPreprocessor:
"""A Datasets-based preprocessor that fits scalers/encoders to the training
dataset and transforms the training, testing, and inference datasets using
those fitted scalers/encoders.
"""
def __init__(self):
# List of present fruits, used for one-hot encoding of fruit column.
self.fruits = None
# Mean and stddev stats used for standard scaling of the feature
# columns.
self.standard_stats = None
def preprocess_train_data(self, ds: ray.data.Dataset
) -> Tuple[ray.data.Dataset, ray.data.Dataset]:
print("\n\nPreprocessing training dataset.\n")
return self._preprocess(ds, False)
def preprocess_inference_data(self,
df: ray.data.Dataset) -> ray.data.Dataset:
print("\n\nPreprocessing inference dataset.\n")
return self._preprocess(df, True)[0]
def _preprocess(self, ds: ray.data.Dataset, inferencing: bool
) -> Tuple[ray.data.Dataset, ray.data.Dataset]:
print(
"\nStep 1: Dropping nulls, creating new_col, updating feature_1\n")
def batch_transformer(df: pd.DataFrame):
# Disable chained assignment warning.
pd.options.mode.chained_assignment = None
# Drop nulls.
df = df.dropna(subset=["nullable_feature"])
# Add new column.
df["new_col"] = (
df["feature_1"] - 2 * df["feature_2"] + df["feature_3"]) / 3.
# Transform column.
df["feature_1"] = 2. * df["feature_1"] + 0.1
return df
ds = ds.map_batches(batch_transformer, batch_format="pandas")
print("\nStep 2: Precalculating fruit-grouped mean for new column and "
"for one-hot encoding (latter only uses fruit groups)\n")
agg_ds = ds.groupby("fruit").mean("feature_1")
fruit_means = {
r["fruit"]: r["mean(feature_1)"]
for r in agg_ds.take_all()
}
print("\nStep 3: create mean_by_fruit as mean of feature_1 groupby "
"fruit; one-hot encode fruit column\n")
if inferencing:
assert self.fruits is not None
else:
assert self.fruits is None
self.fruits = list(fruit_means.keys())
fruit_one_hots = {
            fruit: collections.defaultdict(int, {fruit: 1})  # this fruit -> 1, every other value -> 0
for fruit in self.fruits
}
def batch_transformer(df: pd.DataFrame):
# Add column containing the feature_1-mean of the fruit groups.
df["mean_by_fruit"] = df["fruit"].map(fruit_means)
# One-hot encode the fruit column.
for fruit, one_hot in fruit_one_hots.items():
df[f"fruit_{fruit}"] = df["fruit"].map(one_hot)
# Drop the fruit column, which is no longer needed.
df.drop(columns="fruit", inplace=True)
return df
ds = ds.map_batches(batch_transformer, batch_format="pandas")
if inferencing:
print("\nStep 4: Standardize inference dataset\n")
assert self.standard_stats is not None
else:
assert self.standard_stats is None
print("\nStep 4a: Split training dataset into train-test split\n")
# Split into train/test datasets.
split_index = int(0.9 * ds.count())
# Split into 90% training set, 10% test set.
train_ds, test_ds = ds.split_at_indices([split_index])
print("\nStep 4b: Precalculate training dataset stats for "
"standard scaling\n")
# Calculate stats needed for standard scaling feature columns.
feature_columns = [
col for col in train_ds.schema().names if col != "label"
]
standard_aggs = [
agg(on=col) for col in feature_columns for agg in (Mean, Std)
]
self.standard_stats = train_ds.aggregate(*standard_aggs)
print("\nStep 4c: Standardize training dataset\n")
# Standard scaling of feature columns.
standard_stats = self.standard_stats
def batch_standard_scaler(df: pd.DataFrame):
def column_standard_scaler(s: pd.Series):
if s.name == "label":
# Don't scale the label column.
return s
s_mean = standard_stats[f"mean({s.name})"]
s_std = standard_stats[f"std({s.name})"]
return (s - s_mean) / s_std
return df.transform(column_standard_scaler)
if inferencing:
# Apply standard scaling to inference dataset.
inference_ds = ds.map_batches(
batch_standard_scaler, batch_format="pandas")
return inference_ds, None
else:
# Apply standard scaling to both training dataset and test dataset.
train_ds = train_ds.map_batches(
batch_standard_scaler, batch_format="pandas")
test_ds = test_ds.map_batches(
batch_standard_scaler, batch_format="pandas")
return train_ds, test_ds
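# Illustrative usage only (not part of the original script): the preprocessor is
# fitted once on the training data and the same fitted state is reused for the
# inference data, assuming the data_path / inference_path values set in __main__.
#
# preprocessor = DataPreprocessor()
# train_ds, test_ds = preprocessor.preprocess_train_data(read_dataset(data_path))
# inference_ds = preprocessor.preprocess_inference_data(read_dataset(inference_path))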
def inference(dataset, model_cls: type, batch_size: int, result_path: str,
use_gpu: bool):
print("inferencing...")
num_gpus = 1 if use_gpu else 0
dataset \
.map_batches(
model_cls,
compute="actors",
batch_size=batch_size,
num_gpus=num_gpus,
num_cpus=0) \
.write_parquet(result_path)
"""
TODO: Define neural network code in pytorch
P0:
1. can take arguments to change size of net arbitrarily so we can stress test
against distributed training on cluster
2. has a network (nn.module?), optimizer, and loss function for binary
classification
3. has some semblance of regularization (i.e. via dropout) so that this
artificially gigantic net doesn't just overfit horrendously
4. works well with pytorch dataset we'll create from Ray data
.to_torch_dataset()
P1:
1. also tracks AUC for training and testing sets and records it to tensorboard
"""
class Net(nn.Module):
def __init__(self, n_layers, n_features, num_hidden, dropout_every,
drop_prob):
super().__init__()
self.n_layers = n_layers
self.dropout_every = dropout_every
self.drop_prob = drop_prob
self.fc_input = nn.Linear(n_features, num_hidden)
self.relu_input = nn.ReLU()
for i in range(self.n_layers):
layer = nn.Linear(num_hidden, num_hidden)
relu = nn.ReLU()
dropout = nn.Dropout(p=self.drop_prob)
setattr(self, f"fc_{i}", layer)
setattr(self, f"relu_{i}", relu)
if i % self.dropout_every == 0:
# only apply every few layers
setattr(self, f"drop_{i}", dropout)
self.add_module(f"drop_{i}", dropout)
self.add_module(f"fc_{i}", layer)
self.add_module(f"relu_{i}", relu)
self.fc_output = nn.Linear(num_hidden, 1)
def forward(self, x):
x = self.fc_input(x)
x = self.relu_input(x)
for i in range(self.n_layers):
x = getattr(self, f"fc_{i}")(x)
x = getattr(self, f"relu_{i}")(x)
if i % self.dropout_every == 0:
x = getattr(self, f"drop_{i}")(x)
x = self.fc_output(x)
return x
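# Illustrative only (not part of the original script): a tiny forward pass showing
# the constructor arguments; the sizes here are arbitrary.
#
# net = Net(n_layers=4, n_features=32, num_hidden=64, dropout_every=2, drop_prob=0.2)
# logits = net(torch.randn(8, 32))  # shape (8, 1), raw logits for BCEWithLogitsLoss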
def train_epoch(dataset, model, device, criterion, optimizer):
num_correct = 0
num_total = 0
running_loss = 0.0
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
loss.backward()
optimizer.step()
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"training batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def test_epoch(dataset, model, device, criterion):
num_correct = 0
num_total = 0
running_loss = 0.0
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataset):
inputs = inputs.to(device)
labels = labels.to(device)
# Forward + backward + optimize
outputs = model(inputs.float())
loss = criterion(outputs, labels.float())
# how are we doing?
predictions = (torch.sigmoid(outputs) > 0.5).int()
num_correct += (predictions == labels).sum().item()
num_total += len(outputs)
# Save loss to plot
running_loss += loss.item()
if i % 100 == 0:
print(f"testing batch [{i}] loss: {loss.item()}")
return (running_loss, num_correct, num_total)
def train_func(config):
use_gpu = config["use_gpu"]
num_epochs = config["num_epochs"]
batch_size = config["batch_size"]
num_layers = config["num_layers"]
num_hidden = config["num_hidden"]
dropout_every = config["dropout_every"]
dropout_prob = config["dropout_prob"]
num_features = config["num_features"]
print("Defining model, loss, and optimizer...")
# Setup device.
device = torch.device(f"cuda:{train.local_rank()}"
if use_gpu and torch.cuda.is_available() else "cpu")
print(f"Device: {device}")
# Setup data.
train_dataset_pipeline = train.get_dataset_shard("train_dataset")
train_dataset_epoch_iterator = train_dataset_pipeline.iter_epochs()
test_dataset = train.get_dataset_shard("test_dataset")
test_torch_dataset = test_dataset.to_torch(
label_column="label", batch_size=batch_size)
net = Net(
n_layers=num_layers,
n_features=num_features,
num_hidden=num_hidden,
dropout_every=dropout_every,
drop_prob=dropout_prob,
).to(device)
print(net.parameters)
net = train.torch.prepare_model(net)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(net.parameters(), weight_decay=0.0001)
print("Starting training...")
for epoch in range(num_epochs):
train_dataset = next(train_dataset_epoch_iterator)
train_torch_dataset = train_dataset.to_torch(
label_column="label", batch_size=batch_size)
train_running_loss, train_num_correct, train_num_total = train_epoch(
train_torch_dataset, net, device, criterion, optimizer)
train_acc = train_num_correct / train_num_total
print(f"epoch [{epoch + 1}]: training accuracy: "
f"{train_num_correct} / {train_num_total} = {train_acc:.4f}")
test_running_loss, test_num_correct, test_num_total = test_epoch(
test_torch_dataset, net, device, criterion)
test_acc = test_num_correct / test_num_total
print(f"epoch [{epoch + 1}]: testing accuracy: "
f"{test_num_correct} / {test_num_total} = {test_acc:.4f}")
# Record and log stats.
train.report(
train_acc=train_acc,
train_loss=train_running_loss,
test_acc=test_acc,
test_loss=test_running_loss)
# Checkpoint model.
module = (net.module
if isinstance(net, DistributedDataParallel) else net)
train.save_checkpoint(model_state_dict=module.state_dict())
if train.world_rank() == 0:
return module.cpu()
@ray.remote
class TrainingWorker:
def __init__(self, rank: int, shard: DatasetPipeline, batch_size: int):
self.rank = rank
self.shard = shard
self.batch_size = batch_size
def train(self):
for epoch, training_dataset in enumerate(self.shard.iter_datasets()):
            # The following code emulates epoch-based SGD training.
print(f"Training... worker: {self.rank}, epoch: {epoch}")
for i, _ in enumerate(
training_dataset.to_torch(
batch_size=self.batch_size, label_column="label")):
if i % 10000 == 0:
print(f"epoch: {epoch}, worker: {self.rank}, batch: {i}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--use-s3",
action="store_true",
default=False,
help="Use data from s3 for testing.")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.")
parser.add_argument(
"--address",
required=False,
type=str,
help=("The address to use for Ray. `auto` if running through "
"`ray submit`"))
parser.add_argument(
"--num-workers",
default=1,
type=int,
help="The number of Ray workers to use for distributed training")
parser.add_argument(
"--large-dataset",
action="store_true",
default=False,
help="Use 500GB dataset")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Use GPU for training.")
parser.add_argument(
"--mlflow-register-model",
action="store_true",
help="Whether to use mlflow model registry. If set, a local MLflow "
"tracking server is expected to have already been started.")
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Use dummy trainer to debug dataset performance")
args = parser.parse_args()
smoke_test = args.smoke_test
address = args.address
num_workers = args.num_workers
use_gpu = args.use_gpu
use_s3 = args.use_s3
large_dataset = args.large_dataset
if large_dataset:
assert use_s3, "--large-dataset requires --use-s3 to be set."
start_time = time.time()
ray.init(address=address)
# Setup MLflow.
# By default, all metrics & artifacts for each run will be saved to disk
    # in the ./mlruns directory. Uncomment the lines below if you want to
    # change the tracking URI.
# TODO: Use S3 backed tracking server for golden notebook.
if args.mlflow_register_model:
        # The MLflow model registry does not work with a local filesystem
        # backend, so an MLflow tracking server must already be running on
        # localhost.
mlflow.set_tracking_uri("http://127.0.0.1:5000")
    # Set the experiment. This will create the experiment if it does not
    # already exist.
mlflow.set_experiment("cuj-big-data-training")
if use_s3:
# Check if s3 data is populated.
BUCKET_NAME = "cuj-big-data"
FOLDER_NAME = "big-data/" if large_dataset else "data/"
s3_resource = boto3.resource("s3")
bucket = s3_resource.Bucket(BUCKET_NAME)
count = bucket.objects.filter(Prefix=FOLDER_NAME)
if len(list(count)) == 0:
print("please run `python make_and_upload_dataset.py` first")
sys.exit(1)
# 156 files, 3_120_000_000 rows and 501_748_803_387 bytes
data_path = ("s3://cuj-big-data/big-data/"
if large_dataset else "s3://cuj-big-data/data/")
inference_path = "s3://cuj-big-data/inference/"
inference_output_path = "s3://cuj-big-data/output/"
else:
dir_path = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(dir_path, "data")
inference_path = os.path.join(dir_path, "inference")
inference_output_path = "/tmp"
if len(os.listdir(data_path)) <= 1 or len(
os.listdir(inference_path)) <= 1:
print("please run `python make_and_upload_dataset.py` first")
sys.exit(1)
if smoke_test:
# Only read a single file.
data_path = os.path.join(data_path, "data_00000.parquet.snappy")
inference_path = os.path.join(inference_path,
"data_00000.parquet.snappy")
preprocessor = DataPreprocessor()
train_dataset, test_dataset = preprocessor.preprocess_train_data(
read_dataset(data_path))
num_columns = len(train_dataset.schema().names)
# remove label column and internal Arrow column.
num_features = num_columns - 2
NUM_EPOCHS = 2
BATCH_SIZE = 512
NUM_HIDDEN = 50 # 200
NUM_LAYERS = 3 # 15
DROPOUT_EVERY = 5
DROPOUT_PROB = 0.2
if args.debug:
shards = train_dataset.repeat(NUM_EPOCHS) \
.random_shuffle_each_window(_spread_resource_prefix="node:") \
.split(num_workers)
del train_dataset
num_gpus = 1 if use_gpu else 0
training_workers = [
TrainingWorker.options(num_gpus=num_gpus, num_cpus=0).remote(
rank, shard, BATCH_SIZE) for rank, shard in enumerate(shards)
]
ray.get([worker.train.remote() for worker in training_workers])
total_time = time.time() - start_time
print(f"Job finished in {total_time} seconds.")
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(json.dumps({"time": total_time, "success": 1}))
exit()
# Random global shuffle
train_dataset_pipeline = train_dataset.repeat() \
.random_shuffle_each_window(_spread_resource_prefix="node:")
del train_dataset
datasets = {
"train_dataset": train_dataset_pipeline,
"test_dataset": test_dataset
}
config = {
"use_gpu": use_gpu,
"num_epochs": NUM_EPOCHS,
"batch_size": BATCH_SIZE,
"num_hidden": NUM_HIDDEN,
"num_layers": NUM_LAYERS,
"dropout_every": DROPOUT_EVERY,
"dropout_prob": DROPOUT_PROB,
"num_features": num_features
}
# Create 2 callbacks: one for Tensorboard Logging and one for MLflow
# logging. Pass these into Trainer, and all results that are
# reported by ``train.report()`` will be logged to these 2 places.
# TODO: TBXLoggerCallback should create nonexistent logdir
# and should also create 1 directory per file.
callbacks = [TBXLoggerCallback(logdir="/tmp"), MLflowCallback(config)]
# Remove CPU resource so Datasets can be scheduled.
resources_per_worker = {"CPU": 0, "GPU": 1} if use_gpu else None
trainer = Trainer(
backend="torch",
num_workers=num_workers,
use_gpu=use_gpu,
resources_per_worker=resources_per_worker)
trainer.start()
results = trainer.run(
train_func=train_func,
config=config,
callbacks=callbacks,
dataset=datasets)
model = results[0]
trainer.shutdown()
if args.mlflow_register_model:
mlflow.pytorch.log_model(
model, artifact_path="models", registered_model_name="torch_model")
# Get the latest model from mlflow model registry.
client = mlflow.tracking.MlflowClient()
registered_model_name = "torch_model"
# Get the info for the latest model.
# By default, registered models are in stage "None".
latest_model_info = client.get_latest_versions(
registered_model_name, stages=["None"])[0]
latest_version = latest_model_info.version
def load_model_func():
model_uri = f"models:/torch_model/{latest_version}"
return mlflow.pytorch.load_model(model_uri)
else:
state_dict = model.state_dict()
def load_model_func():
num_layers = config["num_layers"]
num_hidden = config["num_hidden"]
dropout_every = config["dropout_every"]
dropout_prob = config["dropout_prob"]
num_features = config["num_features"]
model = Net(
n_layers=num_layers,
n_features=num_features,
num_hidden=num_hidden,
dropout_every=dropout_every,
drop_prob=dropout_prob)
model.load_state_dict(state_dict)
return model
class BatchInferModel:
def __init__(self, load_model_func):
self.device = torch.device("cuda:0"
if torch.cuda.is_available() else "cpu")
self.model = load_model_func().to(self.device)
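        # Convert the Arrow batch to a float tensor, run the model, and
        # return the raw model outputs as a pandas DataFrame.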
def __call__(self, batch) -> "pd.DataFrame":
tensor = torch.FloatTensor(batch.to_pandas().values).to(
self.device)
return pd.DataFrame(self.model(tensor).cpu().detach().numpy())
inference_dataset = preprocessor.preprocess_inference_data(
read_dataset(inference_path))
inference(inference_dataset, BatchInferModel(load_model_func), 100,
inference_output_path, use_gpu)
end_time = time.time()
total_time = end_time - start_time
print(f"Job finished in {total_time} seconds.")
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(json.dumps({"time": total_time, "success": 1}))
| 34.846505 | 79 | 0.620001 |
3e1e7f1c296ecbff3c161b23a9857f58430d6dcb
| 627 |
py
|
Python
|
tests/test_extract_title.py
|
ssklyg36/mdut
|
98874be1ea422e23fbb61e46c205718afd026cbf
|
[
"MIT"
] | 1 |
2022-01-09T18:06:09.000Z
|
2022-01-09T18:06:09.000Z
|
tests/test_extract_title.py
|
ssklyg36/mdut
|
98874be1ea422e23fbb61e46c205718afd026cbf
|
[
"MIT"
] | null | null | null |
tests/test_extract_title.py
|
ssklyg36/mdut
|
98874be1ea422e23fbb61e46c205718afd026cbf
|
[
"MIT"
] | null | null | null |
import pytest
from mdut.mdut import extract_title
@pytest.mark.parametrize(
"html,title",
[
("<html><head><title>foo</title></head></html>", "foo"),
("<html><head><title>bar</title>", "bar"),
("<html><head><title>baz</head></html>", "baz"),
],
)
def test_valid(html, title):
assert extract_title(html) == title
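# Inputs with no usable <title> inside <head>, or non-string inputs, are
# expected to fall back to the "TODO" placeholder.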
@pytest.mark.parametrize(
"html",
[
"<html><head></head></html>",
"<html><body><title>nope</body></html>",
"foo",
"",
None,
13,
int,
],
)
def test_invalid(html):
assert extract_title(html) == "TODO"
| 19.59375 | 64 | 0.526316 |
119b935c0ca101aacb6558c8f0f847d0a3ae1b5a
| 942 |
py
|
Python
|
estrategias/e_Edu.py
|
lucasmoschen/jogos_vorazes
|
a483d901a32e95acb9fa17fca0f72c743122992c
|
[
"MIT"
] | 1 |
2021-01-29T05:20:25.000Z
|
2021-01-29T05:20:25.000Z
|
estrategias/e_Edu.py
|
lucasmoschen/jogos_vorazes
|
a483d901a32e95acb9fa17fca0f72c743122992c
|
[
"MIT"
] | null | null | null |
estrategias/e_Edu.py
|
lucasmoschen/jogos_vorazes
|
a483d901a32e95acb9fa17fca0f72c743122992c
|
[
"MIT"
] | 19 |
2016-01-15T17:24:45.000Z
|
2021-01-28T18:12:50.000Z
|
__author__ = 'Bruno Almeida'
from random import choice
from estrategias.jogadores import Jogador
class MeuJogador(Jogador):
def __init__(self):
self.rodada_par = False
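    # Strategy: hunt ('c') on round 1; with fewer than 3 players or from round
    # 350 onward, hunt only with players whose reputation equals ours;
    # otherwise alternate between all-'d' and all-'c' rounds.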
def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores):
if rodada == 1:
escolhas = [ 'c' for x in reputacoes_dos_jogadores]
self.rodada_par = True
elif len(reputacoes_dos_jogadores)<3 or rodada >=350:
escolhas = ['c' if x==reputacao_atual else 'd' for x in reputacoes_dos_jogadores]
else:
if self.rodada_par:
escolhas = [ 'd' for x in reputacoes_dos_jogadores]
self.rodada_par = False
else:
escolhas = [ 'c' for x in reputacoes_dos_jogadores]
self.rodada_par = True
return escolhas
| 28.545455 | 100 | 0.576433 |
83e134606e3f06152ba22c612838add25c2dc337
| 3,246 |
py
|
Python
|
tests/integration/helpers/http_server.py
|
torilov/ClickHouse
|
bbbe51033dfd5b8c3d54e168475ca707ac7ec0b4
|
[
"Apache-2.0"
] | 1 |
2020-10-01T01:54:37.000Z
|
2020-10-01T01:54:37.000Z
|
tests/integration/helpers/http_server.py
|
torilov/ClickHouse
|
bbbe51033dfd5b8c3d54e168475ca707ac7ec0b4
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/helpers/http_server.py
|
torilov/ClickHouse
|
bbbe51033dfd5b8c3d54e168475ca707ac7ec0b4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import csv
import socket
import ssl
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# Decorator used to check that authentication works for external dictionaries that use an HTTP source.
def check_auth(fn):
def wrapper(req):
auth_header = req.headers.get('authorization', None)
api_key = req.headers.get('api-key', None)
if not auth_header or auth_header != 'Basic Zm9vOmJhcg==' or not api_key or api_key != 'secret':
req.send_response(401)
else:
fn(req)
return wrapper
def start_server(server_address, data_path, schema, cert_path, address_family):
class TSVHTTPHandler(BaseHTTPRequestHandler):
@check_auth
def do_GET(self):
self.__send_headers()
self.__send_data()
@check_auth
def do_POST(self):
ids = self.__read_and_decode_post_ids()
print "ids=", ids
self.__send_headers()
self.__send_data(ids)
def __send_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/tsv')
self.end_headers()
def __send_data(self, only_ids=None):
with open(data_path, 'r') as fl:
reader = csv.reader(fl, delimiter='\t')
for row in reader:
if not only_ids or (row[0] in only_ids):
self.wfile.write('\t'.join(row) + '\n')
def __read_and_decode_post_ids(self):
data = self.__read_and_decode_post_data()
return filter(None, data.split())
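        # Read the request body, handling both chunked transfer encoding and
        # plain Content-Length bodies.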
def __read_and_decode_post_data(self):
transfer_encoding = self.headers.get("Transfer-encoding")
            decoded = ""
if transfer_encoding == "chunked":
while True:
s = self.rfile.readline()
chunk_length = int(s, 16)
if not chunk_length:
break
decoded += self.rfile.read(chunk_length)
self.rfile.readline()
else:
content_length = int(self.headers.get("Content-Length", 0))
decoded = self.rfile.read(content_length)
return decoded
if address_family == "ipv6":
HTTPServer.address_family = socket.AF_INET6
httpd = HTTPServer(server_address, TSVHTTPHandler)
if schema == "https":
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert_path, server_side=True)
httpd.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Simple HTTP server returns data from file")
parser.add_argument("--host", default="localhost")
parser.add_argument("--port", default=5555, type=int)
parser.add_argument("--data-path", required=True)
parser.add_argument("--schema", choices=("http", "https"), required=True)
parser.add_argument("--cert-path", default="./fake_cert.pem")
parser.add_argument('--address-family', choices=("ipv4", "ipv6"), default="ipv4")
args = parser.parse_args()
start_server((args.host, args.port), args.data_path, args.schema, args.cert_path, args.address_family)
| 36.886364 | 106 | 0.610906 |
e547c2df21e84b0a7ba4a6de2399e683209a4b03
| 5,229 |
py
|
Python
|
doc/source/code/tut7.py
|
andaole/netpyne
|
9ff475ff722fa8901d39008fab89d020e357b9ef
|
[
"MIT"
] | null | null | null |
doc/source/code/tut7.py
|
andaole/netpyne
|
9ff475ff722fa8901d39008fab89d020e357b9ef
|
[
"MIT"
] | null | null | null |
doc/source/code/tut7.py
|
andaole/netpyne
|
9ff475ff722fa8901d39008fab89d020e357b9ef
|
[
"MIT"
] | null | null | null |
"""
params.py
netParams is an object of class NetParams containing a set of network parameters using a standardized structure
simConfig is an object of class SimConfig containing a set of simulation configurations using a standardized structure
Contributors: [email protected]
"""
from netpyne import specs
###############################################################################
# NETWORK PARAMETERS
###############################################################################
netParams = specs.NetParams() # object of class NetParams to store the network parameters
# Population parameters
netParams.popParams['hop'] = {'cellType': 'PYR', 'cellModel': 'HH', 'numCells': 50} # add dict with params for this pop
#netParams.popParams['background'] = {'cellModel': 'NetStim', 'rate': 50, 'noise': 0.5} # background inputs
# Cell parameters
## PYR cell properties
cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}}
cellRule['secs']['soma'] = {'geom': {}, 'topol': {}, 'mechs': {}} # soma properties
cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8}
cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70}
netParams.cellParams['PYR'] = cellRule # add dict to list of cell properties
# Synaptic mechanism parameters
netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 1.0, 'e': 0}
netParams.synMechParams['inh'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 1.0, 'e': -80}
# Stimulation parameters
netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 50, 'noise': 0.5}
netParams.stimTargetParams['bkg->all'] = {'source': 'bkg', 'conds': {'pop': 'hop'}, 'weight': 0.1, 'delay': 1, 'synMech': 'exc'}
# Connectivity parameters
netParams.connParams['hop->hop'] = {
'preConds': {'pop': 'hop'}, 'postConds': {'pop': 'hop'},
'weight': 0.0, # weight of each connection
'synMech': 'inh', # target inh synapse
'delay': 5} # delay
###############################################################################
# SIMULATION PARAMETERS
###############################################################################
simConfig = specs.SimConfig() # object of class SimConfig to store simulation configuration
# Simulation options
simConfig.duration = 0.5*1e3 # Duration of the simulation, in ms
simConfig.dt = 0.025 # Internal integration timestep to use
simConfig.verbose = False # Show detailed messages
simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
simConfig.recordStep = 1 # Step size in ms to save data (eg. V traces, LFP, etc)
simConfig.filename = 'model_output' # Set file output name
simConfig.savePickle = False # Save params, network and sim output to pickle file
simConfig.analysis['plotRaster'] = {'syncLines': True} # Plot a raster
simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells
simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections
###############################################################################
# EXECUTION CODE (via netpyne)
###############################################################################
from netpyne import sim
# Create network and run simulation
sim.initialize( # create network object and set cfg and net params
simConfig = simConfig, # pass simulation config and network params as arguments
netParams = netParams)
sim.net.createPops() # instantiate network populations
sim.net.createCells() # instantiate network cells based on defined populations
sim.net.connectCells() # create connections between cells based on params
sim.net.addStims() # add stimulation
sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.runSim() # run parallel Neuron simulation
sim.gatherData() # gather spiking data and cell info from each node
sim.saveData() # save params, cell info and sim output to file (pickle,mat,txt,etc)
sim.analysis.plotData() # plot spike raster
###############################################################################
# INTERACTING WITH INSTANTIATED NETWORK
###############################################################################
# modify conn weights
sim.net.modifyConns({'conds': {'label': 'hop->hop'}, 'weight': 0.5})
sim.runSim() # run parallel Neuron simulation
sim.gatherData() # gather spiking data and cell info from each node
sim.saveData() # save params, cell info and sim output to file (pickle,mat,txt,etc)
sim.analysis.plotData() # plot spike raster
# modify cells geometry
sim.net.modifyCells({'conds': {'pop': 'hop'},
'secs': {'soma': {'geom': {'L': 160}}}})
sim.simulate()
from netpyne import __gui__
if __gui__:
sim.analysis.plotRaster(syncLines=True)
sim.analysis.plotTraces(include = [1])
| 44.313559 | 128 | 0.560337 |