| Column | Type | Length / range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (contains nulls) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (contains nulls) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (contains nulls) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
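The rows that follow are raw example records as rendered by the dataset viewer. As a minimal sketch of how records with this schema could be consumed programmatically (the dataset's actual Hub repository id is not shown in this dump, so the id below is a placeholder, and the filter thresholds are purely illustrative), the Hugging Face `datasets` library can stream and filter rows by the columns listed above:

```python
# Minimal sketch only; "user/some-code-dataset" is a placeholder, not the real
# repository id, and the star-count threshold is illustrative.
import itertools

from datasets import load_dataset

ds = load_dataset("user/some-code-dataset", split="train", streaming=True)

# Keep non-vendored, non-generated files from repositories with some popularity.
filtered = ds.filter(
    lambda row: not row["is_vendor"]
    and not row["is_generated"]
    and row["star_events_count"] >= 100
)

# Stream a handful of records without downloading the whole dataset.
for row in itertools.islice(filtered, 3):
    print(row["repo_name"], row["path"], f"{row['length_bytes']} bytes")
```

The column names in the sketch are taken directly from the schema table above; everything else is an assumption for illustration.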
535268bab86addff45ad4349f708df0310b4627d | d5c1f39f619e49be02238bbd9c327103ee0c6199 | /vai/plugins/commands/Time/Time.py | 4ed8dc22387aae6b3ff5563e24423513b64af622 | [] | no_license | CRY-D/vai | 520d35ef1b32de2e4058f64a73cd1b9da9b2e313 | 7e6981690209e8ccd9a6e6f64d2f2a6c7426ef3f | refs/heads/master | 2023-07-06T18:24:43.884669 | 2021-07-20T09:40:48 | 2021-07-20T09:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py |
from vai import sdk
import time


class TimePlugin(sdk.CommandPlugin):
    def name(self):
        """
        To be reimplement in the plugin
        """
        return "Time"

    def keyword(self):
        return "time"

    def execute(self, command_line):
        sdk.statusBar().setMessage(time.asctime(), 3000)
| [
"[email protected]"
] | |
3933ec1fd97d722f5292bc31f1a94483cfcf3809 | 1fdc846f4e5b7bda56e8740b859c8340d9b5141a | /sfepy/fem/region.py | 4984860518fb2a1df5196debf2fd39474ed6f272 | [
"BSD-3-Clause"
] | permissive | olivierverdier/sfepy | b824fdab7d91e137a371c277901fbb807b316b02 | 83aefb7b33ea17f4acb83388ba8bc7314c77616c | refs/heads/master | 2021-01-18T05:39:13.127137 | 2010-10-25T13:13:18 | 2010-10-25T17:31:37 | 1,022,869 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,130 | py | from sfepy.base.base import *
_depends = re.compile( 'r\.([a-zA-Z_0-9]+)' ).findall
def get_parents(selector):
"""Given a region selector, return names of regions it is based on."""
parents = _depends(selector)
return parents
def get_dependency_graph(region_defs):
"""Return a dependency graph and a name-sort name mapping for given
region definitions."""
graph = {}
name_to_sort_name = {}
for sort_name, rdef in region_defs.iteritems():
name, sel = rdef.name, rdef.select
## print sort_name, name, sel
if name_to_sort_name.has_key( name ):
msg = 'region %s/%s already defined!' % (sort_name, name)
raise ValueError(msg)
name_to_sort_name[name] = sort_name
if not graph.has_key( name ):
graph[name] = [0]
for parent in get_parents(sel):
graph[name].append(parent)
## print graph
return graph, name_to_sort_name
##
# 15.06.2006, c
# 17.07.2006
# 04.09.2006
def sort_by_dependency( graph ):
out = []
n_nod = len( graph )
idone = 0
idone0 = -1
while idone < n_nod:
dep_removed = 0
for node, deps in graph.iteritems():
# print '--', node, deps
if (len( deps ) == 1) and not deps[0]:
out.append( node )
deps[0] = 1
idone += 1
elif not deps[0]:
# print '--->', deps
for ii, dep in enumerate( deps[1:] ):
if graph[dep][0]:
ir = deps.index( dep )
deps.pop( ir )
dep_removed += 1
# print '---<', deps
## print graph
## print out
## print idone, idone0, n_nod, dep_removed
## pause()
if (idone <= idone0) and not dep_removed:
raise ValueError, 'circular dependency'
idone0 = idone
return out
##
# 15.06.2006, c
def _join( def1, op, def2 ):
return '(' + def1 + ' ' + op + ' ' + def2 + ')'
##
# 31.10.2005, c
class Region( Struct ):
##
# 14.06.2006, c
# 15.06.2006
# 23.02.2007
def __init__( self, name, definition, domain, parse_def ):
"""conns, vertex_groups are links to domain data"""
Struct.__init__(self,
name = name, definition = definition,
n_v_max = domain.shape.n_nod, domain = domain,
parse_def = parse_def, all_vertices = None,
igs = [], vertices = {}, edges = {}, faces = {},
cells = {}, fis = {},
can_cells = True, must_update = True,
is_complete = False,
mirror_region = None, ig_map = None,
ig_map_i = None)
##
# 15.06.2006, c
def light_copy( self, name, parse_def ):
return Region( name, self.definition, self.domain, parse_def )
##
# c: 15.06.2006, r: 04.02.2008
def update_groups( self, force = False ):
"""Vertices common to several groups are listed only in all of them -
fa, ed.unique_indx contain no edge/face duplicates already."""
if self.must_update or force:
self.igs = []
self.vertices = {}
self.cells = {}
for group in self.domain.iter_groups():
ig = group.ig
vv = nm.intersect1d( group.vertices, self.all_vertices )
if len( vv ) == 0: continue
self.igs.append( ig )
self.vertices[ig] = vv
if self.can_cells:
mask = nm.zeros( self.n_v_max, nm.int32 )
mask[vv] = 1
conn = group.conn
aux = nm.sum( mask[conn], 1, dtype = nm.int32 )
rcells = nm.where( aux == conn.shape[1] )[0]
self.cells[ig] = nm.asarray( rcells, dtype = nm.int32 )
self.must_update = False
##
# 15.06.2006, c
def update_vertices( self ):
self.all_vertices = nm.zeros( (0,), nm.int32 )
self.vertices = {}
for ig, group in self.domain.iter_groups( self.igs ):
rcells = self.cells[ig]
conn = group.conn
nods = conn[rcells,:].ravel()
aux = nm.unique( nods )
self.vertices[ig] = aux
self.all_vertices = nm.unique( nm.r_[self.all_vertices, aux] )
##
# 15.06.2006, c
def set_vertices( self, vertices ):
self.all_vertices = nm.array(vertices, dtype=nm.int32)
self.update_groups( force = True )
self.is_complete = False
##
# c: 15.06.2006, r: 14.07.2008
def set_cells( self, cells ):
self.igs = []
self.cells = {}
for ig, rcells in cells.iteritems():
self.cells[ig] = nm.array( rcells, dtype = nm.int32, ndmin = 1 )
self.igs.append( ig )
self.update_vertices()
self.is_complete = False
self.must_update = False
##
# 15.06.2006, c
def set_from_group( self, ig, vertices, n_cell ):
self.igs = [ig]
self.cells = {ig : nm.arange( n_cell, dtype = nm.int32 )}
self.vertices = {ig: vertices.copy()}
self.all_vertices = vertices.copy()
self.must_update = False
##
# c: 23.02.2007, r: 22.01.2008
def delete_groups( self, digs ):
"""self.complete_description must be called after!"""
for ig in digs:
try:
del self.vertices[ig]
del self.cells[ig]
self.igs.remove( ig )
except KeyError:
pass
##
# 17.07.2007, c
def switch_cells( self, can_cells ):
if self.can_cells:
self.can_cells = can_cells
if not can_cells:
self.cells = {}
else:
self.can_cells = can_cells
if can_cells:
self.update_groups( force = True )
def complete_description(self, ed, fa):
"""
self.edges, self.faces list edge/face indices per group
(pointers to ed.facets, fa.facets) - repetitions among groups
are possible.
"""
##
# Get edges, faces, etc. par subdomain.
edges = ed.facets
if fa is not None:
faces = fa.facets
self.edges = {}
self.faces = {}
self.shape = {}
for ig, group in self.domain.iter_groups( self.igs ):
vv = self.vertices[ig]
if self.cells.has_key( ig ):
n_cell = self.cells[ig].shape[0]
else:
n_cell = 0
self.shape[ig] = Struct( n_vertex = vv.shape[0],
n_cell = n_cell )
if len( vv ) == 0: continue
mask = nm.zeros( self.n_v_max, nm.int32 )
mask[vv] = 1
indx = ed.indx[ig]
aux = nm.sum(mask[edges[indx]], 1)
# Points to ed.facets.
redges = indx.start + nm.where( aux == 2 )[0]
self.edges[ig] = redges
if fa is None: continue
n_fp = fa.n_fps[ig]
indx = fa.indx[ig]
aux = nm.sum(mask[faces[indx,:n_fp]], 1)
# Points to fa.facets.
rfaces = indx.start + nm.where(aux == n_fp)[0]
self.faces[ig] = rfaces
self.shape[ig].n_edge = redges.shape[0]
self.shape[ig].n_face = rfaces.shape[0]
self.is_complete = True
def setup_face_indices(self, reset=True):
"""
Initialize an array (per group) of (iel, ifa) for each face.
"""
if reset or not self.fis:
fa = self.domain.get_facets(force_faces=True)[1]
if self.faces:
faces = self.faces
else:
faces = self.edges
self.fis = {}
for ig in self.igs:
rfaces = faces[ig]
fi = fa.indices[rfaces]
assert_(nm.all(fi[:,0] == ig))
self.fis[ig] = fi[:,1:].copy()
##
# 05.09.2006, c
# 22.02.2007
# 17.07.2007
def select_cells( self, n_verts ):
"""Select cells containing at least n_verts[ii] vertices per group ii."""
if not self.can_cells:
print 'region %s cannot have cells!' % self.name
raise ValueError
self.cells = {}
for ig, group in self.domain.iter_groups( self.igs ):
vv = self.vertices[ig]
if len( vv ) == 0: continue
mask = nm.zeros( self.n_v_max, nm.int32 )
mask[vv] = 1
aux = nm.sum( mask[group.conn], 1 )
rcells = nm.where( aux >= n_verts[ig] )[0]
# print rcells.shape
self.cells[ig] = rcells
def select_cells_of_surface(self, reset=True):
"""
Select cells corresponding to faces (or edges in 2D).
"""
if not self.can_cells:
raise ValueError('region %s cannot have cells!' % self.name)
self.setup_face_indices(reset=reset)
if self.faces:
faces = self.faces
else:
faces = self.edges
self.cells = {}
for ig in self.igs:
rcells = self.fis[ig][:,0]
self.cells[ig]= rcells
##
# 02.03.2007, c
def copy( self ):
"""Vertices-based copy."""
tmp = self.light_copy( 'copy', self.parse_def )
tmp.set_vertices( copy( self.all_vertices ) )
return tmp
##
# 15.06.2006, c
def sub_n( self, other ):
tmp = self.light_copy( 'op',
_join( self.parse_def, '-n', other.parse_def ) )
tmp.set_vertices( nm.setdiff1d( self.all_vertices,
other.all_vertices ) )
return tmp
##
# 15.06.2006, c
def add_n( self, other ):
tmp = self.light_copy( 'op',
_join( self.parse_def, '+n', other.parse_def ) )
tmp.set_vertices( nm.union1d( self.all_vertices,
other.all_vertices ) )
return tmp
##
# 15.06.2006, c
def intersect_n( self, other ):
tmp = self.light_copy( 'op',
_join( self.parse_def, '*n', other.parse_def ) )
tmp.set_vertices( nm.intersect1d( self.all_vertices,
other.all_vertices ) )
return tmp
##
# c: 15.06.2006, r: 15.04.2008
def sub_e( self, other ):
tmp = self.light_copy( 'op',
_join( self.parse_def, '-e', other.parse_def ) )
for ig in self.igs:
if ig not in other.igs:
tmp.igs.append( ig )
tmp.cells[ig] = self.cells[ig].copy()
continue
aux = nm.setdiff1d( self.cells[ig], other.cells[ig] )
if not len( aux ): continue
tmp.cells[ig] = aux
tmp.igs.append( ig )
tmp.update_vertices()
return tmp
##
# 15.06.2006, c
def add_e( self, other ):
tmp = self.light_copy( 'op',
_join( self.parse_def, '+e', other.parse_def ) )
for ig in self.igs:
tmp.igs.append( ig )
if ig not in other.igs:
tmp.cells[ig] = self.cells[ig].copy()
continue
tmp.cells[ig] = nm.union1d( self.cells[ig],
other.cells[ig] )
for ig in other.igs:
if ig in tmp.igs: continue
tmp.igs.append( ig )
tmp.cells[ig] = other.cells[ig].copy()
tmp.update_vertices()
return tmp
##
# 15.06.2006, c
# 20.02.2007
def intersect_e( self, other ):
tmp = self.light_copy( 'op',
_join( self.parse_def, '*e', other.parse_def ) )
for ig in self.igs:
if ig not in other.igs: continue
aux = nm.intersect1d( self.cells[ig], other.cells[ig] )
if not len( aux ): continue
tmp.igs.append( ig )
tmp.cells[ig] = aux
tmp.update_vertices()
return tmp
def setup_mirror_region(self):
"""
Find the corresponding mirror region, set up element mapping.
"""
for reg in self.domain.regions:
if (reg is not self) and \
(len(reg.igs) == len(self.igs)) and \
nm.all(self.all_vertices == reg.all_vertices):
mirror_region = reg
break
else:
raise ValueError('cannot find mirror region! (%s)' % self.name)
ig_map = {}
ig_map_i = {}
for igr in self.igs:
for igc in mirror_region.igs:
if nm.all(self.vertices[igr] ==
mirror_region.vertices[igc]):
ig_map[igc] = igr
ig_map_i[igr] = igc
break
else:
raise ValueError('cannot find mirror region group! (%d)' \
% igr)
self.mirror_region = mirror_region
self.ig_map = ig_map
self.ig_map_i = ig_map_i
def get_mirror_region(self):
return self.mirror_region, self.ig_map, self.ig_map_i
def create_mapping(self, kind, ig):
"""
Create mapping from reference elements to physical elements,
given the integration kind ('v' or 's').
This mapping can be used to compute the physical quadrature
points.
Returns
-------
mapping : VolumeMapping or SurfaceMapping instance
The requested mapping.
"""
from sfepy.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.fem.fe_surface import FESurface
coors = self.domain.get_mesh_coors()
if kind == 's':
coors = coors[self.all_vertices]
gel = self.domain.groups[ig].gel
conn = self.domain.groups[ig].conn
if kind == 'v':
cells = self.cells[ig]
mapping = VolumeMapping(coors, conn[cells], gel=gel)
elif kind == 's':
aux = FESurface('aux', self, gel.get_surface_entities(),
conn , ig)
mapping = SurfaceMapping(coors, aux.leconn, gel=gel.surface_facet)
return mapping
def get_field_nodes(self, field, merge=False, clean=False,
warn=False, igs=None):
"""
Get nodes of the field contained in the region.
Notes
-----
For one edge node type only! (should index row of cnt_en...)
"""
if igs is None:
igs = self.igs
cnt_en = field.cnt_en
nods = []
node_descs = field.get_node_descs( self )
for ig, node_desc in node_descs.iteritems():
if not ig in igs:
nods.append( None )
continue
nnew = nm.empty( (0,), dtype = nm.int32 )
if node_desc.vertex is not None:
nnew = nm.concatenate( (nnew, field.remap[self.vertices[ig]]) )
if node_desc.edge is not None:
ed = field.domain.ed
# ed.uid_i[self.edges[ii]] == ed.uid[ed.perm_i[self.edges[ii]]]
enods = cnt_en[:cnt_en.shape[0],ed.uid_i[self.edges[ig]]].ravel()
enods = nm.compress( (enods >= 0), enods )
nnew = nm.concatenate( (nnew, enods) )
if node_desc.face is not None:
print self.name, field.name
raise NotImplementedError
if (node_desc.bubble is not None) and self.can_cells:
noft = field.aps.node_offset_table
ia = field.aps.igs.index( ig )
enods = self.cells[ig] + noft[3,ia]
nnew = nm.concatenate( (nnew, enods) )
nods.append( nnew )
if merge:
nods = [nn for nn in nods if nn is not None]
nods = nm.unique( nm.hstack( nods ) )
elif clean:
for nn in nods[:]:
if nn is None:
nods.remove(nn)
if warn is not None:
output(warn + ('%s' % region.name))
return nods
def get_n_cells(self, ig, is_surface=False):
if is_surface:
return self.shape[ig].n_face
else:
return self.shape[ig].n_cell
##
# 22.02.2007, c
def get_vertices( self, ig ):
return self.vertices[ig]
##
# 05.06.2007, c
def get_edges( self, ig ):
return self.edges[ig]
##
# 05.06.2007, c
def get_faces( self, ig ):
return self.faces[ig]
##
# 05.06.2007, c
def get_cells( self, ig ):
return self.cells[ig]
def iter_cells(self):
ii = 0
for ig, cells in self.cells.iteritems():
for iel in cells:
yield ig, ii, iel
ii += 1
##
# created: 28.05.2007
# last revision: 11.12.2007
def has_cells( self ):
if self.can_cells:
for cells in self.cells.itervalues():
if cells.size:
return True
return False
else:
return False
def has_cells_if_can( self ):
if self.can_cells:
for cells in self.cells.itervalues():
if cells.size:
return True
return False
else:
return True
def contains( self, other ):
"""Tests only igs for now!!!"""
return set( other.igs ).issubset( set( self.igs ) )
##
# c: 25.03.2008, r: 25.03.2008
def get_cell_offsets( self ):
offs = {}
off = 0
for ig in self.igs:
offs[ig] = off
off += self.shape[ig].n_cell
return offs
def get_charfun( self, by_cell = False, val_by_id = False ):
"""
Return the characteristic function of the region as a vector of values
defined either in the mesh nodes (by_cell == False) or cells. The
values are either 1 (val_by_id == False) or sequential id + 1.
"""
if by_cell:
chf = nm.zeros( (self.domain.shape.n_el,), dtype = nm.float64 )
offs = self.get_cell_offsets()
for ig, cells in self.cells.iteritems():
iel = offs[ig] + cells
if val_by_id:
chf[iel] = iel + 1
else:
chf[iel] = 1.0
else:
chf = nm.zeros( (self.domain.shape.n_nod,), dtype = nm.float64 )
if val_by_id:
chf[self.all_vertices] = self.all_vertices + 1
else:
chf[self.all_vertices] = 1.0
return chf
| [
"[email protected]"
] | |
11c869ecd5e826427cc7d523ecdbb29f056a4b97 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /tests/test_summary_ranges.py | c75bf5a290b7f94b3f204aa5c8f8d1d2a2f4a90a | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 329 | py |
import pytest

from puzzles.summary_ranges import summary_ranges


@pytest.mark.parametrize(
    "nums, expected",
    [
        ([0, 1, 2, 4, 5, 7], ["0->2", "4->5", "7"]),
        ([0, 2, 3, 4, 6, 8, 9], ["0", "2->4", "6", "8->9"]),
    ],
)
def test_summary_ranges(nums, expected):
    assert summary_ranges(nums) == expected
| [
"[email protected]"
] | |
a6ba03e0e4a8062c8b63455aa01e52a7f24e0db4 | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pandas/plotting/_core.py | 17f6b990ba84c33027e706bda6f9149466ab80d2 | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,368 | py | import importlib
from pandas._config import get_option
from pandas.core.base import PandasObject
from pandas.core.dtypes.common import is_integer, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.util._decorators import Appender, Substitution
def hist_series(
self,
by=None,
ax=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
figsize=None,
bins=10,
backend=None,
**kwargs,
):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups.
ax : matplotlib axis object
If not passed, uses gca().
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels.
figsize : tuple, default None
Figure size in inches by default.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
To be passed to the actual plotting function.
Returns
-------
matplotlib.AxesSubplot
A histogram plot.
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_series(
self,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
**kwargs,
)
def hist_frame(
data,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
backend=None,
**kwargs,
):
"""
Make a histogram of the DataFrame's.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : str or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
.. plot::
:context: close-figs
This example draws a histogram based on the length and width of
some animals, displayed in three bins
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_frame(
data,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwargs,
)
_boxplot_doc = """
Make a box plot from DataFrame columns.
Make a box-and-whisker plot from DataFrame columns, optionally grouped
by some other columns. A box plot is a method for graphically depicting
groups of numerical data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box.
Outlier points are those past the end of the whiskers.
For further details see
Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
Parameters
----------
column : str or list of str, optional
Column name or list of names, or vector.
Can be any valid input to :meth:`pandas.DataFrame.groupby`.
by : str or array-like, optional
Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
One box-plot will be done per value of columns in `by`.
ax : object of class matplotlib.axes.Axes, optional
The matplotlib axes to be used by boxplot.
fontsize : float or str
Tick label font size in points or as a string (e.g., `large`).
rot : int or float, default 0
The rotation angle of labels (in degrees)
with respect to the screen coordinate system.
grid : bool, default True
Setting this to True will show the grid.
figsize : A tuple (width, height) in inches
The size of the figure to create in matplotlib.
layout : tuple (rows, columns), optional
For example, (3, 5) will display the subplots
using 3 columns and 5 rows, starting from the top-left.
return_type : {'axes', 'dict', 'both'} or None, default 'axes'
The kind of object to return. The default is ``axes``.
* 'axes' returns the matplotlib axes the boxplot is drawn on.
* 'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot.
* 'both' returns a namedtuple with the axes and dict.
* when grouping with ``by``, a Series mapping columns to
``return_type`` is returned.
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
%(backend)s\
**kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
Returns
-------
result
See Notes.
See Also
--------
Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
-----
The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
* 'both' : a namedtuple with structure (ax, lines)
For data grouped with ``by``, return a Series of the above or a numpy
array:
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
Examples
--------
Boxplots can be created for every column in the dataframe
by ``df.boxplot()`` or indicating the columns to be used:
.. plot::
:context: close-figs
>>> np.random.seed(1234)
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['Col1', 'Col2', 'Col3', 'Col4'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
Boxplots of variables distributions grouped by the values of a third
variable can be created using the option ``by``. For instance:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Col1', 'Col2'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> boxplot = df.boxplot(by='X')
A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
in order to group the data by combination of the variables in the x-axis:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 3),
... columns=['Col1', 'Col2', 'Col3'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
... 'B', 'A', 'B', 'A', 'B'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
The layout of boxplot can be adjusted giving a tuple to ``layout``:
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... layout=(2, 1))
Additional formatting can be done to the boxplot, like suppressing the grid
(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
or changing the fontsize (i.e. ``fontsize=15``):
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
The parameter ``return_type`` can be used to select the type of element
returned by `boxplot`. When ``return_type='axes'`` is selected,
the matplotlib axes on which the boxplot is drawn are returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
>>> type(boxplot)
<class 'matplotlib.axes._subplots.AxesSubplot'>
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type='axes')
>>> type(boxplot)
<class 'pandas.core.series.Series'>
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
_backend_doc = """\
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
"""
@Substitution(backend="")
@Appender(_boxplot_doc)
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs,
):
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.boxplot(
data,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
@Substitution(backend=_backend_doc)
@Appender(_boxplot_doc)
def boxplot_frame(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame(
self,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
def boxplot_frame_groupby(
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
sharex=False,
sharey=True,
backend=None,
**kwargs,
):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group.
column : column name or list of names, or vector
Can be any valid input to groupby.
fontsize : int or str
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
The layout of the plot: (rows, columns).
sharex : bool, default False
Whether x-axes will be shared among subplots.
.. versionadded:: 0.23.1
sharey : bool, default True
Whether y-axes will be shared among subplots.
.. versionadded:: 0.23.1
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function.
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
or DataFrame.boxplot return value in case subplots=figures=False
Examples
--------
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame_groupby(
grouped,
subplots=subplots,
column=column,
fontsize=fontsize,
rot=rot,
grid=grid,
ax=ax,
figsize=figsize,
layout=layout,
sharex=sharex,
sharey=sharey,
**kwargs,
)
class PlotAccessor(PandasObject):
"""
Make plots of Series or DataFrame.
Uses the backend specified by the
option ``plotting.backend``. By default, matplotlib is used.
Parameters
----------
data : Series or DataFrame
The object for which the method is called.
x : label or position, default None
Only used if data is a DataFrame.
y : label, position or list of label, positions, default None
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
The kind of plot to produce:
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
- 'hexbin' : hexbin plot.
figsize : a tuple (width, height) in inches
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
grid : bool, default None (matlab style default)
Axis grid lines.
legend : bool or {'reverse'}
Place legend on axis subplots.
style : list or dict
The matplotlib line style per column.
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis.
.. versionchanged:: 0.25.0
logy : bool or 'sym' default False
Use log scaling or symlog scaling on y axis.
.. versionchanged:: 0.25.0
loglog : bool or 'sym', default False
Use log scaling or symlog scaling on both x and y axes.
.. versionchanged:: 0.25.0
xticks : sequence
Values to use for the xticks.
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
fontsize : int, default None
Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
colorbar : bool, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin'
plots).
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center).
table : bool, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data
will be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a
table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
include_bool : bool, default is False
If True, boolean values can be plotted.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
If the backend is not the default matplotlib one, the return value
will be the object returned by the backend.
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center)
"""
_common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
_series_kinds = ("pie",)
_dataframe_kinds = ("scatter", "hexbin")
_kind_aliases = {"density": "kde"}
_all_kinds = _common_kinds + _series_kinds + _dataframe_kinds
def __init__(self, data):
self._parent = data
@staticmethod
def _get_call_args(backend_name, data, args, kwargs):
"""
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
"""
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("sort_columns", False),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
f"{name}={repr(value)}" for (name, default), value in zip(arg_def, args)
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {name: value for value, (name, _) in zip(args, arg_def)}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
def __call__(self, *args, **kwargs):
plot_backend = _get_plot_backend(kwargs.pop("backend", None))
x, y, kind, kwargs = self._get_call_args(
plot_backend.__name__, self._parent, args, kwargs
)
kind = self._kind_aliases.get(kind, kind)
# when using another backend, get out of the way
if plot_backend.__name__ != "pandas.plotting._matplotlib":
return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
if kind not in self._all_kinds:
raise ValueError(f"{kind} is not a valid plot kind")
# The original data structured can be transformed before passed to the
# backend. For example, for DataFrame is common to set the index as the
# `x` parameter, and return a Series with the parameter `y` as values.
data = self._parent.copy()
if isinstance(data, ABCSeries):
kwargs["reuse_plot"] = True
if kind in self._dataframe_kinds:
if isinstance(data, ABCDataFrame):
return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
else:
raise ValueError(f"plot kind {kind} can only be used for data frames")
elif kind in self._series_kinds:
if isinstance(data, ABCDataFrame):
if y is None and kwargs.get("subplots") is False:
raise ValueError(
f"{kind} requires either y column or 'subplots=True'"
)
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
elif isinstance(data, ABCDataFrame):
data_cols = data.columns
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data_cols[x]
elif not isinstance(data[x], ABCSeries):
raise ValueError("x must be a label or position")
data = data.set_index(x)
if y is not None:
# check if we have y as int or list of ints
int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
int_y_arg = is_integer(y) or int_ylist
if int_y_arg and not data.columns.holds_integer():
y = data_cols[y]
label_kw = kwargs["label"] if "label" in kwargs else False
for kw in ["xerr", "yerr"]:
if kw in kwargs and (
isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
):
try:
kwargs[kw] = data[kwargs[kw]]
except (IndexError, KeyError, TypeError):
pass
# don't overwrite
data = data[y].copy()
if isinstance(data, ABCSeries):
label_name = label_kw or y
data.name = label_name
else:
match = is_list_like(label_kw) and len(label_kw) == len(y)
if label_kw and not match:
raise ValueError(
"label should be list-like and same length as y"
)
label_name = label_kw or data.columns
data.columns = label_name
return plot_backend.plot(data, kind=kind, **kwargs)
__call__.__doc__ = __doc__
def line(self, x=None, y=None, **kwargs):
"""
Plot Series or DataFrame as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series([1, 3, 2])
>>> s.plot.line()
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind="line", x=x, y=y, **kwargs)
def bar(self, x=None, y=None, **kwargs):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind="bar", x=x, y=y, **kwargs)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, by=None, **kwargs):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
>>> ax = df.plot.box()
"""
return self(kind="box", by=by, **kwargs)
def hist(self, by=None, bins=10, **kwargs):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dices and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", by=by, bins=bins, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, x=None, y=None, **kwargs):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create a
unstacked plot.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind="area", x=x, y=y, **kwargs)
def pie(self, **kwargs):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with the information about
planet's mass and radius. We pass the the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(6, 3))
"""
if (
isinstance(self._parent, ABCDataFrame)
and kwargs.get("y", None) is None
and not kwargs.get("subplots", False)
):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", **kwargs)
def scatter(self, x, y, s=None, c=None, **kwargs):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
The size of each point. Possible values are:
- A single scalar so all points have the same size.
- A sequence of scalars, which will be used for each point's size
recursively. For instance, when passing [2,14] all points size
will be either 2 or 14, alternatively.
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each point's color recursively. For
instance ['green','yellow'] all points will be filled in green or
yellow, alternatively.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
        If `C` is specified, it gives the values at the given coordinates
        ``(x[i], y[i])``. These values are accumulated for each hexagonal
        bin and then reduced according to `reduce_C_function`, which
        defaults to NumPy's mean function (:meth:`numpy.mean`).
        (If `C` is specified, it must also be a 1-D sequence
        of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
        Note that the `'observations'` values range from 1 to 4 but the
        resulting plot shows values above 25. This is because of the
        `reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
"""
if reduce_C_function is not None:
kwargs["reduce_C_function"] = reduce_C_function
if gridsize is not None:
kwargs["gridsize"] = gridsize
return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
_backends = {}
def _find_backend(backend: str):
"""
    Find a pandas plotting backend.
Parameters
----------
backend : str
The identifier for the backend. Either an entrypoint item registered
with pkg_resources, or a module name.
Notes
-----
Modifies _backends with imported backends as a side effect.
Returns
-------
types.ModuleType
The imported backend.
"""
import pkg_resources # Delay import for performance.
for entry_point in pkg_resources.iter_entry_points("pandas_plotting_backends"):
if entry_point.name == "matplotlib":
# matplotlib is an optional dependency. When
# missing, this would raise.
continue
_backends[entry_point.name] = entry_point.load()
try:
return _backends[backend]
except KeyError:
        # Fall back to the unregistered, module-name approach.
try:
module = importlib.import_module(backend)
except ImportError:
# We re-raise later on.
pass
else:
if hasattr(module, "plot"):
# Validate that the interface is implemented when the option
# is set, rather than at plot time.
_backends[backend] = module
return module
raise ValueError(
f"Could not find plotting backend '{backend}'. Ensure that you've installed "
f"the package providing the '{backend}' entrypoint, or that the package has a "
"top-level `.plot` method."
)
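# Illustrative sketch (not part of the original module): a third-party backend is
# typically registered under the "pandas_plotting_backends" entry-point group that
# _find_backend scans above. The package/module names below are hypothetical,
# e.g. in the backend package's setup.py:
#
#   setup(
#       ...,
#       entry_points={
#           "pandas_plotting_backends": ["my_backend = my_backend.plotting"],
#       },
#   )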
def _get_plot_backend(backend=None):
"""
Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).
The plotting system of pandas has been using matplotlib, but the idea here
is that it can also work with other third-party backends. In the future,
this function will return the backend from a pandas option, and all the
rest of the code in this file will use the backend specified there for the
plotting.
The backend is imported lazily, as matplotlib is a soft dependency, and
pandas can be used without it being installed.
"""
backend = backend or get_option("plotting.backend")
if backend == "matplotlib":
# Because matplotlib is an optional dependency and first-party backend,
# we need to attempt an import here to raise an ImportError if needed.
try:
import pandas.plotting._matplotlib as module
except ImportError:
raise ImportError(
"matplotlib is required for plotting when the "
'default backend "matplotlib" is selected.'
) from None
_backends["matplotlib"] = module
if backend in _backends:
return _backends[backend]
module = _find_backend(backend)
_backends[backend] = module
return module
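# Illustrative usage sketch (not part of the original module): how the backend
# selection above is normally exercised from user code. "matplotlib" is the
# built-in default; any other name is resolved by _find_backend through the
# entry-point group or an importable module exposing a top-level `plot`.
#
#   import pandas as pd
#   pd.set_option("plotting.backend", "matplotlib")
#   df = pd.DataFrame({"x": [1, 2, 3], "y": [3, 1, 2]})
#   ax = df.plot(x="x", y="y")   # internally resolves via _get_plot_backend()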
| [
"[email protected]"
] | |
1252894bbc2a8443c571bc7d8c3dfc7f34290c28 | fafde4b09e56cadbe893c09ae1dfb52a745ae45a | /0x03-python-data_structures/0-print_list_integer.py | 72437fda223c519c834d47e5b084b819c6cca790 | [] | no_license | MCavigli/holbertonschool-higher_level_programming | 2120c6c3018c86215d221c6fffec1957ca1d0f48 | c3408521683f03214813839b7eed03fee083081e | refs/heads/master | 2022-07-12T20:59:12.732979 | 2022-06-17T05:21:01 | 2022-06-17T05:21:01 | 461,013,788 | 3 | 7 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | #!/usr/bin/python3
def print_list_integer(my_list=[]):
for i in my_list:
print('{:d}'.format(i))
| [
"[email protected]"
] | |
accc82f441ea51a73813ec2cb7dbc086e252d603 | cf9494e7953c91d786e003bfbcd9f6ad93126c7f | /widgets_entry2.py | 4879d321a74e5d54581585dbc09c16bb798c330e | [] | no_license | utisz86/TkinterTutorial | 7fd318e631f6e1a858083c3c157fa68a60bcc85a | 32030494ac3035d442a432bf6b389b6ff7e1c537 | refs/heads/master | 2022-11-26T06:36:47.959218 | 2020-07-07T22:10:33 | 2020-07-07T22:10:33 | 277,841,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | import tkinter as tk
window = tk.Tk()
# create a Label and an Entry widget
label = tk.Label(text="Name")
entry = tk.Entry()
# visible
label.pack()
entry.pack()
entry.insert(0,"mi?")
name = entry.get()
print(name)
window.mainloop() | [
"[email protected]"
] | |
45aca59783a82f8aebeef1769d251a6c7c1aea2f | 1186e0f758d930960aeb5319200ca50e09ff1d35 | /build/lib/cplvm/lm.py | 5d6ea254cd9fc48a55c71f271f202ac3c7edc685 | [
"MIT"
] | permissive | ethanweinberger/cplvm | c182ee3a960f20ce2975cec5492ec5b1f434dd71 | f4bbcfc4b2e9a9cec7d01eb5f7ff3a169d6e3ff6 | refs/heads/main | 2023-06-17T15:23:15.604291 | 2021-07-07T12:04:57 | 2021-07-07T12:04:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,771 | py | import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from tensorflow_probability import bijectors as tfb
from scipy.stats import multivariate_normal
tf.enable_v2_behavior()
warnings.filterwarnings('ignore')
NUM_VI_ITERS = 300
LEARNING_RATE_VI = 0.05
# ------- Specify model ---------
def clvm(data_dim, num_datapoints, counts_per_cell, dummy, is_H0=False):
mu = yield tfd.Normal(loc=tf.zeros([data_dim, 1]),
scale=tf.ones([data_dim, 1]),
name="mu")
beta = yield tfd.Normal(loc=tf.zeros([data_dim, 1]),
scale=tf.ones([data_dim, 1]),
name="beta")
# sigma = yield tfd.InverseGamma(concentration=tf.ones([data_dim, 1]),
# scale=1,
# name="sigma")
data = yield tfd.Normal(loc=(tf.matmul(beta, dummy) + mu) + np.log(counts_per_cell),
scale=1,
name="x")
def fit_model(X, Y, compute_size_factors=True, is_H0=False):
assert X.shape[0] == Y.shape[0]
data_dim = X.shape[0]
num_datapoints_x, num_datapoints_y = X.shape[1], Y.shape[1]
n = num_datapoints_x + num_datapoints_y
dummy = np.zeros(n)
dummy[num_datapoints_x:] = 1
dummy = np.expand_dims(dummy, 0)
data = np.concatenate([X, Y], axis=1)
data = np.log(data + 1)
if compute_size_factors:
# counts_per_cell = np.sum(data, axis=0)
# counts_per_cell = np.expand_dims(counts_per_cell, axis=0)
counts_per_cell = np.sum(np.concatenate([X, Y], axis=1), axis=0)
counts_per_cell = np.expand_dims(counts_per_cell, axis=0)
assert counts_per_cell.shape[1] == X.shape[1] + Y.shape[1]
else:
counts_per_cell = 1.0
# ------- Specify model ---------
concrete_clvm_model = functools.partial(clvm,
data_dim=data_dim,
num_datapoints=n,
counts_per_cell=counts_per_cell,
dummy=dummy,
is_H0=is_H0)
model = tfd.JointDistributionCoroutineAutoBatched(concrete_clvm_model)
if is_H0:
def target_log_prob_fn(mu, beta): return model.log_prob(
(mu, beta, data))
else:
def target_log_prob_fn(mu, beta): return model.log_prob(
(mu, beta, data))
# ------- Specify variational families -----------
# Variational parmater means
# mu
qmu_mean = tf.Variable(tf.random.normal([data_dim, 1]))
qmu_stddv = tfp.util.TransformedVariable(
1e-4 * tf.ones([data_dim, 1]),
bijector=tfb.Softplus())
# beta
qbeta_mean = tf.Variable(tf.random.normal([data_dim, 1]))
qbeta_stddv = tfp.util.TransformedVariable(
1e-4 * tf.ones([data_dim, 1]),
bijector=tfb.Softplus())
# sigma
# qsigma_concentration = tfp.util.TransformedVariable(
# tf.ones([data_dim, 1]),
# bijector=tfb.Softplus())
def factored_normal_variational_model():
qmu = yield tfd.Normal(loc=qmu_mean,
scale=qmu_stddv,
name="qmu")
qbeta = yield tfd.Normal(loc=qbeta_mean,
scale=qbeta_stddv,
name="qbeta")
# qsigma = yield tfd.InverseGamma(concentration=qsigma_concentration,
# scale=1,
# name="qsigma")
# Surrogate posterior that we will try to make close to p
surrogate_posterior = tfd.JointDistributionCoroutineAutoBatched(
factored_normal_variational_model)
# --------- Fit variational inference model using MC samples and gradient descent ----------
losses = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
optimizer=tf.optimizers.Adam(learning_rate=LEARNING_RATE_VI),
num_steps=NUM_VI_ITERS)
# d = np.log(data + 1)
# d = data / data.sum(0)
# from sklearn.linear_model import LinearRegression
# plt.scatter(np.squeeze(LinearRegression().fit(dummy.T, d.T).coef_), np.squeeze(qbeta_mean.numpy()))
# plt.show()
# d = (d.T - d.mean(1)).T
# x = np.mean(d[:, num_datapoints_x:], axis=1)
# y = np.mean(d[:, :num_datapoints_x], axis=1)
# from sklearn.linear_model import LinearRegression
# import ipdb
# ipdb.set_trace()
# plt.scatter(x - y, np.squeeze(qbeta_mean.numpy()))
# plt.show()
# import ipdb
# ipdb.set_trace()
if is_H0:
return_dict = {
'loss_trace': losses,
# 'qs_mean': qs_mean,
# 'qzx_mean': qzx_mean,
# 'qzy_mean': qzy_mean,
# 'qs_stddv': qs_stddv,
# 'qzx_stddv': qzx_stddv,
# 'qzy_stddv': qzy_stddv,
# 'qdelta_mean': qdelta_mean,
# 'qdelta_stddv': qdelta_stddv,
}
else:
return_dict = {
'loss_trace': losses,
# 'qs_mean': qs_mean,
# 'qw_mean': qw_mean,
# 'qzx_mean': qzx_mean,
# 'qzy_mean': qzy_mean,
# 'qty_mean': qty_mean,
# 'qs_stddv': qs_stddv,
# 'qw_stddv': qw_stddv,
# 'qzx_stddv': qzx_stddv,
# 'qzy_stddv': qzy_stddv,
# 'qty_stddv': qty_stddv,
# 'qdelta_mean': qdelta_mean,
# 'qdelta_stddv': qdelta_stddv,
}
return return_dict
| [
"[email protected]"
] | |
53bbaf0c1927f1dd6a655edc347f5f068614c4fe | a4681043cb56a9ab45be32a62fa9700b391f087f | /14-Statistics_with_Python/Histograms/Finding_your_Best_Bin_Size.py | ee5208c7421bb08d3d728fd281c28ccc5c4509db | [] | no_license | MarceloDL-A/Python | b16b221ae4355b6323092d069bf83d1d142b9975 | c091446ae0089f03ffbdc47b3a6901f4fa2a25fb | refs/heads/main | 2023-01-01T02:29:31.591861 | 2020-10-27T19:04:11 | 2020-10-27T19:04:11 | 301,565,957 | 0 | 0 | null | 2020-10-27T19:04:12 | 2020-10-05T23:41:30 | Python | MacCentralEurope | Python | false | false | 1,976 | py | """
HISTOGRAMS
Finding your Best Bin Size
The figure below displays the graph that you created in the last exercise:
Histogram
This histogram is helpful for our store manager. The last six hours of the day are the busiest, from 6 pm until midnight. Does this mean the manager should staff their grocery store with the most employees between 6 pm and midnight?
To the manager, this doesn't make much sense. The manager knows the store is busy when many people get off work, but the rush certainly doesn't continue later than 9 pm.
The issue with this histogram is that we have too few bins. When plotting a histogram, it's essential to select bins that fully capture the trends in the underlying data. Often, this will require some guessing and checking. There isn't much of a science to selecting bin size.
How many bins do you think makes sense for this example? I would try 24 because there are 24 hours in a day.
"""
# Import packages
import codecademylib
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Read in transactions data
transactions = pd.read_csv("transactions.csv")
# Save transaction times to a separate numpy array
times = transactions["Transaction Time"].values
"""
Change the number of bins in your code from 4 to 24.
What do you notice about the data?
Given this new graph, when would you recommend staffing the grocery store?
Check the hint to see what we thought.
"""
# Use plt.hist() below
plt.hist(times, range=(0, 24), bins=24, edgecolor="black")
plt.title("Weekday Frequency of Customers")
plt.xlabel("Hours (1 hour increments)")
plt.ylabel("Count")
plt.show()
"""
It looks like the busiest times of day are in the morning, from 5am to 10am, and in the evening from 5pm to 10pm.
This histogram has two distinct peaks, neither of which is close to our average of 3pm. As you can see, averages don't tell the full story. By visualizing the shape of our data, we can make better-informed decisions.
""" | [
"[email protected]"
] | |
0f0685f8137ae113e06f7428ee41f7d757b0a252 | 15bfc574ae99ea02f10c1f549136bd9951f399cd | /articles/views.py | f5c3eaddddb2e91b794b65638563c13318ad1b73 | [] | no_license | dimka1993kh/Dj_HW_5.3 | a2b9a197729eb26fd4e6d3b4872b754542bd8c07 | e891370b4cb740fd1bf44e19e27a17021a65f99c | refs/heads/master | 2023-04-15T01:54:24.943214 | 2021-05-03T18:44:02 | 2021-05-03T18:44:02 | 364,027,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | from django.views.generic import ListView
from django.shortcuts import render
from .models import Article
def articles_list(request):
template_name = 'articles/news.html'
news = Article.objects.all().select_related('author', 'genre').defer('author__phone')
# news = Article.objects.all()
context = {
'object_list': news,
}
# используйте этот параметр для упорядочивания результатов
# https://docs.djangoproject.com/en/2.2/ref/models/querysets/#django.db.models.query.QuerySet.order_by
ordering = '-published_at'
return render(request, template_name, context)
| [
"[email protected]"
] | |
5f58ecef0b8e82fcead874e63f358470565ad618 | c5bfc4509bedafe822691bbb3eb927e1fdd6daef | /ProblemSet5/Coordinate.py | 0d75033313b8a1597c52bb185feea8efdae00559 | [] | no_license | BMariscal/MITx-6.00.1x | 046f9891dcdc9c5fabf0543a01434a1304a7db9d | 37951478f41f9f2e00bb2e1ec12ccbafb4ab8e78 | refs/heads/master | 2021-06-19T09:08:56.827010 | 2017-07-16T08:10:26 | 2017-07-16T08:10:26 | 82,771,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | """Your task is to define the following two methods for the Coordinate class:
1.Add an __eq__ method that returns True if coordinates refer to same point in
the plane (i.e., have the same x and y coordinate).
2.Define __repr__, a special method that returns a string that looks like a valid
Python expression that could be used to recreate an object with the same value.
In other words, eval(repr(c)) == c given the definition of __eq__ from part 1.
"""
class Coordinate(object):
def __init__(self,x,y):
self.x = x
self.y = y
def __eq__(self,other):
# First make sure `other` is of the same type
assert type(other) == type(self)
# Since `other` is the same type, test if coordinates are equal
return self.getX() == other.getX() and self.getY() == other.getY()
def __repr__(self):
return 'Coordinate(' + str(self.getX()) + ',' + str(self.getY()) + ')'
def getX(self):
# Getter method for a Coordinate object's x coordinate.
# Getter methods are better practice than just accessing an attribute directly
return self.x
def getY(self):
# Getter method for a Coordinate object's y coordinate
return self.y
def __str__(self):
return '<' + str(self.getX()) + ',' + str(self.getY()) + '>'
# Example interactive session (c1 and c2 assumed to be Coordinate(1, -8) instances):
# > print(c1)
# <1,-8>
# > print(c2)
# <1,-8>
# > print(c1 == c2)
# True
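# Minimal round-trip sketch of the eval(repr(c)) == c requirement from the docstring
# (the variable name c3 is illustrative, not part of the original file):
# c3 = Coordinate(1, -8)
# repr(c3)                 # "Coordinate(1,-8)"
# eval(repr(c3)) == c3     # True, because __eq__ compares x and y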
| [
"[email protected]"
] | |
f6613b94c9e9d8ee741d57e5cd2f3e1165564490 | f4f181f2c970a163801b4202fc8d6c92a4e8113d | /google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/ssh_utils.py | f61e082ca4e2e06461cb227e69749734f27b9406 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Sorsly/subtle | 7732a6cb910f5e2f4eed1ac0d3b5979001582340 | 718e79a3e04f1f57f39b6ebe90dec9e028e88d40 | refs/heads/master | 2021-05-24T01:21:39.218495 | 2017-10-28T01:33:58 | 2017-10-28T01:33:58 | 83,103,372 | 0 | 1 | MIT | 2020-07-25T11:21:05 | 2017-02-25T03:33:07 | Python | UTF-8 | Python | false | false | 23,694 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for subcommands that need to SSH into virtual machine guests."""
import logging
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute import path_simplifier
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute.users import client as user_client
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.util import gaia
from googlecloudsdk.command_lib.util import ssh
from googlecloudsdk.command_lib.util import time_util
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import progress_tracker
# The maximum amount of time to wait for a newly-added SSH key to
# propagate before giving up.
_SSH_KEY_PROPAGATION_TIMEOUT_SEC = 60
_TROUBLESHOOTING_URL = (
'https://cloud.google.com/compute/docs/troubleshooting#ssherrors')
class CommandError(core_exceptions.Error):
"""Wraps ssh.CommandError, primarly for adding troubleshooting info."""
def __init__(self, original_error, message=None):
if message is None:
message = 'See {url} for troubleshooting hints.'.format(
url=_TROUBLESHOOTING_URL)
super(CommandError, self).__init__(
'{0}\n{1}'.format(original_error, message),
exit_code=original_error.exit_code)
class SetProjectMetadataError(core_exceptions.Error):
pass
def GetExternalIPAddress(instance_resource, no_raise=False):
"""Returns the external IP address of the instance.
Args:
instance_resource: An instance resource object.
no_raise: A boolean flag indicating whether or not to return None instead of
raising.
Raises:
ToolException: If no external IP address is found for the instance_resource
and no_raise is False.
Returns:
    A string IP, or None if no_raise is True and no IP exists.
"""
if instance_resource.networkInterfaces:
access_configs = instance_resource.networkInterfaces[0].accessConfigs
if access_configs:
ip_address = access_configs[0].natIP
if ip_address:
return ip_address
elif not no_raise:
raise exceptions.ToolException(
'Instance [{0}] in zone [{1}] has not been allocated an external '
'IP address yet. Try rerunning this command later.'.format(
instance_resource.name,
path_simplifier.Name(instance_resource.zone)))
if no_raise:
return None
raise exceptions.ToolException(
'Instance [{0}] in zone [{1}] does not have an external IP address, '
'so you cannot SSH into it. To add an external IP address to the '
'instance, use [gcloud compute instances add-access-config].'
.format(instance_resource.name,
path_simplifier.Name(instance_resource.zone)))
def _GetMetadataKey(iam_ssh_keys):
"""Get the metadata key name for the desired SSH key metadata.
There are four SSH key related metadata pairs:
* Per-project 'sshKeys': this grants SSH access to VMs project-wide.
* Per-instance 'sshKeys': this is used to grant access to an individual
instance. For historical reasons, it acts as an override to the
project-global value.
* Per-instance 'block-project-ssh-keys': this determines whether 'ssh-keys'
overrides or adds to the per-project 'sshKeys'
* Per-instance 'ssh-keys': this also grants access to an individual
instance, but acts in addition or as an override to the per-project
'sshKeys' depending on 'block-project-ssh-keys'
Args:
iam_ssh_keys: bool. If False, give the name of the original SSH metadata key
(that overrides the project-global SSH metadata key). If True, give the
name of the IAM SSH metadata key (that works in conjunction with the
project-global SSH key metadata).
Returns:
str, the corresponding metadata key name.
"""
if iam_ssh_keys:
metadata_key = constants.SSH_KEYS_INSTANCE_RESTRICTED_METADATA_KEY
else:
metadata_key = constants.SSH_KEYS_METADATA_KEY
return metadata_key
def _GetSSHKeysFromMetadata(metadata, iam_keys=False):
"""Returns the value of the "sshKeys" metadata as a list."""
if not metadata:
return []
for item in metadata.items:
if item.key == _GetMetadataKey(iam_keys):
return [key.strip() for key in item.value.split('\n') if key]
return []
def _PrepareSSHKeysValue(ssh_keys):
"""Returns a string appropriate for the metadata.
  Values are taken from the tail until either all values are
taken or _MAX_METADATA_VALUE_SIZE_IN_BYTES is reached, whichever
comes first. The selected values are then reversed. Only values at
the head of the list will be subject to removal.
Args:
ssh_keys: A list of keys. Each entry should be one key.
Returns:
A new-line-joined string of SSH keys.
"""
keys = []
bytes_consumed = 0
for key in reversed(ssh_keys):
num_bytes = len(key + '\n')
if bytes_consumed + num_bytes > constants.MAX_METADATA_VALUE_SIZE_IN_BYTES:
log.warn('The following SSH key will be removed from your project '
'because your sshKeys metadata value has reached its '
'maximum allowed size of {0} bytes: {1}'
.format(constants.MAX_METADATA_VALUE_SIZE_IN_BYTES, key))
else:
keys.append(key)
bytes_consumed += num_bytes
keys.reverse()
return '\n'.join(keys)
def _AddSSHKeyToMetadataMessage(message_classes, user, public_key, metadata,
iam_keys=False):
"""Adds the public key material to the metadata if it's not already there."""
entry = u'{user}:{public_key}'.format(
user=user, public_key=public_key)
ssh_keys = _GetSSHKeysFromMetadata(metadata, iam_keys=iam_keys)
log.debug('Current SSH keys in project: {0}'.format(ssh_keys))
if entry in ssh_keys:
return metadata
else:
ssh_keys.append(entry)
return metadata_utils.ConstructMetadataMessage(
message_classes=message_classes,
metadata={
_GetMetadataKey(iam_keys): _PrepareSSHKeysValue(ssh_keys)},
existing_metadata=metadata)
def _MetadataHasBlockProjectSshKeys(metadata):
"""Return true if the metadata has 'block-project-ssh-keys' set and 'true'."""
if not (metadata and metadata.items):
return False
matching_values = [item.value for item in metadata.items
if item.key == constants.SSH_KEYS_BLOCK_METADATA_KEY]
if not matching_values:
return False
return matching_values[0].lower() == 'true'
class BaseSSHCommand(base_classes.BaseCommand):
"""Base class for subcommands that need to connect to instances using SSH.
Subclasses can call EnsureSSHKeyIsInProject() to make sure that the
user's public SSH key is placed in the project metadata before
proceeding.
Attributes:
keys: ssh.Keys, the public/private key pair.
env: ssh.Environment, the current environment, used by subclasses.
"""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
force_key_file_overwrite = parser.add_argument(
'--force-key-file-overwrite',
action='store_true',
default=None,
help=('Force overwrite the files associated with a broken SSH key.')
)
force_key_file_overwrite.detailed_help = """\
If enabled gcloud will regenerate and overwrite the files associated
with a broken SSH key without asking for confirmation in both
interactive and non-interactive environment.
If disabled gcloud will not attempt to regenerate the files associated
with a broken SSH key and fail in both interactive and non-interactive
environment.
"""
# Last line empty to preserve spacing between last paragraph and calliope
# attachment "Use --no-force-key-file-overwrite to disable."
ssh_key_file = parser.add_argument(
'--ssh-key-file',
help='The path to the SSH key file.')
ssh_key_file.detailed_help = """\
The path to the SSH key file. By default, this is ``{0}''.
""".format(ssh.Keys.DEFAULT_KEY_FILE)
def Run(self, args):
"""Sets up resources to be used by concrete subclasses.
Subclasses must call this in their Run() before continuing.
Args:
args: argparse.Namespace, arguments that this command was invoked with.
Raises:
ssh.CommandNotFoundError: SSH is not supported.
"""
self.keys = ssh.Keys.FromFilename(args.ssh_key_file)
self.env = ssh.Environment.Current()
self.env.RequireSSH()
def GetProject(self, project):
"""Returns the project object.
Args:
project: str, the project we are requesting or None for value from
from properties
Returns:
The project object
"""
errors = []
objects = list(request_helper.MakeRequests(
requests=[(self.compute.projects,
'Get',
self.messages.ComputeProjectsGetRequest(
project=project or properties.VALUES.core.project.Get(
required=True),
))],
http=self.http,
batch_url=self.batch_url,
errors=errors))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch project resource:')
return objects[0]
def _SetProjectMetadata(self, new_metadata):
"""Sets the project metadata to the new metadata."""
compute = self.compute
errors = []
list(request_helper.MakeRequests(
requests=[
(compute.projects,
'SetCommonInstanceMetadata',
self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(
metadata=new_metadata,
project=properties.VALUES.core.project.Get(
required=True),
))],
http=self.http,
batch_url=self.batch_url,
errors=errors))
if errors:
utils.RaiseException(
errors,
SetProjectMetadataError,
error_message='Could not add SSH key to project metadata:')
def SetProjectMetadata(self, new_metadata):
"""Sets the project metadata to the new metadata with progress tracker."""
with progress_tracker.ProgressTracker('Updating project ssh metadata'):
self._SetProjectMetadata(new_metadata)
def _SetInstanceMetadata(self, instance, new_metadata):
"""Sets the project metadata to the new metadata."""
compute = self.compute
errors = []
# API wants just the zone name, not the full URL
zone = instance.zone.split('/')[-1]
list(request_helper.MakeRequests(
requests=[
(compute.instances,
'SetMetadata',
self.messages.ComputeInstancesSetMetadataRequest(
instance=instance.name,
metadata=new_metadata,
project=properties.VALUES.core.project.Get(
required=True),
zone=zone
))],
http=self.http,
batch_url=self.batch_url,
errors=errors))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not add SSH key to instance metadata:')
def SetInstanceMetadata(self, instance, new_metadata):
"""Sets the instance metadata to the new metadata with progress tracker."""
with progress_tracker.ProgressTracker('Updating instance ssh metadata'):
self._SetInstanceMetadata(instance, new_metadata)
def EnsureSSHKeyIsInInstance(self, user, instance, iam_keys=False):
"""Ensures that the user's public SSH key is in the instance metadata.
Args:
user: str, the name of the user associated with the SSH key in the
metadata
instance: Instance, ensure the SSH key is in the metadata of this instance
iam_keys: bool. If False, write to the original SSH metadata key (that
overrides the project-global SSH metadata key). If true, write to the
new SSH metadata key (that works in union with the project-global SSH
key metadata).
Returns:
bool, True if the key was newly added, False if it was in the metadata
already
"""
public_key = self.keys.GetPublicKey().ToEntry(include_comment=True)
new_metadata = _AddSSHKeyToMetadataMessage(self.messages, user, public_key,
instance.metadata,
iam_keys=iam_keys)
if new_metadata != instance.metadata:
self.SetInstanceMetadata(instance, new_metadata)
return True
else:
return False
def EnsureSSHKeyIsInProject(self, user, project_name=None):
"""Ensures that the user's public SSH key is in the project metadata.
Args:
user: str, the name of the user associated with the SSH key in the
metadata
project_name: str, the project SSH key will be added to
Returns:
bool, True if the key was newly added, False if it was in the metadata
already
"""
public_key = self.keys.GetPublicKey().ToEntry(include_comment=True)
project = self.GetProject(project_name)
existing_metadata = project.commonInstanceMetadata
new_metadata = _AddSSHKeyToMetadataMessage(
self.messages, user, public_key, existing_metadata)
if new_metadata != existing_metadata:
self.SetProjectMetadata(new_metadata)
return True
else:
return False
def _EnsureSSHKeyExistsForUser(self, fetcher, user):
"""Ensure the user's public SSH key is known by the Account Service."""
public_key = self.keys.GetPublicKey().ToEntry(include_comment=True)
should_upload = True
try:
user_info = fetcher.LookupUser(user)
except user_client.UserException:
owner_email = gaia.GetAuthenticatedGaiaEmail(self.http)
fetcher.CreateUser(user, owner_email)
user_info = fetcher.LookupUser(user)
for remote_public_key in user_info.publicKeys:
if remote_public_key.key.rstrip() == public_key:
expiration_time = remote_public_key.expirationTimestamp
if expiration_time and time_util.IsExpired(expiration_time):
# If a key is expired we remove and reupload
fetcher.RemovePublicKey(
user_info.name, remote_public_key.fingerprint)
else:
should_upload = False
break
if should_upload:
fetcher.UploadPublicKey(user, public_key)
return True
@property
def resource_type(self):
return 'instances'
class BaseSSHCLICommand(BaseSSHCommand):
"""Base class for subcommands that use ssh or scp."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Please add arguments in alphabetical order except for no- or a clear-
pair for that argument which can follow the argument itself.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
BaseSSHCommand.Args(parser)
parser.add_argument(
'--dry-run',
action='store_true',
help=('If provided, prints the command that would be run to standard '
'out instead of executing it.'))
plain = parser.add_argument(
'--plain',
action='store_true',
help='Suppresses the automatic addition of ssh/scp flags.')
plain.detailed_help = """\
Suppresses the automatic addition of *ssh(1)*/*scp(1)* flags. This flag
is useful if you want to take care of authentication yourself or
use specific ssh/scp features.
"""
strict_host_key = parser.add_argument(
'--strict-host-key-checking',
choices=['yes', 'no', 'ask'],
help='Override the default behavior for ssh/scp StrictHostKeyChecking')
strict_host_key.detailed_help = """\
Override the default behavior of StrictHostKeyChecking. By default,
StrictHostKeyChecking is set to 'no' the first time you connect to an
instance and will be set to 'yes' for all subsequent connections. Use
this flag to specify a value for the connection.
"""
def Run(self, args):
super(BaseSSHCLICommand, self).Run(args)
if not args.plain:
self.keys.EnsureKeysExist(args.force_key_file_overwrite)
def GetInstance(self, instance_ref):
"""Fetch an instance based on the given instance_ref."""
request = (self.compute.instances,
'Get',
self.messages.ComputeInstancesGetRequest(
instance=instance_ref.Name(),
project=instance_ref.project,
zone=instance_ref.zone))
errors = []
objects = list(request_helper.MakeRequests(
requests=[request],
http=self.http,
batch_url=self.batch_url,
errors=errors))
if errors:
utils.RaiseToolException(
errors,
error_message='Could not fetch instance:')
return objects[0]
def HostKeyAlias(self, instance):
return 'compute.{0}'.format(instance.id)
def ActuallyRun(self, args, cmd_args, user, instance, project,
strict_error_checking=True, use_account_service=False,
wait_for_sshable=True, ignore_ssh_errors=False):
"""Runs the scp/ssh command specified in cmd_args.
If the scp/ssh command exits non-zero, this command will exit with the same
exit code.
Args:
args: argparse.Namespace, The calling command invocation args.
cmd_args: [str], The argv for the command to execute.
user: str, The user name.
instance: Instance, the instance to connect to
project: str, the project instance is in
strict_error_checking: bool, whether to fail on a non-zero, non-255 exit
        code (alternative behavior is to return the exit code).
use_account_service: bool, when false upload ssh keys to project metadata.
wait_for_sshable: bool, when false skip the sshability check.
ignore_ssh_errors: bool, when true ignore all errors, including the 255
exit code.
Raises:
CommandError: If the scp/ssh command fails.
Returns:
int, the exit code of the command that was run
"""
cmd_args = ssh.LocalizeCommand(cmd_args, self.env)
if args.dry_run:
log.out.Print(' '.join(cmd_args))
return
if args.plain:
keys_newly_added = []
elif use_account_service:
fetcher = user_client.UserResourceFetcher(
self.clouduseraccounts, self.project, self.http, self.batch_url)
keys_newly_added = self._EnsureSSHKeyExistsForUser(fetcher, user)
else:
# There are two kinds of metadata: project-wide metadata and per-instance
# metadata. There are four SSH-key related metadata keys:
#
# * project['sshKeys']: shared project-wide
# * instance['sshKeys']: legacy. Acts as an override to project['sshKeys']
# * instance['block-project-ssh-keys']: If true, instance['ssh-keys']
# overrides project['sshKeys']. Otherwise, keys from both metadata
# pairs are valid.
# * instance['ssh-keys']: Acts either in conjunction with or as an
# override to project['sshKeys'], depending on
# instance['block-project-ssh-keys']
#
# SSH-like commands work by copying a relevant SSH key to
# the appropriate metadata value. The VM grabs keys from the metadata as
# follows (pseudo-Python):
#
# def GetAllSshKeys(project, instance):
# if 'sshKeys' in instance.metadata:
# return (instance.metadata['sshKeys'] +
# instance.metadata['ssh-keys'])
# elif instance.metadata['block-project-ssh-keys'] == 'true':
# return instance.metadata['ssh-keys']
# else:
# return (instance.metadata['ssh-keys'] +
# project.metadata['sshKeys'])
#
if _GetSSHKeysFromMetadata(instance.metadata):
# If we add a key to project-wide metadata but the per-instance
# 'sshKeys' metadata exists, we won't be able to ssh in because the VM
# won't check the project-wide metadata. To avoid this, if the instance
# has per-instance SSH key metadata, we add the key there instead.
keys_newly_added = self.EnsureSSHKeyIsInInstance(user, instance)
elif _MetadataHasBlockProjectSshKeys(instance.metadata):
# If the instance 'ssh-keys' metadata overrides the project-wide
# 'sshKeys' metadata, we should put our key there.
keys_newly_added = self.EnsureSSHKeyIsInInstance(user, instance,
iam_keys=True)
else:
# Otherwise, try to add to the project-wide metadata. If we don't have
# permissions to do that, add to the instance 'ssh-keys' metadata.
try:
keys_newly_added = self.EnsureSSHKeyIsInProject(user, project)
except SetProjectMetadataError:
log.info('Could not set project metadata:', exc_info=True)
# If we can't write to the project metadata, it may be because of a
# permissions problem (we could inspect this exception object further
# to make sure, but because we only get a string back this would be
# fragile). If that's the case, we want to try the writing to the
# iam_keys metadata (we may have permissions to write to instance
# metadata). We prefer this to the per-instance override of the
# project metadata.
log.info('Attempting to set instance metadata.')
keys_newly_added = self.EnsureSSHKeyIsInInstance(user, instance,
iam_keys=True)
if keys_newly_added and wait_for_sshable:
external_ip_address = GetExternalIPAddress(instance)
host_key_alias = self.HostKeyAlias(instance)
ssh.WaitUntilSSHable(
user, external_ip_address, self.env, self.keys.key_file,
host_key_alias, args.plain, args.strict_host_key_checking,
_SSH_KEY_PROPAGATION_TIMEOUT_SEC)
logging.debug('%s command: %s', cmd_args[0], ' '.join(cmd_args))
try:
return ssh.RunExecutable(cmd_args,
strict_error_checking=strict_error_checking,
ignore_ssh_errors=ignore_ssh_errors)
except ssh.CommandError as e:
raise CommandError(e)
| [
"[email protected]"
] | |
f047b7e7e7ffeddbf2f1357674c44aee7ab8d35a | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/datasetup_20210414102701.py | 67abb47540226e9e7256dd767a367bdddaf1b16d | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,231 | py | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import cv2
import os
import glob
from pathlib import Path
def cleanTestDirs():
emotions = ['angry', 'happy', 'disgust', 'sad', 'neutral', 'surprise', 'fear']
for e in emotions:
pathy = '/Users/Natalie/Desktop/cs1430/CV-final-project/data/test/'+e
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
            except OSError as err:  # renamed to avoid shadowing the emotion loop variable 'e'
                print("Error: %s : %s" % (f, err.strerror))
def cleanTrainDirs():
emotions = ['angry', 'happy', 'disgust', 'sad', 'neutral', 'surprise', 'fear']
for e in emotions:
pathy = '/Users/Natalie/Desktop/cs1430/CV-final-project/data/train/'+e
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
            except OSError as err:  # renamed to avoid shadowing the emotion loop variable 'e'
                print("Error: %s : %s" % (f, err.strerror))
def cleanAll():
cleanTestDirs()
cleanTrainDirs()
def createPixelArray(arr):
arr = list(map(int, arr.split()))
array = np.array(arr, dtype=np.uint8)
array = array.reshape((48, 48))
return array
def equalize_hist(img):
img = cv2.equalizeHist(img)
return img
def showImages(imgs):
_, axs = plt.subplots(1, len(imgs), figsize=(20, 20))
axs = axs.flatten()
for img, ax in zip(imgs, axs):
ax.imshow(img,cmap=plt.get_cmap('gray'))
plt.show()
def augmentIMG(img, task):
imgs = [img]
img1 = equalize_hist(img)
imgs.append(img1)
if(task == 3):
img2 = cv2.bilateralFilter(img1, d=9, sigmaColor=75, sigmaSpace=75)
imgs.append(img2)
img6 = cv2.flip(img, 1) # flip horizontally
imgs.append(img6)
return imgs
def saveIMG(arr, num, folderLoc):
im = Image.fromarray(arr)
filename = folderLoc + "image_"+ num+".jpg"
im.save(filename)
def createTrain(emotion_dict, task):
df = pd.read_csv('/Users/Natalie/Desktop/cs1430/CV-final-project/data/icml_face_data.csv') # CHANGE ME
base_filename = "/Users/Natalie/Desktop/cs1430/CV-final-project/data/train/" # CHANGE ME
for index, row in df.iterrows():
if (row[' Usage'] == "Training"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = emotion_dict[emot]
filename = base_filename + emot_loc
img = createPixelArray(px)
img_arr = augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
saveIMG(i, num, filename)
def createTest(emotion_dict , task):
df = pd.read_csv('/Users/Natalie/Desktop/cs1430/CV-final-project/data/icml_face_data.csv') # CHANGE ME
base_filename = "/Users/Natalie/Desktop/cs1430/CV-final-project/data/test/" # CHANGE ME
for index, row in df.iterrows():
if (row[' Usage'] == "PublicTest"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = emotion_dict[emot]
filename = base_filename + emot_loc
img = createPixelArray(px)
img_arr = augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
saveIMG(i, num, filename)
def createEmotionDict():
emotionDict = {}
emotionDict[0]="angry/"
emotionDict[1]="disgust/"
emotionDict[2]="fear/"
emotionDict[3]="happy/"
emotionDict[4]="sad/"
emotionDict[5]="surprise/"
emotionDict[6] = "neutral/"
return emotionDict
def createSimpleData():
cleanAll()
print("Cleaning done")
emot_dict = createEmotionDict()
createTrain(emot_dict, 1)
print("Training done")
createTest(emot_dict, 1)
print("Testing done")
def createComplexData():
cleanAll()
emot_dict = createEmotionDict()
createTrain(emot_dict, 3)
createTest(emot_dict, 3)
def main():
cleanAll()
print("Cleaning done")
emot_dict = createEmotionDict()
createTrain(emot_dict, 1)
print("Training done")
createTest(emot_dict, 1)
print("Testing done")
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
be6f744ed74a9d985bf2d457c64dc8a20447b721 | 543e4a93fd94a1ebcadb7ba9bd8b1f3afd3a12b8 | /maza/modules/creds/cameras/sentry360/ftp_default_creds.py | 36767e634607918cc0d8f56a5cba7e413c92d652 | [
"MIT"
] | permissive | ArturSpirin/maza | e3127f07b90034f08ff294cc4afcad239bb6a6c3 | 56ae6325c08bcedd22c57b9fe11b58f1b38314ca | refs/heads/master | 2020-04-10T16:24:47.245172 | 2018-12-11T07:13:15 | 2018-12-11T07:13:15 | 161,144,181 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | from maza.core.exploit import *
from maza.modules.creds.generic.ftp_default import Exploit as FTPDefault
class Exploit(FTPDefault):
__info__ = {
"name": "Sentry360 Camera Default FTP Creds",
"description": "Module performs dictionary attack against Sentry360 Camera FTP service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"Sentry360 Camera",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(21, "Target FTP port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("admin:1234", "User:Pass or file with default credentials (file://)")
| [
"[email protected]"
] | |
2aa88315ba210081a02274e09c7e59726276c367 | e4bceb499098281253f01f93d5c4f11284febf2e | /wakeup.py | 71ff96919c980606a2c9ddd0ded586b253c131a3 | [] | no_license | Hemie143/gc_mysteries | d2a0a363767128bd599e079a1fab01822986d7e9 | d26cf036f20f13d9a6c314000b7531e2f21d4d5e | refs/heads/master | 2021-09-15T11:25:23.775079 | 2018-05-31T11:38:08 | 2018-05-31T11:38:08 | 125,210,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import requests
import filecmp
import os
import time
import datetime
'''
curl
-A "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
-e http://geocachewakeup.byethost7.com/?ckattempt=1
-b "__test=a7f64c693f5755629af2d2c71aa06d2a;referrer="
-o wakeup%TS%.png
-H "Cache-Control: no-cache"
http://geocachewakeup.byethost7.com/image.php
'''
headers = {'referer': 'http://geocachewakeup.byethost7.com/?ckattempt=1',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'pragma': 'no-cache',
'cache-control': 'no-cache'}
cookies = {'__test': 'a7f64c693f5755629af2d2c71aa06d2a', 'referrer': ''}
i = 1
while True:
print('Trial {0}'.format(i))
res = requests.get('http://geocachewakeup.byethost7.com/image.php', cookies=cookies, headers=headers)
res.raise_for_status()
imagefile = open('uil.png', 'wb')
for chunk in res.iter_content(1000):
imagefile.write(chunk)
imagefile.close()
if not filecmp.cmp('howlsleep.png', 'uil.png', shallow=False):
os.rename('uil.png', 'uil_{:%Y%m%d_%H%M%S}.png'.format(datetime.datetime.now()))
filecmp.clear_cache()
i += 1
time.sleep(15)
| [
"[email protected]"
] | |
fb0ad1f1480d6d4839f0c76a440b978b0e071368 | 7c9f28e371e8dfa9290c05a48a9d924484b4b18c | /rgn.py | 3a668bd7122d3cbb93110cc8b215dd74a2e7f72c | [] | no_license | Pavithralakshmi/corekata | 1f9d963da44a6fdcdedaf2e39452545f6cc52e9b | 06d1c7bba25681ce12e2ab93ce461228afb6b984 | refs/heads/master | 2021-04-30T01:53:37.414318 | 2018-10-11T17:58:39 | 2018-10-11T17:58:39 | 121,491,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | def sum_digits(n):
ss = 0
while n:
ss += n % 10
n //= 10
return ss
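# Works by repeatedly adding the last digit (n % 10) and dropping it (n //= 10)
# until n reaches 0, e.g. sum_digits(1234) returns 10.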
| [
"[email protected]"
] | |
8cdd706344dff0e13cfd77f5b6b4f98005a35c96 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /bBExn57vLEsXgHC5m_13.py | a75afd38204a17adcd9211e34c8b31be9e80d941 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
def same_line(lst):
try:
return (lst[1][1] - lst[0][1]) / (lst[1][0] - lst[0][0]) == (lst[2][1] - lst[1][1]) / (lst[2][0] - lst[1][0])
    except ZeroDivisionError:
        # A zero denominator means two points share an x-coordinate; the three
        # points are then collinear only if they all lie on the same vertical line.
        return lst[0][0] == lst[1][0] == lst[2][0]
| [
"[email protected]"
] | |
e2e902fe476469280d8f67738cf676ce097be6c5 | 48290f34b95013e1d25b98a73fdd8f879c4b5b7a | /login-register.py | 06cfab88082c20f8d6c6d00093327aba2e0b705b | [] | no_license | anmolrajaroraa/core-python-july | 584a3b055c39140b8c79c1158b366f8bdc4a015d | 0fada23d8d969e548dadb6b9935aff1429f13a64 | refs/heads/master | 2022-12-15T23:17:19.786059 | 2020-09-13T17:37:51 | 2020-09-13T17:37:51 | 278,815,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | import csv
# comma separated values
print('''
1. Login
2. Register
''')
choice = int(input("Enter choice: "))
# if choice == 1:
# isLoginSuccessful = False
# usernameOrEmail = input("Enter username/email: ")
# password = input("Enter password: ")
# with open("users.csv") as fileStream:
# reader = csv.reader(fileStream)
# for row in reader:
# if usernameOrEmail == row[0] or usernameOrEmail == row[2]:
# if password == row[3]:
# print("Login successful!")
# isLoginSuccessful = True
# break
# if not isLoginSuccessful:
# print("Login failed")
if choice == 1:
usernameOrEmail = input("Enter username/email: ")
password = input("Enter password: ")
with open("users.csv") as fileStream:
reader = csv.reader(fileStream)
for row in reader:
if usernameOrEmail == row[0] or usernameOrEmail == row[2]:
if password == row[3]:
print("Login successful!")
break
else:
print("Login failed!")
    # for-else block:
    # the 'else' clause above belongs to the 'for' loop, not to an 'if'.
    # If the loop finishes without hitting 'break', the 'else' block runs;
    # if the loop is broken out of early, the 'else' block is skipped (see the short sketch below).
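    # Short illustrative sketch of that behaviour (standalone example, not part of this script's flow):
    # for item in [1, 2, 3]:
    #     if item == 2:
    #         break            # loop broken -> the 'else' below would be skipped
    # else:
    #     print("runs only when the loop completes without break")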
elif choice == 2:
emailExists = False
username = input("Enter username: ")
fullname = input("Enter fullname: ")
email = input("Enter email: ")
password = input("Enter password: ")
# fileStream = open("users.csv", "w")
# fileStream.close()
with open("users.csv") as fileStream:
reader = csv.reader(fileStream)
for row in reader:
# print(row)
emailFromDB = row[2]
if email == emailFromDB:
print("Email already registered..please login")
emailExists = True
break
if not emailExists:
with open("users.csv", "a", newline='') as fileStream:
writer = csv.writer(fileStream)
writer.writerow([username, fullname, email, password])
print("Registered successfully...")
| [
"[email protected]"
] | |
330a65e3a647bee90a48d3f323e928e718b549f5 | 1dc67a30f9af553243088668d51bc4e75e87d83d | /python/dazhewan/day19/打着玩/super_init.py | c76074a2ffcc59ce4d27de7f348f85e81ddb37c5 | [] | no_license | houyinhu/AID1812 | 00db45b3e8905bd069b31f2e7689f83bca3fa61f | 8eeb9f06ed9f4e742d480354ef0e336dfe8c2f17 | refs/heads/master | 2020-04-27T16:33:57.275890 | 2019-04-10T01:09:51 | 2019-04-10T01:09:51 | 174,486,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | #super_init.py
# This example demonstrates explicitly calling the base class __init__ method using the super() function
class Human:
def __init__(self,n,a):
self.name = n
self.age = a
print("Human的__init__方法被调用")
def infos(self):
        print('Name:', self.name)
        print('Age:', self.age)
class Student(Human):
def __init__(self,n,a,s=0):
        super().__init__(n,a)  # explicitly call the parent class's __init__ method
        self.score = s  # add the score attribute
        print("Student's __init__ method was called")
def infos(self):
        super().infos()  # call the parent class's method
        print("Score:", self.score)
s1 = Student('小张',20,100)
s1.infos()
| [
"[email protected]"
] | |
a456e19cd9296ebd7f72cb740ff6165d5b72db52 | 1384435f0e0cf706db82d0672d5fe9e3bc0cf5a8 | /agilo/ticket/tests/workflow_test.py | 4fc84cd6f5053468c692e14cb4e4a51a6219361f | [] | no_license | djangsters/agilo | 1e85d776ab4ec2fa67a6366e72206bbad2930226 | 1059b76554363004887b2a60953957f413b80bb0 | refs/heads/master | 2016-09-05T12:16:51.476308 | 2013-12-18T21:19:09 | 2013-12-18T21:19:09 | 15,294,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,979 | py | # -*- encoding: utf-8 -*-
# Copyright 2009 Agile42 GmbH, Berlin (Germany)
# Copyright 2011 Agilo Software GmbH All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from agilo.test import AgiloTestCase
from agilo.ticket import AgiloTicket
from agilo.ticket.workflow_support import TicketStatusManipulator, \
TransitionFinder, TicketHierarchyMover
from agilo.utils import Key, Status, Type
class TestFindTransitionInWorkflow(AgiloTestCase):
def setUp(self):
self.super()
self.task = AgiloTicket(self.env, t_type=Type.TASK)
# this task is not stored in the db on purpose - so I can check
# that no workflow does any permanent damage!
del self.task._old[Key.TYPE]
self._set_status_to(Status.NEW)
req = self.teh.mock_request('foo')
self.finder = TransitionFinder(self.env, req, self.task)
self.assert_equals({}, self.task._old)
def _set_status_to(self, status):
self.task[Key.STATUS] = status
del self.task._old[Key.STATUS]
self.assert_equals({}, self.task._old)
def test_can_find_transition_from_new_to_in_progress(self):
self.assert_equals(Status.NEW, self.task[Key.STATUS])
transition = self.finder.transition_to_in_progress_state()
self.assert_equals(['accept'], transition)
self.assert_equals({}, self.task._old)
def test_can_find_direct_transition_from_accepted_to_closed(self):
self._set_status_to(Status.ACCEPTED)
transition = self.finder.transition_to_closed_state()
self.assert_equals(['resolve'], transition)
self.assert_equals({}, self.task._old)
def test_can_find_direct_transition_from_assigned_to_new(self):
self.teh.change_workflow_config([('putback', 'assigned -> new')])
self._set_status_to(Status.ASSIGNED)
transition = self.finder.transition_to_new_state()
self.assert_equals(['putback'], transition)
self.assert_equals({}, self.task._old)
def test_can_find_even_indirect_transitions(self):
self.teh.change_workflow_config([('putback', 'assigned -> new')])
self._set_status_to(Status.ACCEPTED)
transition = self.finder.transition_to_new_state()
self.assert_equals(['reassign', 'putback'], transition)
self.assert_equals({}, self.task._old)
def test_use_shortest_transition(self):
self.teh.change_workflow_config([('ask', 'assigned -> needinfo'),
('invalidate', 'needinfo -> new'),
('putback', 'assigned -> new'),
])
self._set_status_to(Status.ACCEPTED)
transition = self.finder.transition_to_new_state()
self.assert_equals(['reassign', 'putback'], transition)
self.assert_equals({}, self.task._old)
def test_return_none_for_assigned_to_new_if_no_transition_allowed(self):
self._set_status_to('assigned')
transition = self.finder.transition_to_new_state()
self.assert_none(transition)
self.assert_equals({}, self.task._old)
def test_return_none_for_reopenend_to_new_if_no_transition_allowed(self):
self._set_status_to(Status.REOPENED)
transition = self.finder.transition_to_new_state()
self.assert_none(transition)
self.assert_equals({}, self.task._old)
def test_empty_transition_if_ticket_is_already_in_target_state(self):
self.assert_equals(Status.NEW, self.task[Key.STATUS])
self.assert_equals([], self.finder.transition_to_new_state())
self._set_status_to(Status.ACCEPTED)
self.assert_equals([], self.finder.transition_to_in_progress_state())
self._set_status_to(Status.CLOSED)
self.assert_equals([], self.finder.transition_to_closed_state())
class TestManipulateTicketStatus(AgiloTestCase):
def setUp(self):
self.super()
self.task = AgiloTicket(self.env, t_type=Type.TASK)
# this task is not stored in the db on purpose - so I can check
# that no workflow does any permanent damage!
del self.task._old[Key.TYPE]
req = self.teh.mock_request('foo')
self.manipulator = TicketStatusManipulator(self.env, req, self.task)
self.assert_equals({}, self.task._old)
def _set_status_to(self, status):
self.task[Key.STATUS] = status
del self.task._old[Key.STATUS]
self.assert_equals({}, self.task._old)
def test_ignores_workflow_if_no_valid_transition_to_new_was_found(self):
self._set_status_to('assigned')
self.manipulator.change_status_to('new')
self.assert_equals(Status.NEW, self.task[Key.STATUS])
def test_delete_owner_if_new_status_is_new(self):
self._set_status_to(Status.ACCEPTED)
self.task[Key.OWNER] = 'foo'
self.manipulator.change_status_to('new')
self.assert_equals(Status.NEW, self.task[Key.STATUS])
self.assert_equals('', self.task[Key.OWNER])
def test_delete_resolution_if_ticket_was_closed_before(self):
self._set_status_to(Status.CLOSED)
self.task[Key.OWNER] = 'foo'
self.task[Key.RESOLUTION] = Status.RES_FIXED
self.manipulator.change_status_to('in_progress')
self.assert_equals(Status.REOPENED, self.task[Key.STATUS])
self.assert_equals('', self.task[Key.RESOLUTION])
def test_can_ignore_workflow_for_transition_to_closed(self):
self.teh.change_workflow_config([('resolve', '* -> in_qa')])
self._set_status_to(Status.ACCEPTED)
self.task[Key.OWNER] = 'foo'
self.manipulator.change_status_to('closed')
self.assert_equals(Status.CLOSED, self.task[Key.STATUS])
self.assert_equals(Status.RES_FIXED, self.task[Key.RESOLUTION])
def test_can_ignore_workflow_for_transition_to_in_progress(self):
self.teh.change_workflow_config([('reopen', 'assigned -> new')])
self._set_status_to(Status.CLOSED)
self.task[Key.OWNER] = 'bar'
self.task[Key.RESOLUTION] = Status.RES_FIXED
self.manipulator.change_status_to('in_progress')
self.assert_equals(Status.ACCEPTED, self.task[Key.STATUS])
self.assert_equals('', self.task[Key.RESOLUTION])
self.assert_equals('foo', self.task[Key.OWNER])
def test_can_ignore_workflow_for_transition_custom_ticket_status(self):
self.teh.change_workflow_config([('fnordify', 'new -> fnord')])
self._set_status_to(Status.NEW)
self.manipulator.change_status_to('fnord')
self.assert_equals('fnord', self.task[Key.STATUS])
def test_will_choose_assigned_as_default_in_progress_status(self):
# not sure in what order the workflows are found, but 'abc' helped trigger this bug
# since it's alphabetically smaller than 'accept'
self.teh.change_workflow_config([('abc', 'new -> fnord')])
self._set_status_to(Status.NEW)
self.manipulator.change_status_to('in_progress')
self.assert_equals(Status.ACCEPTED, self.task[Key.STATUS])
def test_can_transition_to_custom_ticket_status(self):
self.teh.change_workflow_config([('fnordify', 'new -> fnord')])
self._set_status_to(Status.NEW)
self.manipulator.change_status_to('fnord')
self.assert_equals('fnord', self.task[Key.STATUS])
class TestMoveTicketHierarchyOnSprintChange(AgiloTestCase):
def setUp(self):
self.super()
self.old_sprint = 'Old Sprint'
self.new_sprint = 'New Sprint'
self.teh.create_sprint(self.old_sprint)
self.teh.create_sprint(self.new_sprint)
self._create_story_and_task()
def _create_story_and_task(self):
self.story = self.teh.create_story(sprint=self.old_sprint)
self.task = self.teh.create_task(sprint=self.old_sprint)
self.assert_true(self.story.link_to(self.task))
def _assert_ticket_has_sprint(self, ticket_id, sprint_name):
ticket = AgiloTicket(self.env, ticket_id)
self.assert_equals(sprint_name, ticket[Key.SPRINT])
def _assert_ticket_has_new_sprint(self, ticket_id):
self._assert_ticket_has_sprint(ticket_id, self.new_sprint)
def _assert_ticket_has_old_sprint(self, ticket_id):
self._assert_ticket_has_sprint(ticket_id, self.old_sprint)
def _assert_move_task_of_story(self):
mover = TicketHierarchyMover(self.env, self.story, self.old_sprint, self.new_sprint)
self._assert_ticket_has_old_sprint(self.task.id)
mover.execute()
self._assert_ticket_has_new_sprint(self.task.id)
def test_can_move_task_of_a_story(self):
self._assert_move_task_of_story()
def test_can_pull_in_task_of_a_story(self):
self.old_sprint = ''
self._create_story_and_task()
self._assert_move_task_of_story()
def test_can_pull_out_task_of_a_story(self):
self.new_sprint = ''
self._assert_move_task_of_story()
def test_can_have_identical_source_and_destination(self):
self.new_sprint = self.old_sprint
self._assert_move_task_of_story()
def test_does_not_move_closed_task(self):
self.task[Key.STATUS] = Status.CLOSED
self.task.save_changes(None, None)
mover = TicketHierarchyMover(self.env, self.story, self.old_sprint, self.new_sprint)
mover.execute()
self._assert_ticket_has_old_sprint(self.task.id)
def test_does_not_move_task_with_different_sprint(self):
self.teh.create_sprint('Third Sprint')
self.task[Key.SPRINT] = 'Third Sprint'
self.task.save_changes(None, None)
mover = TicketHierarchyMover(self.env, self.story, self.old_sprint, self.new_sprint)
mover.execute()
self._assert_ticket_has_sprint(self.task.id, 'Third Sprint')
def test_can_move_indirect_task(self):
bug = self.teh.create_ticket(t_type=Type.BUG, props=dict(sprint=self.old_sprint))
self.assert_true(bug.link_to(self.story))
mover = TicketHierarchyMover(self.env, bug, self.old_sprint, self.new_sprint)
mover.execute()
self._assert_ticket_has_new_sprint(self.task.id)
def test_will_store_default_author_on_changelog(self):
self._assert_move_task_of_story()
self.assert_equals("agilo", self.teh.last_changelog_author(self.task))
def test_will_store_custom_author_on_changelog(self):
mover = TicketHierarchyMover(self.env, self.story, self.old_sprint,
self.new_sprint, changelog_author="fnord")
mover.execute()
self.assert_equals("fnord", self.teh.last_changelog_author(self.task))
def test_does_not_explode_if_child_has_no_sprint_field(self):
self.teh.allow_link_from_to(Type.USER_STORY, Type.REQUIREMENT)
# recreate object because the allowed links are cached inside
self.story = AgiloTicket(self.env, self.story.id)
requirement = self.teh.create_ticket(t_type=Type.REQUIREMENT)
self.assert_true(self.story.link_to(requirement))
mover = TicketHierarchyMover(self.env, self.story, self.old_sprint, self.new_sprint)
mover.execute()
self._assert_ticket_has_sprint(requirement.id, '')
| [
"[email protected]"
] | |
0a3a0097b6cf311e49d6da4a0de4b7ef22c3ab65 | dff749b6652cc6b5ba804d491b3c981f2a9599e1 | /pytests/cbas/cbas_dcp_state.py | 03a677fa6579ceeecb3c6325aff0a53bb9c2c7d1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pavithra-mahamani/TAF | 7c6f40beee7fb14f28f7016a246c49d3e728b938 | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | refs/heads/master | 2020-07-29T01:18:56.807783 | 2019-09-20T22:51:50 | 2019-09-20T22:51:50 | 209,612,844 | 0 | 0 | Apache-2.0 | 2019-09-19T17:35:20 | 2019-09-19T17:35:20 | null | UTF-8 | Python | false | false | 15,424 | py | import time
import json
from cbas.cbas_base import CBASBaseTest
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.tuq_generators import JsonGenerator
class CBASDCPState(CBASBaseTest):
def setUp(self):
super(CBASDCPState, self).setUp()
self.log.info("Establish remote connection to CBAS node and Empty analytics log")
self.shell = RemoteMachineShellConnection(self.cbas_node)
self.shell.execute_command("echo '' > /opt/couchbase/var/lib/couchbase/logs/analytics*.log")
self.log.info("Load documents in the default bucket")
self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, self.num_items)
self.log.info("Create connection")
self.cbas_util.createConn(self.cb_bucket_name)
self.log.info("Create dataset")
self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)
self.log.info("Add a CBAS nodes")
self.assertTrue(self.add_node(self.servers[1], services=["cbas"], rebalance=True), msg="Failed to add CBAS node")
self.log.info("Connect to Local link")
self.cbas_util.connect_link()
self.log.info("Validate count on CBAS")
self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items), msg="Count mismatch on CBAS")
self.log.info("Kill CBAS/JAVA Process on NC node")
self.shell.kill_multiple_process(['java', 'cbas'])
"""
test_dcp_state_with_cbas_bucket_connected_kv_bucket_deleted,default_bucket=True,cb_bucket_name=default,cbas_dataset_name=ds,items=10000
"""
def test_dcp_state_with_cbas_bucket_connected_kv_bucket_deleted(self):
"""
Covers the scenario: CBAS bucket is connected, KV bucket is deleted
Expected Behaviour: Rebalance must pass once we receive the DCP state API response
"""
self.log.info("Delete KV bucket")
self.delete_bucket_or_assert(serverInfo=self.master)
self.log.info("Check DCP state")
start_time = time.time()
dcp_state_captured = False
while time.time() < start_time + 120:
try:
status, content, _ = self.cbas_util.fetch_dcp_state_on_cbas(self.cbas_dataset_name)
if status:
dcp_state_captured = True
content = json.loads(content)
break
except:
pass
self.log.info("Check DCP state is inconsistent, and rebalance passes since KV bucket does not exist and we don't care about the state")
self.assertTrue(dcp_state_captured, msg="DCP state not found. Failing the test")
self.assertFalse(content["exact"], msg="DCP state is consistent. Failing the test since subsequent rebalance will pass.")
self.log.info("Add a CBAS nodes")
self.assertTrue(self.add_node(self.servers[3], services=["cbas"], rebalance=False), msg="Failed to add CBAS node")
self.log.info("Rebalance in CBAS node")
rebalance_success = False
try:
rebalance_success = self.rebalance()
except Exception as e:
pass
self.assertTrue(rebalance_success, msg="Rebalance in of CBAS node must succeed since DCP state API returned success")
"""
test_dcp_state_with_cbas_bucket_disconnected_kv_bucket_deleted,default_bucket=True,cb_bucket_name=default,cbas_dataset_name=ds,items=10000
"""
def test_dcp_state_with_cbas_bucket_disconnected_kv_bucket_deleted(self):
"""
Covers the scenario: CBAS bucket is disconnected, KV bucket deleted
Expected Behaviour: Rebalance must succeed and we must see in the logs "Bucket Bucket:Default.cbas doesn't exist in KV anymore... nullifying its DCP state"
"""
self.log.info("Delete KV bucket")
self.delete_bucket_or_assert(serverInfo=self.master)
self.log.info("Disconnect from CBAS bucket")
start_time = time.time()
while time.time() < start_time + 120:
try:
self.cbas_util.disconnect_link()
break
except Exception as e:
pass
self.log.info("Add a CBAS nodes")
self.assertTrue(self.add_node(self.servers[3], services=["cbas"], rebalance=False),
msg="Failed to add a CBAS node")
self.log.info("Rebalance in CBAS node")
self.assertTrue(self.rebalance(), msg="Rebalance in CBAS node failed")
self.log.info("Grep Analytics logs for message")
result, _ = self.shell.execute_command("grep 'exist in KV anymore... nullifying its DCP state' /opt/couchbase/var/lib/couchbase/logs/analytics*.log")
self.assertTrue("nullifying its DCP state" in result[0], msg="Expected message 'nullifying its DCP state' not found")
"""
test_dcp_state_with_cbas_bucket_disconnected_kv_bucket_deleted_and_recreate,default_bucket=True,cb_bucket_name=default,cbas_dataset_name=ds,items=10000
"""
def test_dcp_state_with_cbas_bucket_disconnected_kv_bucket_deleted_and_recreate(self):
"""
Covers the scenario: CBAS bucket is disconnected, CB bucket is deleted and then recreated
Expected Behaviour: Rebalance succeeds with the log message "Bucket Bucket:Default.cbas doesn't exist in KV anymore... nullifying its DCP state"; then, after the bucket is re-created, data is re-ingested from 0
"""
self.log.info("Delete KV bucket")
self.delete_bucket_or_assert(serverInfo=self.master)
self.log.info("Disconnect from CBAS bucket")
start_time = time.time()
while time.time() < start_time + 120:
try:
self.cbas_util.disconnect_link()
break
except Exception as e:
pass
self.log.info("Add a CBAS nodes")
self.assertTrue(self.add_node(self.servers[3], services=["cbas"], rebalance=False),
msg="Failed to add a CBAS node")
self.log.info("Rebalance in CBAS node")
self.assertTrue(self.rebalance(), msg="Rebalance in CBAS node failed")
self.log.info("Grep Analytics logs for message")
result, _ = self.shell.execute_command("grep 'exist in KV anymore... nullifying its DCP state' /opt/couchbase/var/lib/couchbase/logs/analytics*.log")
self.assertTrue("nullifying its DCP state" in result[0], msg="Expected message 'nullifying its DCP state' not found")
self.log.info("Recreate KV bucket")
self.create_default_bucket()
self.log.info("Load documents in the default bucket")
self.perform_doc_ops_in_all_cb_buckets(self.num_items // 100, "create", 0, self.num_items // 100)
self.log.info("Create connection")
self.cbas_util.createConn(self.cb_bucket_name)
self.log.info("Connect to Local link")
self.cbas_util.connect_link(with_force=True)
self.log.info("Validate count on CBAS post KV bucket re-created")
self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items // 100), msg="Count mismatch on CBAS")
"""
test_dcp_state_with_cbas_bucket_disconnected_cb_bucket_exist,default_bucket=True,cb_bucket_name=default,cbas_dataset_name=ds,items=10000,user_action=connect_cbas_bucket
test_dcp_state_with_cbas_bucket_disconnected_cb_bucket_exist,default_bucket=True,cb_bucket_name=default,cbas_dataset_name=ds,items=10000
"""
def test_dcp_state_with_cbas_bucket_disconnected_cb_bucket_exist(self):
"""
Covers the scenario: CBAS bucket is disconnected
Expected Behaviour: Rebalance fails with the user action "Connect the bucket or drop the dataset"
"""
self.log.info("Disconnect from CBAS bucket")
start_time = time.time()
while time.time() < start_time + 120:
try:
self.cbas_util.disconnect_link()
break
except Exception as e:
pass
self.log.info("Add a CBAS nodes")
self.assertTrue(self.add_node(self.servers[3], services=["cbas"], rebalance=False),
msg="Failed to add a CBAS node")
self.log.info("Rebalance in CBAS node")
rebalance_success = False
try:
rebalance_success = self.rebalance()
except Exception as e:
pass
if rebalance_success == False:
self.log.info("Grep Analytics logs for user action as rebalance in Failed")
result, _ = self.shell.execute_command("grep 'Datasets in different partitions have different DCP states.' /opt/couchbase/var/lib/couchbase/logs/analytics*.log")
self.assertTrue("User action: Connect the bucket:" in result[0] and "or drop the dataset: Default.ds" in result[0], msg="User action not found.")
user_action = self.input.param("user_action", "drop_dataset")
if user_action == "connect_cbas_bucket":
self.log.info("Connect back Local link")
self.cbas_util.connect_link()
self.sleep(15, message="Wait for link to be connected")
else:
self.log.info("Dropping the dataset")
self.cbas_util.drop_dataset(self.cbas_dataset_name)
self.log.info("Rebalance in CBAS node")
self.assertTrue(self.rebalance(), msg="Rebalance in CBAS node must succeed after user has taken the specified action.")
else:
self.log.info("Rebalance was successful as DCP state were consistent")
def tearDown(self):
super(CBASDCPState, self).tearDown()
class CBASPendingMutations(CBASBaseTest):
def setUp(self):
super(CBASPendingMutations, self).setUp()
"""
cbas.cbas_dcp_state.CBASPendingMutations.test_pending_mutations_idle_kv_system,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,items=200000
"""
def test_pending_mutations_idle_kv_system(self):
self.log.info("Load documents in KV")
self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, self.num_items, batch_size=5000)
self.log.info("Create dataset on the CBAS")
self.cbas_util.create_dataset_on_bucket(
self.cb_bucket_name, self.cbas_dataset_name)
self.log.info("Connect link")
self.cbas_util.connect_link()
self.log.info("Fetch cluster remaining mutations")
aggregate_remaining_mutations_list = []
while True:
status, content, _ = self.cbas_util.fetch_pending_mutation_on_cbas_cluster()
self.assertTrue(status, msg="Fetch pending mutations failed")
content = json.loads(content)
if content:
aggregate_remaining_mutations_list.append(content["Default.ds"])
total_count, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
if total_count == self.num_items:
break
self.log.info("Verify remaining mutation count is reducing as ingestion progress's")
self.log.info(aggregate_remaining_mutations_list)
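# Sanity checks on the sampled counts (a reading of the assertions below): no sample may exceed the
# total document count or go negative, and because the KV side is idle no new mutations arrive, so the
# pending-mutation list is expected to be non-increasing as ingestion catches up.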
is_remaining_mutation_count_reducing = True
for i in range(len(aggregate_remaining_mutations_list)):
if aggregate_remaining_mutations_list[i] > self.num_items or aggregate_remaining_mutations_list[i] < 0:
self.fail("Remaining mutation count must not be greater than total documents and must be non -ve")
for i in range(1, len(aggregate_remaining_mutations_list)):
if not aggregate_remaining_mutations_list[i-1] >= aggregate_remaining_mutations_list[i]:
is_remaining_mutation_count_reducing = False
break
self.log.info("Assert mutation progress API response")
self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items), msg="Count mismatch on CBAS")
self.assertTrue(len(aggregate_remaining_mutations_list) > 1, msg="Found no remaining mutations during ingestion")
self.assertTrue(is_remaining_mutation_count_reducing, msg="Remaining mutation count must reduce as ingestion progresses")
"""
cbas.cbas_dcp_state.CBASPendingMutations.test_pending_mutations_busy_kv_system,default_bucket=True,cb_bucket_name=default,cbas_bucket_name=cbas,cbas_dataset_name=ds,items=100000
"""
def test_pending_mutations_busy_kv_system(self):
self.log.info("Load documents in KV")
self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, self.num_items)
self.log.info("Create dataset on the CBAS")
self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name, self.cbas_dataset_name)
self.log.info("Connect link")
self.cbas_util.connect_link()
self.log.info("Perform async doc operations on KV")
json_generator = JsonGenerator()
generators = json_generator.generate_docs_simple(docs_per_day=self.num_items * 4, start=self.num_items)
kv_task = self._async_load_all_buckets(self.master, generators, "create", 0, batch_size=3000)
self.log.info("Fetch cluster remaining mutations")
aggregate_remaining_mutations_list = []
while True:
status, content, _ = self.cbas_util.fetch_pending_mutation_on_cbas_cluster()
self.assertTrue(status, msg="Fetch pending mutations failed")
content = json.loads(content)
if content:
aggregate_remaining_mutations_list.append(content["Default.ds"])
total_count, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
if total_count == self.num_items * 4:
break
self.log.info("Get KV ops result")
for task in kv_task:
task.get_result()
self.log.info("Verify remaining mutation count is reducing as ingestion progress's")
self.log.info(aggregate_remaining_mutations_list)
is_remaining_mutation_count_reducing = True
for i in range(len(aggregate_remaining_mutations_list)):
if aggregate_remaining_mutations_list[i] < 0:
self.fail("Remaining mutation count must be non -ve")
for i in range(1, len(aggregate_remaining_mutations_list)):
if not aggregate_remaining_mutations_list[i-1] >= aggregate_remaining_mutations_list[i]:
is_remaining_mutation_count_reducing = False
break
self.log.info("Assert mutation progress API response")
self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items * 4), msg="Count mismatch on CBAS")
self.assertTrue(len(aggregate_remaining_mutations_list) > 1, msg="Found no items during ingestion")
self.assertFalse(is_remaining_mutation_count_reducing, msg="Remaining mutation count must increase as ingestion progresses")
def tearDown(self):
super(CBASPendingMutations, self).tearDown() | [
"[email protected]"
] | |
f137634b0821b9597b08027552f1db74ad9bc5dc | 9508ccf2802becb4d19dd049b3496cf19d5f7b15 | /tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py | 6ff15635b716756dbc78a4c7c242e3ac27e9390a | [
"Apache-2.0"
] | permissive | etarakci-hvl/probability | e89485968e4660050424944b0ffdbbf617533fe4 | 7a0ce5e5beff91051028258dfbc7bc6cf0c4998d | refs/heads/master | 2020-11-25T20:39:53.290761 | 2019-12-18T02:13:04 | 2019-12-18T02:14:04 | 228,835,497 | 1 | 0 | Apache-2.0 | 2019-12-18T12:27:56 | 2019-12-18T12:27:56 | null | UTF-8 | Python | false | false | 3,873 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `leapfrog_integrator.py`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl
@test_util.test_all_tf_execution_regimes
class LeapfrogIntegratorTest(test_util.TestCase):
def setUp(self):
self._shape_param = 5.
self._rate_param = 10.
tf1.random.set_random_seed(10003)
np.random.seed(10003)
def assertAllFinite(self, x):
self.assertAllEqual(np.ones_like(x).astype(bool), np.isfinite(x))
def _log_gamma_log_prob(self, x, event_dims=()):
"""Computes log-pdf of a log-gamma random variable.
Args:
x: Value of the random variable.
event_dims: Dimensions not to treat as independent.
Returns:
log_prob: The log-pdf up to a normalizing constant.
"""
return tf.reduce_sum(
self._shape_param * x - self._rate_param * tf.exp(x),
axis=event_dims)
def _integrator_conserves_energy(self, x, independent_chain_ndims):
event_dims = tf.range(independent_chain_ndims, tf.rank(x))
target_fn = lambda x: self._log_gamma_log_prob(x, event_dims)
m = tf.random.normal(tf.shape(x))
log_prob_0 = target_fn(x)
old_energy = -log_prob_0 + 0.5 * tf.reduce_sum(m**2., axis=event_dims)
event_size = np.prod(
self.evaluate(x).shape[independent_chain_ndims:])
integrator = leapfrog_impl.SimpleLeapfrogIntegrator(
target_fn,
step_sizes=[0.09 / event_size],
num_steps=1000)
[[new_m], [_], log_prob_1, [_]] = integrator([m], [x])
new_energy = -log_prob_1 + 0.5 * tf.reduce_sum(new_m**2., axis=event_dims)
old_energy_, new_energy_ = self.evaluate([old_energy, new_energy])
tf1.logging.vlog(
1, 'average energy relative change: {}'.format(
(1. - new_energy_ / old_energy_).mean()))
self.assertAllClose(old_energy_, new_energy_, atol=0., rtol=0.02)
def _integrator_conserves_energy_wrapper(self, independent_chain_ndims):
"""Tests the long-term energy conservation of the leapfrog integrator.
The leapfrog integrator is symplectic, so for sufficiently small step
sizes it should be possible to run it more or less indefinitely without
the energy of the system blowing up or collapsing.
Args:
independent_chain_ndims: Python `int` scalar representing the number of
dims associated with independent chains.
"""
x = tf.constant(np.random.rand(50, 10, 2), np.float32)
self._integrator_conserves_energy(x, independent_chain_ndims)
def testIntegratorEnergyConservationNullShape(self):
self._integrator_conserves_energy_wrapper(0)
def testIntegratorEnergyConservation1(self):
self._integrator_conserves_energy_wrapper(1)
def testIntegratorEnergyConservation2(self):
self._integrator_conserves_energy_wrapper(2)
def testIntegratorEnergyConservation3(self):
self._integrator_conserves_energy_wrapper(3)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
23df7434ab536aa03632fc11bee9095c7e4d847e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/64/usersdata/208/37039/submittedfiles/atm.py | fffc892d1e1eeab4114a3f231991c77f9faa11fd | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
v=int(input('digite o valor que deseja sacar:'))
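# Greedy note breakdown (worked example added for clarity, not part of the original submission):
# for v = 87 -> n1 = 87//20 = 4 (remainder 7); n2 = 0; n3 = 7//5 = 1 (remainder 2); n4 = 2//2 = 1; n5 = 0,
# i.e. 87 = 4*20 + 0*10 + 1*5 + 1*2 + 0*1.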
n1=v//20
n2=(v%20)//10
n3=((v%20)%10)//5
n4=(((v%20)%10)%5)//2
n5=((((v%20)%10)%5)%2)//1
print('%d'%n1)
print('%d'%n2)
print('%d'%n3)
print('%d'%n4)
print('%d'%n5) | [
"[email protected]"
] | |
2f52aaac7e20119310401c3ba628d0ea489c2a5b | 0845b9e00b0046c409eff2b55c835c331190a2dc | /Example_code/bullets.py | 9f4598ab62b9538ba20843eecbbaf22de1f1f74d | [] | no_license | crazcalm/Learn_Pygame | edba44f4ff89add764ee3f6b2558172465f9cc26 | e93c482fb9eb392912627855b11ff2c36c22a191 | refs/heads/master | 2020-04-05T22:56:29.928626 | 2014-09-27T07:08:29 | 2014-09-27T07:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,486 | py | """
Show how to fire bullets.
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/PpdJjaiLX6A
"""
import pygame
import random
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
RED = ( 255, 0, 0)
BLUE = ( 0, 0, 255)
# --- Classes
class Block(pygame.sprite.Sprite):
""" This class represents the block. """
def __init__(self, color):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([20, 15])
self.image.fill(color)
self.rect = self.image.get_rect()
class Player(pygame.sprite.Sprite):
""" This class represents the Player. """
def __init__(self):
""" Set up the player on creation. """
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([20, 20])
self.image.fill(RED)
self.rect = self.image.get_rect()
def update(self):
""" Update the player's position. """
# Get the current mouse position. This returns the position
# as a list of two numbers.
pos = pygame.mouse.get_pos()
# Set the player x position to the mouse x position
self.rect.x = pos[0]
class Bullet(pygame.sprite.Sprite):
""" This class represents the bullet . """
def __init__(self):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([4, 10])
self.image.fill(BLACK)
self.rect = self.image.get_rect()
def update(self):
""" Move the bullet. """
self.rect.y -= 3
# --- Create the window
# Initialize Pygame
pygame.init()
# Set the height and width of the screen
screen_width = 700
screen_height = 400
screen = pygame.display.set_mode([screen_width, screen_height])
# --- Sprite lists
# This is a list of every sprite. All blocks and the player block as well.
all_sprites_list = pygame.sprite.Group()
# List of each block in the game
block_list = pygame.sprite.Group()
# List of each bullet
bullet_list = pygame.sprite.Group()
# --- Create the sprites
for i in range(50):
# This represents a block
block = Block(BLUE)
# Set a random location for the block
block.rect.x = random.randrange(screen_width)
block.rect.y = random.randrange(350)
# Add the block to the list of objects
block_list.add(block)
all_sprites_list.add(block)
# Create a red player block
player = Player()
all_sprites_list.add(player)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
score = 0
player.rect.y = 370
# -------- Main Program Loop -----------
while not done:
# --- Event Processing
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.MOUSEBUTTONDOWN:
# Fire a bullet if the user clicks the mouse button
bullet = Bullet()
# Set the bullet so it is where the player is
bullet.rect.x = player.rect.x
bullet.rect.y = player.rect.y
# Add the bullet to the lists
all_sprites_list.add(bullet)
bullet_list.add(bullet)
# --- Game logic
# Call the update() method on all the sprites
all_sprites_list.update()
# Calculate mechanics for each bullet
for bullet in bullet_list:
# See if it hit a block
block_hit_list = pygame.sprite.spritecollide(bullet, block_list, True)
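# (The True argument above is spritecollide's dokill flag: each hit block is killed, i.e. removed
# from every sprite group it belongs to, so it disappears from the screen on the next draw.)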
# For each block hit, remove the bullet and add to the score
for block in block_hit_list:
bullet_list.remove(bullet)
all_sprites_list.remove(bullet)
score += 1
print(score)
# Remove the bullet if it flies up off the screen
if bullet.rect.y < -10:
bullet_list.remove(bullet)
all_sprites_list.remove(bullet)
# --- Draw a frame
# Clear the screen
screen.fill(WHITE)
# Draw all the spites
all_sprites_list.draw(screen)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 20 frames per second
clock.tick(60)
pygame.quit()
| [
"[email protected]"
] | |
047b96b5f23a5f677481e75436be67e963f16f40 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /scripts/mastersort/scripts_dir/p7542_run2L4.py | d95b7a8dccc8565de88643a95318f55cc9b4a0c0 | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7542', 'run2L4']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2727/E2727_e363504/s414724_1904_2L4_s23', '/ifs/scratch/pimri/soccog/test_working/7542/run2L4')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7542/run2L4','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7542/run2L4')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7542/run2L4'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7542/run2L4', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7542/run2L4', f + '.dcm')
os.rename(old, new)
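# The extracted MRDC slices have no extension; renaming them to *.dcm (presumably) lets the
# dcm_ops.cnv_dcm conversion step below pick them up as DICOM files.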
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7542/run2L4', '7542_run2L4', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7542/run2L4', '7542_run2L4', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"[email protected]"
] | |
d80cceffb15b6da1f32f7e511c4fb6433d8b362f | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003/W_w_M_to_C_pyr/pyr_6s/L3/step10_a.py | 33a90e07ba8ad4d1d18aa65e7efbaf7acd074ac4 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,406 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the currently executed step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path; later we need to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 converts the length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] would strip the "step1x_" prefix; later decided meaningful names can stay, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] would strip the "mask_" prefix; mask_ was only added because a python module name cannot start with a digit; later decided the automatic ordering is acceptable, so changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_6side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder "one level above" result_dir! A nested exp_dir is fine too~
For example, with exp_dir = "6_mask_unet/your_chosen_name", every result_dir ends up under:
    6_mask_unet/your_chosen_name/result_a
    6_mask_unet/your_chosen_name/result_b
    6_mask_unet/your_chosen_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_W_and_I_gt_F
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_W").copy()] ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
###################
############# 1s1
######### 2s1
### 3s1
ch032_1side_1__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
###################
############# 1s2
######### 2s1
### 3s1
ch032_1side_2__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
######### 2s1
### 3s1
ch032_1side_2__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s2
ch032_1side_2__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
###################
############# 1s3
######### 2s1
### 3s1
ch032_1side_3__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
######### 2s2
### 3s1
ch032_1side_3__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s2
ch032_1side_3__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
######### 2s3
### 3s1
ch032_1side_3__2side_3__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s2
ch032_1side_3__2side_3__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s3
ch032_1side_3__2side_3__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
###################
############# 1s4
######### 2s1
### 3s1
ch032_1side_4__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
######### 2s2
### 3s1
ch032_1side_4__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s2
ch032_1side_4__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
######### 2s3
### 3s1
ch032_1side_4__2side_3__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s2
ch032_1side_4__2side_3__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s3
ch032_1side_4__2side_3__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
######### 2s4
### 3s1
ch032_1side_4__2side_4__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s2
ch032_1side_4__2side_4__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s3
ch032_1side_4__2side_4__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
### 3s4
ch032_1side_4__2side_4__3side_4_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4.kong_model.model_describe) .set_train_args(epochs= 3) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it, so execution does not fall through to the code below that is meant for step10_b_subprocess.py
ch032_1side_1__2side_1__3side_1_4side_1_5s1_6s1.build().run()
# print('no argument')
sys.exit()
    ### The part below is for step10_b_subprocess.py; it is equivalent to running "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" from the cmd line
eval(sys.argv[1])
| [
"[email protected]"
] | |
259bd4310a5b596b737a9ccae5087953d99928ea | 74bc48ba64859a63855d204f1efd31eca47a223f | /Nature/9999.Reference_DataAugmentation.py | 6ab2dd8feba11d86b67d6f5132136efada9cf3c9 | [] | no_license | PraveenAdepu/kaggle_competitions | 4c53d71af12a615d5ee5f34e5857cbd0fac7bc3c | ed0111bcecbe5be4529a2a5be2ce4c6912729770 | refs/heads/master | 2020-09-02T15:29:51.885013 | 2020-04-09T01:50:55 | 2020-04-09T01:50:55 | 219,248,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,841 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 01 19:08:48 2017
@author: SriPrav
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 20:02:03 2017
@author: SriPrav
"""
import numpy as np
np.random.seed(2016)
import os
import glob
import cv2
import datetime
import pandas as pd
import time
import warnings
warnings.filterwarnings("ignore")
from sklearn.cross_validation import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from sklearn.metrics import log_loss
from keras.preprocessing.image import ImageDataGenerator
from keras import __version__ as keras_version
ROWS = 64
COLUMNS = 64
CHANNELS = 3
VERBOSEFLAG = 1
def get_im_cv2(path):
img = cv2.imread(path)
resized = cv2.resize(img, (ROWS, COLUMNS), cv2.INTER_LINEAR)
return resized
def load_train():
X_train = []
X_train_id = []
y_train = []
start_time = time.time()
print('Read train images')
folders = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
for fld in folders:
index = folders.index(fld)
print('Load folder {} (Index: {})'.format(fld, index))
path = os.path.join('C:\Users\SriPrav\Documents\R\\18Nature', 'input', 'train', fld, '*.jpg')
files = glob.glob(path)
for fl in files:
flbase = os.path.basename(fl)
img = get_im_cv2(fl)
X_train.append(img)
X_train_id.append(flbase)
y_train.append(index)
print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))
return X_train, y_train, X_train_id
def load_test():
path = os.path.join('C:\Users\SriPrav\Documents\R\\18Nature', 'input', 'test_stg1', '*.jpg')
files = sorted(glob.glob(path))
X_test = []
X_test_id = []
for fl in files:
flbase = os.path.basename(fl)
img = get_im_cv2(fl)
X_test.append(img)
X_test_id.append(flbase)
return X_test, X_test_id
def create_submission(predictions, test_id, info):
result1 = pd.DataFrame(predictions, columns=['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT'])
result1.loc[:, 'image'] = pd.Series(test_id, index=result1.index)
imgcolumn = result1['image']
result1.drop(labels=['image'], axis=1,inplace = True)
result1.insert(0, 'image', imgcolumn)
now = datetime.datetime.now()
sub_file = 'submissions\submission_' + info + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.csv'
result1.to_csv(sub_file, index=False)
def read_and_normalize_train_data():
train_data, train_target, train_id = load_train()
print('Convert to numpy...')
train_data = np.array(train_data, dtype=np.uint8)
train_target = np.array(train_target, dtype=np.uint8)
print('Reshape...')
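    # reorder axes from (N, H, W, C) to channels-first (N, C, H, W), as expected by dim_ordering='th' in the model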
train_data = train_data.transpose((0, 3, 1, 2))
print('Convert to float...')
train_data = train_data.astype('float32')
train_data = train_data / 255
train_target = np_utils.to_categorical(train_target, 8)
print('Train shape:', train_data.shape)
print(train_data.shape[0], 'train samples')
return train_data, train_target, train_id
def read_and_normalize_test_data():
start_time = time.time()
test_data, test_id = load_test()
test_data = np.array(test_data, dtype=np.uint8)
test_data = test_data.transpose((0, 3, 1, 2))
test_data = test_data.astype('float32')
test_data = test_data / 255
print('Test shape:', test_data.shape)
print(test_data.shape[0], 'test samples')
print('Read and process test data time: {} seconds'.format(round(time.time() - start_time, 2)))
return test_data, test_id
def dict_to_list(d):
ret = []
for i in d.items():
ret.append(i[1])
return ret
def merge_several_folds_mean(data, nfolds):
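    # element-wise average of the per-fold prediction arrays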
a = np.array(data[0])
for i in range(1, nfolds):
a += np.array(data[i])
a /= nfolds
return a.tolist()
#def create_model():
# model = Sequential()
# model.add(ZeroPadding2D((1, 1), input_shape=(CHANNELS,ROWS, COLUMNS), dim_ordering='th'))
# model.add(Convolution2D(4, 3, 3, activation='relu', dim_ordering='th'))
# model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
# model.add(Convolution2D(4, 3, 3, activation='relu', dim_ordering='th'))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
#
# model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
# model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='th'))
# model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
# model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='th'))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
#
# model.add(Flatten())
# model.add(Dense(32, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(32, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(8, activation='softmax'))
#
# sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(optimizer=sgd, loss='categorical_crossentropy')
#
# return model
def create_model():
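    # small channels-first CNN: two 3x3 conv blocks (8 and 16 filters) with max-pooling and dropout,
    # followed by dense layers 96 -> 24 -> 8-way softmax, trained with SGD + momentum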
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(CHANNELS,ROWS, COLUMNS), dim_ordering='th'))
model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='th', init='he_uniform'))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
model.add(Convolution2D(16, 3, 3, activation='relu', dim_ordering='th', init='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(96, activation='relu',init='he_uniform'))
model.add(Dropout(0.4))
model.add(Dense(24, activation='relu',init='he_uniform'))
model.add(Dropout(0.2))
model.add(Dense(8, activation='softmax'))
sgd = SGD(lr=1e-2, decay=1e-4, momentum=0.89, nesterov=False)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
return model
def get_validation_predictions(train_data, predictions_valid):
pv = []
for i in range(len(train_data)):
pv.append(predictions_valid[i])
return pv
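# on-the-fly augmentation used during training: random rotations up to 20 degrees,
# width/height shifts up to 20% of the image size, and horizontal flips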
datagen = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True
)
def run_cross_validation_create_models(nfolds=10):
# input image dimensions
batch_size = 32
nb_epoch = 13
random_state = 2017
train_data, train_target, train_id = read_and_normalize_train_data()
yfull_train = dict()
kf = KFold(len(train_id), n_folds=nfolds, shuffle=True, random_state=random_state)
num_fold = 0
sum_score = 0
models = []
for train_index, test_index in kf:
model = create_model()
X_train = train_data[train_index]
Y_train = train_target[train_index]
X_valid = train_data[test_index]
Y_valid = train_target[test_index]
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
callbacks = [
EarlyStopping(monitor='val_loss', patience=3, verbose=VERBOSEFLAG),
]
# model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
# shuffle=True, verbose=VERBOSEFLAG, validation_data=(X_valid, Y_valid),
# callbacks=callbacks)
# model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), nb_epoch=nb_epoch,
# verbose=VERBOSEFLAG, validation_data=(X_valid, Y_valid)
# )
model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
samples_per_epoch=len(X_train), nb_epoch=nb_epoch,validation_data=(X_valid, Y_valid),verbose=VERBOSEFLAG)
predictions_valid = model.predict(X_valid.astype('float32'), batch_size=batch_size, verbose=2)
score = log_loss(Y_valid, predictions_valid)
print('Score log_loss: ', score)
sum_score += score*len(test_index)
# Store valid predictions
for i in range(len(test_index)):
yfull_train[test_index[i]] = predictions_valid[i]
models.append(model)
score = sum_score/len(train_data)
print("Log_loss train independent avg: ", score)
info_string = 'loss_' + str(score) + '_folds_' + str(nfolds) + '_ep_' + str(nb_epoch)
return info_string, models
def run_cross_validation_process_test(info_string, models):
batch_size = 16
num_fold = 0
yfull_test = []
test_id = []
nfolds = len(models)
for i in range(nfolds):
model = models[i]
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
test_data, test_id = read_and_normalize_test_data()
test_prediction = model.predict(test_data, batch_size=batch_size, verbose=VERBOSEFLAG)
yfull_test.append(test_prediction)
test_res = merge_several_folds_mean(yfull_test, nfolds)
info_string = 'loss_' + info_string \
+ '_folds_' + str(nfolds)
create_submission(test_res, test_id, info_string)
if __name__ == '__main__':
print('Keras version: {}'.format(keras_version))
num_folds = 5
info_string, models = run_cross_validation_create_models(num_folds)
run_cross_validation_process_test(info_string, models) | [
"[email protected]"
] | |
452bb4a716c3bd8adec7df2878cd13e873b5b57d | d75fbceb28ad14b07ae4057a8b23ec0bd3682628 | /code/chap08/ZombieMobGame.py | 1ca5ebc7b9173aa046b6f7eb320226cfc169fb42 | [] | no_license | wubinbai/pygame-book | 0707a0b36f41bc6f0b1282707e6c4f6cbed9c87a | 9de1f7516a2aec940ffa97f9686cc0520bad2deb | refs/heads/master | 2020-12-21T15:51:08.397619 | 2020-01-30T12:37:52 | 2020-01-30T12:37:52 | 236,478,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,667 | py | # Zombie Mob Game
# Chapter 8
import itertools, sys, time, random, math, pygame
from pygame.locals import *
from MyLibrary import *
def calc_velocity(direction, vel=1.0):
velocity = Point(0,0)
if direction == 0: #north
velocity.y = -vel
elif direction == 2: #east
velocity.x = vel
elif direction == 4: #south
velocity.y = vel
elif direction == 6: #west
velocity.x = -vel
return velocity
def reverse_direction(sprite):
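    # turn the sprite around by 180 degrees (0=north, 2=east, 4=south, 6=west)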
if sprite.direction == 0:
sprite.direction = 4
elif sprite.direction == 2:
sprite.direction = 6
elif sprite.direction == 4:
sprite.direction = 0
elif sprite.direction == 6:
sprite.direction = 2
#main program begins
pygame.init()
screen = pygame.display.set_mode((800,600))
pygame.display.set_caption("Collision Demo")
font = pygame.font.Font(None, 36)
timer = pygame.time.Clock()
#create sprite groups
player_group = pygame.sprite.Group()
zombie_group = pygame.sprite.Group()
health_group = pygame.sprite.Group()
#create the player sprite
player = MySprite()
player.load("farmer walk.png", 96, 96, 8)
player.position = 80, 80
player.direction = 4
player_group.add(player)
#create the zombie sprite
zombie_image = pygame.image.load("zombie walk.png").convert_alpha()
for n in range(0, 10):
zombie = MySprite()
zombie.load("zombie walk.png", 96, 96, 8)
zombie.position = random.randint(0,700), random.randint(0,500)
zombie.direction = random.randint(0,3) * 2
zombie_group.add(zombie)
#create heath sprite
health = MySprite()
health.load("health.png", 32, 32, 1)
health.position = 400,300
health_group.add(health)
game_over = False
player_moving = False
player_health = 100
#repeating loop
while True:
timer.tick(30)
ticks = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == QUIT: sys.exit()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]: sys.exit()
elif keys[K_UP] or keys[K_w]:
player.direction = 0
player_moving = True
elif keys[K_RIGHT] or keys[K_d]:
player.direction = 2
player_moving = True
elif keys[K_DOWN] or keys[K_s]:
player.direction = 4
player_moving = True
elif keys[K_LEFT] or keys[K_a]:
player.direction = 6
player_moving = True
else:
player_moving = False
if not game_over:
#set animation frames based on player's direction
player.first_frame = player.direction * player.columns
player.last_frame = player.first_frame + player.columns-1
if player.frame < player.first_frame:
player.frame = player.first_frame
if not player_moving:
#stop animating when player is not pressing a key
player.frame = player.first_frame = player.last_frame
else:
#move player in direction
player.velocity = calc_velocity(player.direction, 1.5)
player.velocity.x *= 1.5
player.velocity.y *= 1.5
#update player sprite
player_group.update(ticks, 50)
#manually move the player
if player_moving:
player.X += player.velocity.x
player.Y += player.velocity.y
if player.X < 0: player.X = 0
elif player.X > 700: player.X = 700
if player.Y < 0: player.Y = 0
elif player.Y > 500: player.Y = 500
#update zombie sprites
zombie_group.update(ticks, 50)
#manually iterate through all the zombies
for z in zombie_group:
#set the zombie's animation range
z.first_frame = z.direction * z.columns
z.last_frame = z.first_frame + z.columns-1
if z.frame < z.first_frame:
z.frame = z.first_frame
z.velocity = calc_velocity(z.direction)
#keep the zombie on the screen
z.X += z.velocity.x
z.Y += z.velocity.y
if z.X < 0 or z.X > 700 or z.Y < 0 or z.Y > 500:
reverse_direction(z)
#check for collision with zombies
attacker = None
attacker = pygame.sprite.spritecollideany(player, zombie_group)
if attacker != None:
#we got a hit, now do a more precise check
if pygame.sprite.collide_rect_ratio(0.5)(player,attacker):
player_health -= 10
if attacker.X < player.X: attacker.X -= 10
elif attacker.X > player.X: attacker.X += 10
else:
attacker = None
#update the health drop
health_group.update(ticks, 50)
#check for collision with health
if pygame.sprite.collide_rect_ratio(0.5)(player,health):
player_health += 30
if player_health > 100: player_health = 100
health.X = random.randint(0,700)
health.Y = random.randint(0,500)
#is player dead?
if player_health <= 0:
game_over = True
#clear the screen
screen.fill((50,50,100))
#draw sprites
health_group.draw(screen)
zombie_group.draw(screen)
player_group.draw(screen)
#draw energy bar
pygame.draw.rect(screen, (50,150,50,180), Rect(300,570,player_health*2,25))
pygame.draw.rect(screen, (100,200,100,180), Rect(300,570,200,25), 2)
if game_over:
print_text(font, 300, 100, "G A M E O V E R")
pygame.display.update()
| [
"[email protected]"
] | |
1d4abd406e5975787c37fe68fdc30ace92e9a531 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/pyexcel_20201111162515.py | eb28ae94c2e1ecfb48fe15a06c26ed612a6d0851 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | from openpyxl import Workbook
from openpyxl.utils import get_column_letter
wb = Workbook()
dest_filename = 'empty_book.xlsx'
ws1 = wb.active
ws1.title = "range names"
for row in range(1, 40):
ws1.append(range(600))
ws2 = wb.create_sheet(title="Pi")
ws2['F5'] = 3.14
ws3 = wb.create_sheet(title="Data")
for row in range(10, 20):
for col in range(27, 54):
_ = ws3.cell(column=col, row=row, value="{0}".format(get_column_letter(col)))
print(ws3['AA10'].value)
ws4 = wb.create_sheet(title="test")
for i in range(1,11):
ws4.cell(column=i,row=1).value="用例编号"
ws5 = wb.create_sheet(title="Test1")
title1 = ("用例编号","用例模块","用例标题","用例级别","测试环境","测试输入","执行操作","预期结果","验证结果","备注")
# write the ten header strings across row 1, one per column
for i, j in enumerate(title1, start=1):
    ws5.cell(column=i, row=1).value = j
wb.save(filename = dest_filename) | [
"[email protected]"
] | |
318b865387216d39ed0bd4472857241446accb5c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02390/s595695585.py | 37891011aec61d584d08c47440668ca2b1330dfd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | s = int(input())
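# split the total number of seconds S into hours, minutes and seconds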
min = int(s/60)
sec = s%60
hour = int(min/60)
min = min%60
print("{0}:{1}:{2}".format(hour, min, sec)) | [
"[email protected]"
] | |
d14ec836cc015536b3f4fbbea3e42722d329fe10 | fc276597c51509a13bf2c622c91123eb4987d6b2 | /setup.py | d45a7fa220a59b4a2f456f2596709f0d4c052620 | [
"Apache-2.0"
] | permissive | zaquestion/requests-mv-integrations | df1150b0efb8bf4d97979e8ed3499737d98fa16d | b8f3332c1cd564ef106e725e0ee3436913fa8e19 | refs/heads/master | 2020-06-29T11:12:30.990262 | 2016-11-22T01:56:56 | 2016-11-22T01:56:56 | 74,429,832 | 0 | 0 | null | 2016-12-06T18:57:52 | 2016-11-22T03:21:53 | Python | UTF-8 | Python | false | false | 2,180 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @namespace pycountry-convert
from __future__ import with_statement
# To install the tune-mv-integration-python library, open a Terminal shell,
# then run this file by typing:
#
# python setup.py install
#
import sys
import re
from setuptools import setup
REQUIREMENTS = [
req for req in open('requirements.txt')
.read().split('\n')
if req != ''
]
PACKAGES = [
'requests_mv_integrations',
'requests_mv_integrations.support',
'requests_mv_integrations.errors'
]
CLASSIFIERS = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules'
]
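# read __version__ directly from the package __init__.py so the version is defined in one place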
with open('requests_mv_integrations/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
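# running "python setup.py" with no arguments, or with "version", just prints the package version and exits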
if len(sys.argv) < 2 or sys.argv[1] == 'version':
print(version)
sys.exit()
setup(
name='requests-mv-integrations',
version=version,
description='',
author='TUNE Inc., TuneLab',
author_email='[email protected]',
url='https://github.com/TuneLab/requests-mv-integrations',
install_requires=REQUIREMENTS,
packages=PACKAGES,
package_dir={'requests-mv-integrations': 'requests-mv-integrations'},
include_package_data=True,
license='Apache 2.0',
zip_safe=False,
classifiers=CLASSIFIERS,
long_description="""
-----------------------------------------------------
"""
)
| [
"[email protected]"
] | |
81146b5b73fa9c515a476c8bf531f60c1e4b6b89 | a8289cb7273245e7ec1e6079c7f266db4d38c03f | /Anthony_Flask_Tutorials/Flask_GETAPI/run.py | 4b5fb6680b2b2838fcc12f2b4cce3d759ec957a0 | [] | no_license | palmarytech/Python_Snippet | 6acbd572d939bc9d5d765800f35a0204bc044708 | 41b4ebe15509d166c82edd23b713a1f3bf0458c5 | refs/heads/master | 2022-10-06T22:51:00.469383 | 2020-03-13T08:32:11 | 2020-03-13T08:32:11 | 272,350,189 | 1 | 0 | null | 2020-06-15T05:30:44 | 2020-06-15T05:30:44 | null | UTF-8 | Python | false | false | 617 | py | from flask import Flask, jsonify, request
app = Flask(__name__)
languages = [{"name": "Javascript"}, {"name": "Python"}, {"name": "Ruby"}]
@app.route("/", methods=["GET"])
def test():
return jsonify({"message": "API works"})
@app.route("/languages", methods=["GET"])
def returnAll():
return jsonify({"languages": languages})
@app.route("/languages/<string:name>", methods=["GET"])
def returnOne(name):
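    # return the first stored language whose name matches the URL segment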
_langs = [language for language in languages if language["name"] == name]
return jsonify({"language": _langs[0]})
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=5000) | [
"[email protected]"
] | |
74107df377adab78ed6ad99a7cdafb3fe88dfef6 | a5016c90fb13caaf8ce4e2c48dc842017f195822 | /src/0008_StringToInteger.py | c800458b3678b28a95706de4e3e5ff1d26c81c80 | [] | no_license | lixinchn/LeetCode | c21efc2d715da637374871d36d3d183ea08b9c31 | 4060d525f007c10a3a55d874f7953a0a1d98c8fd | refs/heads/master | 2020-04-03T10:27:16.068777 | 2017-01-05T02:31:28 | 2017-01-05T02:31:28 | 50,000,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
ret_int = 0
negative = False
MAX_INT = 2147483647
MIN_INT = 2147483648
i = 0
for i in range(len(str)):
if str[i] == ' ' or str[i] == '\t':
continue
break
if i < len(str) and (str[i] == '-' or str[i] == '+'):
negative = str[i] == '-'
i += 1
str = str[i:]
for i in range(len(str)):
try:
char_int = int(str[i])
except:
break
ret_int = ret_int * 10 + char_int
if not negative and ret_int > MAX_INT:
return MAX_INT
if negative and ret_int > MIN_INT:
return MIN_INT * -1
if negative:
ret_int *= -1
return ret_int
if __name__ == "__main__":
solution = Solution()
str = '100'
print solution.myAtoi(str)
str = '-1'
print solution.myAtoi(str)
str = '0'
print solution.myAtoi(str)
str = '007'
print solution.myAtoi(str)
str = '-007'
print solution.myAtoi(str)
str = ''
print solution.myAtoi(str)
str = ' '
print solution.myAtoi(str)
str = 'a123'
print solution.myAtoi(str)
str = '12aa3'
print solution.myAtoi(str)
str = '-2147483648'
print solution.myAtoi(str)
str = '-2147483649'
print solution.myAtoi(str)
| [
"[email protected]"
] | |
787e223f49a9ab2eeab5f45a7afcebfb8907c122 | 26e468e4f99ffc0ccd6da43c1ae9f363ec3010e6 | /msr.py | 32c9ecdd0dc265971335ef45b9e1bfc43fb7fe41 | [] | no_license | mtoqeerpk/rshalib | d86d36216fe9a4008a710af6c0e118c34c73dab4 | 9ef631ca3690c158693445a52d59c2583f8bd67e | refs/heads/master | 2023-01-28T00:25:14.992405 | 2020-12-10T09:16:41 | 2020-12-10T09:16:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | """
Magnitude scaling relations
"""
from __future__ import absolute_import, division, print_function, unicode_literals
try:
## Python 2
basestring
except:
## Python 3
basestring = str
__all__ = ['get_oq_msr']
def get_oq_msr(msr_or_name):
"""
Get OpenQuake magnitude scaling relationship object
:param msr_or_name:
str or instance of :class:`oqhazlib.scalerel.BaseMSR`
:return:
instance of :class:`oqhazlib.scalerel.BaseMSR`
"""
from . import oqhazlib
if isinstance(msr_or_name, oqhazlib.scalerel.BaseMSR):
msr = msr_or_name
elif isinstance(msr_or_name, basestring):
#if msr_or_name[-3:] != 'MSR':
# msr_or_name += 'MSR'
msr = getattr(oqhazlib.scalerel, msr_or_name)()
return msr
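# Illustrative usage (assuming oqhazlib.scalerel exposes a class literally named
# 'WC1994', as recent OpenQuake releases do): both get_oq_msr('WC1994') and
# get_oq_msr(oqhazlib.scalerel.WC1994()) would return a WC1994 instance.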
| [
"[email protected]"
] | |
6ec27707f06f70599fd009e6b1515054ddf675be | 029948b3fd0e41d80d66c84d808abff4fcb24ac8 | /dnac_api_client/models/response.py | 21c419cccc99217f50f5b75be892b1baee6f818a | [] | no_license | yijxiang/dnac-api-client | 842d1da9e156820942656b8f34342d52c96d3c37 | 256d016e2df8fc1b3fdad6e28f441c6005b43b07 | refs/heads/master | 2021-09-25T21:10:09.502447 | 2018-10-25T14:39:57 | 2018-10-25T14:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | # coding: utf-8
"""
Cisco DNA Center Platform v. 1.2.x (EFT)
REST API (EFT) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Response(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""Response - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
34e474332837d41d7be8e3b1b8180a049fb43e1b | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/comparison/test_chart_layout02.py | 22e9a8e8599e84e984544c6755d61220a43f21fa | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 1,659 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_layout02.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with user defined layout."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [68311296, 69198208]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_legend(
{
"layout": {
"x": 0.80197353455818021,
"y": 0.37442403032954213,
"width": 0.12858202099737534,
"height": 0.25115157480314959,
}
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
58c3c301b8c5ffe6496f032137b6c97b8eabbefb | 19a32440205b2caeec67c73c10d917b5fb30a86a | /isi_sdk/models/settings_notification_create_params.py | 55f76156fd70615ae7d5ef6ff537e1aba68c0417 | [
"Apache-2.0",
"MIT"
] | permissive | marrotte/isilon_sdk_python | 480e84312f5924a506aeb09c9c7cae79a2b9b7f4 | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | refs/heads/master | 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 | MIT | 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null | UTF-8 | Python | false | false | 9,762 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class SettingsNotificationCreateParams(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SettingsNotificationCreateParams - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'action_alert': 'bool',
'action_email_address': 'str',
'action_email_owner': 'bool',
'condition': 'str',
'email_template': 'str',
'holdoff': 'int',
'schedule': 'str',
'threshold': 'str'
}
self.attribute_map = {
'action_alert': 'action_alert',
'action_email_address': 'action_email_address',
'action_email_owner': 'action_email_owner',
'condition': 'condition',
'email_template': 'email_template',
'holdoff': 'holdoff',
'schedule': 'schedule',
'threshold': 'threshold'
}
self._action_alert = None
self._action_email_address = None
self._action_email_owner = None
self._condition = None
self._email_template = None
self._holdoff = None
self._schedule = None
self._threshold = None
@property
def action_alert(self):
"""
Gets the action_alert of this SettingsNotificationCreateParams.
Send alert when rule matches.
:return: The action_alert of this SettingsNotificationCreateParams.
:rtype: bool
"""
return self._action_alert
@action_alert.setter
def action_alert(self, action_alert):
"""
Sets the action_alert of this SettingsNotificationCreateParams.
Send alert when rule matches.
:param action_alert: The action_alert of this SettingsNotificationCreateParams.
:type: bool
"""
self._action_alert = action_alert
@property
def action_email_address(self):
"""
Gets the action_email_address of this SettingsNotificationCreateParams.
Email a specific email address when rule matches.
:return: The action_email_address of this SettingsNotificationCreateParams.
:rtype: str
"""
return self._action_email_address
@action_email_address.setter
def action_email_address(self, action_email_address):
"""
Sets the action_email_address of this SettingsNotificationCreateParams.
Email a specific email address when rule matches.
:param action_email_address: The action_email_address of this SettingsNotificationCreateParams.
:type: str
"""
self._action_email_address = action_email_address
@property
def action_email_owner(self):
"""
Gets the action_email_owner of this SettingsNotificationCreateParams.
Email quota domain owner when rule matches.
:return: The action_email_owner of this SettingsNotificationCreateParams.
:rtype: bool
"""
return self._action_email_owner
@action_email_owner.setter
def action_email_owner(self, action_email_owner):
"""
Sets the action_email_owner of this SettingsNotificationCreateParams.
Email quota domain owner when rule matches.
:param action_email_owner: The action_email_owner of this SettingsNotificationCreateParams.
:type: bool
"""
self._action_email_owner = action_email_owner
@property
def condition(self):
"""
Gets the condition of this SettingsNotificationCreateParams.
The condition detected.
:return: The condition of this SettingsNotificationCreateParams.
:rtype: str
"""
return self._condition
@condition.setter
def condition(self, condition):
"""
Sets the condition of this SettingsNotificationCreateParams.
The condition detected.
:param condition: The condition of this SettingsNotificationCreateParams.
:type: str
"""
allowed_values = ["exceeded", "denied", "violated", "expired"]
if condition not in allowed_values:
raise ValueError(
"Invalid value for `condition`, must be one of {0}"
.format(allowed_values)
)
self._condition = condition
@property
def email_template(self):
"""
Gets the email_template of this SettingsNotificationCreateParams.
Path of optional /ifs template file used for email actions.
:return: The email_template of this SettingsNotificationCreateParams.
:rtype: str
"""
return self._email_template
@email_template.setter
def email_template(self, email_template):
"""
Sets the email_template of this SettingsNotificationCreateParams.
Path of optional /ifs template file used for email actions.
:param email_template: The email_template of this SettingsNotificationCreateParams.
:type: str
"""
self._email_template = email_template
@property
def holdoff(self):
"""
Gets the holdoff of this SettingsNotificationCreateParams.
Time to wait between detections for rules triggered by user actions.
:return: The holdoff of this SettingsNotificationCreateParams.
:rtype: int
"""
return self._holdoff
@holdoff.setter
def holdoff(self, holdoff):
"""
Sets the holdoff of this SettingsNotificationCreateParams.
Time to wait between detections for rules triggered by user actions.
:param holdoff: The holdoff of this SettingsNotificationCreateParams.
:type: int
"""
self._holdoff = holdoff
@property
def schedule(self):
"""
Gets the schedule of this SettingsNotificationCreateParams.
Schedule for rules that repeatedly notify.
:return: The schedule of this SettingsNotificationCreateParams.
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""
Sets the schedule of this SettingsNotificationCreateParams.
Schedule for rules that repeatedly notify.
:param schedule: The schedule of this SettingsNotificationCreateParams.
:type: str
"""
self._schedule = schedule
@property
def threshold(self):
"""
Gets the threshold of this SettingsNotificationCreateParams.
The quota threshold detected.
:return: The threshold of this SettingsNotificationCreateParams.
:rtype: str
"""
return self._threshold
@threshold.setter
def threshold(self, threshold):
"""
Sets the threshold of this SettingsNotificationCreateParams.
The quota threshold detected.
:param threshold: The threshold of this SettingsNotificationCreateParams.
:type: str
"""
allowed_values = ["hard", "soft", "advisory"]
if threshold not in allowed_values:
raise ValueError(
"Invalid value for `threshold`, must be one of {0}"
.format(allowed_values)
)
self._threshold = threshold
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
490e341e942d8a60011545ea7657bda519f70f4e | ac32bac45df77083f4ef3115e747038a6753936c | /methods/stack/custom/adapters/layer_custom.py | e3220b5ba16f66f4e881fd6331e8a0efa9f0f191 | [] | no_license | Yujin-Yujin/rexpert | 13e1d5c4ca55664dd9fbb9a765ea5157a2e0893f | ed8628dc053194fee40e593b1cc5ec45a26c8073 | refs/heads/main | 2023-06-22T05:58:42.269923 | 2021-07-23T06:35:43 | 2021-07-23T06:35:43 | 373,423,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,825 | py | from abc import ABC, abstractmethod
from typing import List, Mapping, Union
import torch
from torch import nn
from transformers.adapters.composition import AdapterCompositionBlock, Fuse, Parallel, Split, Stack, parse_composition
from custom.adapters.modeling_custom import Adapter, BertFusion
class AdapterLayerBaseMixin(ABC):
"""
An abstract base implementation of adapter integration into a Transformer block. In BERT, subclasses of this module
are placed in the BertSelfOutput module and in the BertOutput module.
"""
# override this property if layer norm has a different name
@property
def layer_norm(self):
return self.LayerNorm
@property
@abstractmethod
def adapter_config_key(self):
"""Gets the name of the key by which this adapter location is identified in the adapter configuration."""
pass
@property
def layer_idx(self):
return getattr(self, "_layer_idx", -1)
@layer_idx.setter
def layer_idx(self, layer_idx):
idx = getattr(self, "_layer_idx", layer_idx)
assert idx == layer_idx
setattr(self, "_layer_idx", idx)
def _init_adapter_modules(self):
self.adapters = nn.ModuleDict(dict())
self.adapter_fusion_layer = nn.ModuleDict(dict())
def add_adapter(self, adapter_name: str, layer_idx: int):
self.layer_idx = layer_idx
adapter_config = self.config.adapters.get(adapter_name)
if adapter_config and adapter_config.get(self.adapter_config_key, None):
reduction_factor = adapter_config["reduction_factor"]
if isinstance(reduction_factor, Mapping):
if str(self.layer_idx) in reduction_factor:
reduction_factor = reduction_factor[str(self.layer_idx)]
elif "default" in reduction_factor:
reduction_factor = reduction_factor["default"]
else:
raise KeyError(
"The given reduction factor mapping does not give a default value and does not specify each "
"reduction factor individually. You need to provide a default value like this: "
'{"1": 16, "default": 16}'
)
adapter = Adapter(
input_size=self.config.hidden_size,
down_sample=self.config.hidden_size // reduction_factor,
add_layer_norm_before=adapter_config["ln_before"],
add_layer_norm_after=adapter_config["ln_after"],
non_linearity=adapter_config["non_linearity"],
residual_before_ln=adapter_config["adapter_residual_before_ln"],
)
adapter.train(self.training) # make sure training mode is consistent
self.adapters[adapter_name] = adapter
def add_fusion_layer(self, adapter_names: Union[List, str]):
"""See BertModel.add_fusion_layer"""
adapter_names = adapter_names if isinstance(adapter_names, list) else adapter_names.split(",")
if self.config.adapters.common_config_value(adapter_names, self.adapter_config_key):
fusion = BertFusion(self.config)
fusion.train(self.training) # make sure training mode is consistent
self.adapter_fusion_layer[",".join(adapter_names)] = fusion
def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool):
"""
Unfreezes a given list of adapters, the adapter fusion layer, or both
Args:
            adapter_setup: composition block naming the adapters to unfreeze (or the adapters that are part of the fusion layer to unfreeze)
            unfreeze_adapters: whether the adapters themselves should be unfrozen
            unfreeze_fusion: whether the adapter attention layer for the given adapters should be unfrozen
"""
if unfreeze_adapters:
for adapter_name in adapter_setup.flatten():
if adapter_name in self.adapters:
for param in self.adapters[adapter_name].parameters():
param.requires_grad = True
if unfreeze_fusion:
if isinstance(adapter_setup, Fuse):
if adapter_setup.name in self.adapter_fusion_layer:
for param in self.adapter_fusion_layer[adapter_setup.name].parameters():
param.requires_grad = True
for sub_setup in adapter_setup:
if isinstance(sub_setup, Fuse):
if sub_setup.name in self.adapter_fusion_layer:
for param in self.adapter_fusion_layer[sub_setup.name].parameters():
param.requires_grad = True
def get_adapter_preparams(
self,
adapter_config,
hidden_states,
input_tensor,
):
"""
        Retrieves the hidden_states, query (for Fusion), and residual connection according to the set configuration
Args:
            adapter_config: the adapter configuration that determines how the inputs are prepared
hidden_states: output of previous layer
input_tensor: residual connection before FFN
Returns: hidden_states, query, residual
"""
query = None
if adapter_config["residual_before_ln"]:
residual = hidden_states
if hasattr(self.config, "adapter_fusion") and self.config.adapter_fusion["query_before_ln"]:
query = hidden_states
if adapter_config["original_ln_before"]:
if self.layer_norm:
hidden_states = self.layer_norm(hidden_states + input_tensor)
else:
hidden_states = hidden_states + input_tensor
if not adapter_config["residual_before_ln"]:
residual = hidden_states
if hasattr(self.config, "adapter_fusion") and not self.config.adapter_fusion["query_before_ln"]:
query = hidden_states
return hidden_states, query, residual
def adapter_stack(self, adapter_setup: Stack, hidden_states, input_tensor, lvl=0):
"""
Forwards the given input through the given stack of adapters.
"""
for i, adapter_stack_layer in enumerate(adapter_setup):
# Break if setup is too deep
if isinstance(adapter_stack_layer, AdapterCompositionBlock) and lvl >= 1:
raise ValueError(
"Specified adapter setup is too deep. Cannot have {} at level {}".format(
adapter_stack_layer.__class__.__name__, lvl
)
)
# Case 1: We have a nested fusion layer -> call fusion method
if isinstance(adapter_stack_layer, Fuse):
hidden_states = self.adapter_fusion(adapter_stack_layer, hidden_states, input_tensor, lvl=lvl + 1)
# Case 2: We have a nested split layer -> call split method
elif isinstance(adapter_stack_layer, Split):
hidden_states = self.adapter_split(adapter_stack_layer, hidden_states, input_tensor, lvl=lvl + 1)
# Case 3: We have a nested parallel layer -> call parallel method
elif isinstance(adapter_stack_layer, Parallel):
hidden_states, input_tensor = self.adapter_parallel(
adapter_stack_layer, hidden_states, input_tensor, lvl=lvl + 1
)
# Case 4: We have a single adapter which is part of this module -> forward pass
elif adapter_stack_layer in self.adapters:
adapter_layer = self.adapters[adapter_stack_layer]
adapter_config = self.config.adapters.get(adapter_stack_layer)
hidden_states, _, residual = self.get_adapter_preparams(adapter_config, hidden_states, input_tensor)
hidden_states, _, up = adapter_layer(hidden_states, residual_input=residual)
# as this stack might be part of a fusion block, return the adapter up-projection output here
# together with the final output (with potential residuals & norms) if we reached the last block of the stack
if i == len(adapter_setup) - 1:
return hidden_states, up, input_tensor
# Case X: No adapter which is part of this module -> ignore
# If we got here, we either had another nested composition block
# or no adapter was found. In both cases, we don't need to set the second return value for fusion
return hidden_states, None, input_tensor
def adapter_fusion(self, adapter_setup: Fuse, hidden_states, input_tensor, lvl=0):
"""
Performs adapter fusion with the given adapters for the given input.
"""
# config of _last_ fused adapter is significant
adapter_config = self.config.adapters.get(adapter_setup.last())
hidden_states, query, residual = self.get_adapter_preparams(adapter_config, hidden_states, input_tensor)
up_list = []
for adapter_block in adapter_setup:
# Case 1: We have a nested stack -> call stack method
if isinstance(adapter_block, Stack):
_, up, _ = self.adapter_stack(adapter_block, hidden_states, input_tensor, lvl=lvl + 1)
if up is not None: # could be none if stack is empty
up_list.append(up)
# Case 2: We have a single adapter which is part of this module -> forward pass
elif adapter_block in self.adapters:
adapter_layer = self.adapters[adapter_block]
_, _, up = adapter_layer(hidden_states, residual_input=residual)
up_list.append(up)
# Case 3: nesting other composition blocks is invalid
elif isinstance(adapter_block, AdapterCompositionBlock):
raise ValueError(
"Invalid adapter setup. Cannot nest {} in {}".format(
adapter_block.__class__.__name__, adapter_setup.__class__.__name__
)
)
# Case X: No adapter which is part of this module -> ignore
if len(up_list) > 0:
up_list = torch.stack(up_list)
up_list = up_list.permute(1, 2, 0, 3)
hidden_states = torch.zeros(up_list.shape[0], up_list.shape[1], up_list.shape[3]).to(up_list.get_device())
# pooh stack
for i in range(up_list.shape[2]):
hidden_states += up_list[:,:,i,:]
hidden_states = self.adapter_fusion_layer[adapter_setup.name](
query,
hidden_states
)
return hidden_states
def adapter_split(self, adapter_setup: Split, hidden_states, input_tensor, lvl=0):
"""
Splits the given input between the given adapters.
"""
# config of _first_ of splitted adapters is significant
adapter_config = self.config.adapters.get(adapter_setup.first())
hidden_states, query, residual = self.get_adapter_preparams(adapter_config, hidden_states, input_tensor)
# split hidden representations and residuals at split index
split_hidden_states = [
hidden_states[:, : adapter_setup.split_index, :],
hidden_states[:, adapter_setup.split_index :, :],
]
split_input_tensor = [
input_tensor[:, : adapter_setup.split_index, :],
input_tensor[:, adapter_setup.split_index :, :],
]
split_residual = [
residual[:, : adapter_setup.split_index, :],
residual[:, adapter_setup.split_index :, :],
]
for i, adapter_block in enumerate(adapter_setup):
# Case 1: We have a nested stack -> call stack method
if isinstance(adapter_block, Stack):
split_hidden_states[i], _, _ = self.adapter_stack(
adapter_block, split_hidden_states[i], split_input_tensor[i], lvl=lvl + 1
)
# Case 2: We have a nested split -> recursively call split
elif isinstance(adapter_block, Split):
split_hidden_states[i] = self.adapter_split(
adapter_block, split_hidden_states[i], split_input_tensor[i], lvl=lvl + 1
)
# Case 3: We have a single adapter which is part of this module -> forward pass
elif adapter_block in self.adapters:
adapter_layer = self.adapters[adapter_block]
split_hidden_states[i], _, _ = adapter_layer(split_hidden_states[i], residual_input=split_residual[i])
# Case 4: nesting other composition blocks is invalid
elif isinstance(adapter_block, AdapterCompositionBlock):
raise ValueError(
"Invalid adapter setup. Cannot nest {} in {}".format(
adapter_block.__class__.__name__, adapter_setup.__class__.__name__
)
)
# Case X: No adapter which is part of this module -> ignore
hidden_states = torch.cat(split_hidden_states, dim=1)
return hidden_states
def adapter_parallel(self, adapter_setup: Parallel, hidden_states, input_tensor, lvl=0):
"""
For parallel execution of the adapters on the same input. This means that the input is repeated N times before
feeding it to the adapters (where N is the number of adapters).
"""
# We assume that all adapters have the same config
adapter_config = self.config.adapters.get(adapter_setup.first())
if not self.config.adapters.is_parallelized:
orig_batch_size = input_tensor.shape[0]
input_tensor = input_tensor.repeat(self.config.adapters.active_setup.parallel_channels, 1, 1)
hidden_states = hidden_states.repeat(self.config.adapters.active_setup.parallel_channels, 1, 1)
self.config.adapters.is_parallelized = True
else:
# The base model should handle replication of input.
# Therefore, we assume the (replicated) input batch to be divisible by the number of parallel channels.
if hidden_states.shape[0] % adapter_setup.parallel_channels != 0:
raise ValueError(
"The total input batch size in a Parallel adapter block must be divisible by the number of parallel channels."
)
orig_batch_size = hidden_states.shape[0] // adapter_setup.parallel_channels
hidden_states, _, residual = self.get_adapter_preparams(adapter_config, hidden_states, input_tensor)
# sequentially feed different parts of the blown-up batch into different adapters
children_hidden = []
for i, child in enumerate(adapter_setup):
# Case 1: We have a nested stack -> call stack method
if isinstance(child, Stack):
child_hidden_states, _, _ = self.adapter_stack(
child,
hidden_states[i * orig_batch_size : (i + 1) * orig_batch_size],
input_tensor[i * orig_batch_size : (i + 1) * orig_batch_size],
lvl=lvl + 1,
)
children_hidden.append(child_hidden_states)
# Case 2: We have a single adapter which is part of this module -> forward pass
elif child in self.adapters:
adapter_layer = self.adapters[child]
child_hidden_states, _, _ = adapter_layer(
hidden_states[i * orig_batch_size : (i + 1) * orig_batch_size],
residual_input=residual[i * orig_batch_size : (i + 1) * orig_batch_size],
)
children_hidden.append(child_hidden_states)
# Case 3: nesting other composition blocks is invalid
elif isinstance(child, AdapterCompositionBlock):
raise ValueError(
"Invalid adapter setup. Cannot nest {} in {}".format(
child.__class__.__name__, adapter_setup.__class__.__name__
)
)
# Case X: No adapter which is part of this module -> ignore
else:
children_hidden.append(hidden_states[i * orig_batch_size : (i + 1) * orig_batch_size])
# concatenate all outputs and return
hidden_states = torch.cat(children_hidden, 0)
return hidden_states, input_tensor
def adapters_forward(self, hidden_states, input_tensor, **kwargs):
"""
Called for each forward pass through adapters.
"""
if hasattr(self.config, "adapters"):
# First check for given arguments before falling back to defined setup
adapter_setup = kwargs.pop("adapter_names", None)
if adapter_setup is not None:
adapter_setup = parse_composition(adapter_setup)
else:
adapter_setup = self.config.adapters.active_setup
else:
adapter_setup = None
skip_adapters = adapter_setup is None or (
self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers
)
if not skip_adapters and (len(set(self.adapters.keys()) & adapter_setup.flatten()) > 0):
if isinstance(adapter_setup, Stack):
hidden_states, _, input_tensor = self.adapter_stack(adapter_setup, hidden_states, input_tensor)
elif isinstance(adapter_setup, Fuse):
hidden_states = self.adapter_fusion(adapter_setup, hidden_states, input_tensor)
elif isinstance(adapter_setup, Split):
hidden_states = self.adapter_split(adapter_setup, hidden_states, input_tensor)
elif isinstance(adapter_setup, Parallel):
# notice that we are overriding input tensor here to keep the same dim as hidden_states for the residual
# in case we were blowing up the batch for parallel processing of multiple adapters for the same input
hidden_states, input_tensor = self.adapter_parallel(adapter_setup, hidden_states, input_tensor)
else:
raise ValueError(f"Invalid adapter setup {adapter_setup}")
last_config = self.config.adapters.get(adapter_setup.last())
if last_config["original_ln_after"]:
if self.layer_norm:
hidden_states = self.layer_norm(hidden_states + input_tensor)
else:
hidden_states = hidden_states + input_tensor
elif self.layer_norm:
hidden_states = self.layer_norm(hidden_states + input_tensor)
else:
hidden_states = hidden_states + input_tensor
return hidden_states
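# --- Illustration (added; not part of the original adapter-transformers fork) ---
# adapters_forward() above is called by every transformer block; which branch
# runs is decided solely by the active composition block.  A rough sketch of how
# a model built on this mixin exercises the Stack and Fuse paths (the model-level
# method names follow the upstream adapter-transformers API and are assumptions
# here, not guaranteed by this file):
#
#   from transformers.adapters.composition import Stack, Fuse
#
#   model.add_adapter("task_a")                            # fills self.adapters
#   model.add_adapter("task_b")
#   model.add_adapter_fusion(["task_a", "task_b"])         # fills adapter_fusion_layer
#
#   model.set_active_adapters(Stack("task_a", "task_b"))   # routes to adapter_stack()
#   model.set_active_adapters(Fuse("task_a", "task_b"))    # routes to adapter_fusion()
#
# Note the custom twist in adapter_fusion() above: the per-adapter up-projections
# are summed (the "pooh stack" loop) before being handed to BertFusion, instead of
# being passed as a stacked tensor as in the upstream implementation.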
| [
"[email protected]"
] | |
afbde06d9758d8f6cb99f0c165487171b932e4a9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03399/s459988463.py | c485ed9fa7f3a1cbfbac8d9420a062d84304b2e3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | # Read the input values
A = int(input())
B = int(input())
C = int(input())
D = int(input())
# Output the minimum total fare
train = min(A,B)
bus = min(C,D)
tbsum = train + bus
print(tbsum) | [
"[email protected]"
] | |
f19b8be273679764a4d21955b4e6283bcba5d52c | 41e3065d6f29449251f1cc79cb340fa273ac5c61 | /0x11-python-network_1/4-hbtn_status.py | 9760dd4785428c240c719c71c1c380eb25f80b8c | [] | no_license | BD20171998/holbertonschool-higher_level_programming | 856fa3a7fcfafd3e17ebd7dd4cf9d3e5a609fd1f | bfa78d25bd4527e06cf1bf54cbc00722449d9a30 | refs/heads/master | 2021-07-16T01:58:42.911959 | 2020-11-15T07:18:19 | 2020-11-15T07:18:19 | 226,976,859 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | #!/usr/bin/python3
import requests
if __name__ == "__main__":
html = requests.get('https://intranet.hbtn.io/status')
print("Body response:")
print("{}{}".format("\t- type: ", type(html.text)))
print("{}{}".format("\t- content: ", html.text))
| [
"[email protected]"
] | |
be67b3d78dfe581cfe41e8bf0ac8acd188a3da8e | 2ad7e88305a7d2215a816e1aa3a82ef50b685b23 | /dshop/main/utilities.py | eacf75e1b907eea2ad161776d182389a5405e407 | [] | no_license | KeksikProg/shop_chairs | 5c9fb01f47676bb118fcc8161be1854e23271550 | 4bb09c409450cf2bb024c69d51d9f046520e9349 | refs/heads/master | 2023-06-26T17:21:27.678778 | 2020-08-06T16:28:29 | 2020-08-06T16:28:29 | 281,150,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | from datetime import datetime
from os.path import splitext
from django.template.loader import render_to_string
from django.core.signing import Signer # This is for the digital signature
from dshop.settings import ALLOWED_HOSTS
from django.dispatch import Signal
from django.db.models.signals import post_save
def get_timestamp_path(instance, filename): # Since this function doesn't belong to editors, controllers, or models, we simply keep it here
return f'{datetime.now().timestamp()}{splitext(filename)[1]}'
signer = Signer()
def send_activation_notification(user):
if ALLOWED_HOSTS:
host = 'http://' + ALLOWED_HOSTS[0]
else:
host = 'http://localhost:8000'
context = {'user':user, 'host':host, 'sign':signer.sign(user.username)}
subj = render_to_string('email/activation_letter_subj.txt', context)
body = render_to_string('email/activation_letter_body.txt', context)
user.email_user(subj, body)
user_registrated = Signal(providing_args = ['instance']) # Тут мы из всех сигналов берем определенный по его ключу
def user_registrated_dispatcher(sender, **kwargs):
send_activation_notification(kwargs['instance'])
user_registrated.connect(user_registrated_dispatcher)
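# --- Illustration (added): how the signed token round-trips ---
# The activation letter rendered above embeds signer.sign(user.username); the
# activation view (defined elsewhere in this project, not in this file) is
# expected to verify it roughly like this, with BadSignature raised on tampering
# (view and template names below are hypothetical):
#
#   from django.core.signing import BadSignature
#
#   def user_activate(request, sign):
#       try:
#           username = signer.unsign(sign)
#       except BadSignature:
#           return render(request, 'main/bad_signature.html')
#       ...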
| [
"[email protected]"
] | |
d676188c2588e84de56202458db7503191525a1e | 715966248566909e4e8978230b37458d031418c5 | /01.jumptopython/chap03/책/126-02.py | 9e55500f0b6a62346082cb03961b4920774aabf8 | [] | no_license | itminha123/jumptopython | 1101f4e065ce2f8e0a1c68bb818b14b562fc43d1 | 8b25ae84f795eab5e7bcfa609646a2736ac2a98f | refs/heads/master | 2021-09-19T08:29:33.686384 | 2018-07-25T18:59:55 | 2018-07-25T18:59:55 | 112,169,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | coffee=10
while True:
money=int(input("돈을 넣어 주세요:"))
if money== 300:
print("커피를 줍니다.")
coffee=coffee-1
elif money>300:
print("거스름돈 %d를 주고 커피를 줍니다."%(money-300))
coffee=coffee-1
else:
print("돈을 다시 돌려주고 커피를 주지 않습니다.")
print("남은 커피의 양은 %d개 입니다."%coffee)
if not coffee:
print("커피가 다 떨어졌습니다. 판매를 중지합니다.")
break
| [
"[email protected]"
] | |
408491e006322a62395f8c6b6a009ef13abe8b3c | c566ceb33bfea62f4be98dd2f9536deaee46ac3e | /api/utils/custom_exception.py | 27388bf098fb1fd47d3af3feec46065ce054813d | [] | no_license | Saviodiow95/wallet_test | 1ad0e8f1699803ecca0ebf8c6a96b10efea980a3 | 4c2bf80332458b39197de134e914af669bbcc355 | refs/heads/main | 2023-07-04T02:25:11.321675 | 2021-08-02T12:28:40 | 2021-08-02T12:28:40 | 390,503,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from rest_framework.exceptions import APIException
class InsufficientFundsException(APIException):
"""
    Exception created to return a message when an asset has insufficient balance to perform the redemption
"""
status_code = 304
default_detail = 'Não é possível realizar o Resgate, Saldo Insuficiente'
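# --- Usage sketch (added for illustration) ---
# DRF turns an APIException raised inside a view into a response built from
# default_detail and status_code, so a redemption endpoint might simply do
# (asset and requested_amount are hypothetical names):
#
#   if asset.balance < requested_amount:
#       raise InsufficientFundsException()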
| [
"[email protected]"
] | |
fd0c93ec47ca70c94d9bc8e470186eac42081257 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_counselled.py | 4ca2e9c494bd4785c5c7bc4a66f6f1113eba6619 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _COUNSELLED():
def __init__(self,):
self.name = "COUNSELLED"
self.definitions = counsel
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['counsel']
| [
"[email protected]"
] | |
52bad13f94b0f90846709fba274572fa370e643a | c085578abc19db18ee0766e1f9598d79a3acdbe1 | /18-4Sum/solution.py | c5448eab17a5a88cac8e2880a4fc80d1a1c7a04b | [
"MIT"
] | permissive | Tanych/CodeTracking | efb6245edc036d7edf85e960972c34d03b8c707a | 86f1cb98de801f58c39d9a48ce9de12df7303d20 | refs/heads/master | 2020-05-21T17:40:10.105759 | 2016-10-09T18:20:42 | 2016-10-09T18:20:42 | 60,616,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | class Solution(object):
def nsum(self,nums,start,n,target):
nlen=len(nums)
res=[]
if nums[start]*n>target or target>nums[nlen-1]*n:
return res
for i in xrange(start,nlen-n+1):
if i>start and nums[i-1]==nums[i]:
continue
if n==1:
if target<nums[i]:break
if target>nums[i]:continue
res.append([target])
break
for li in self.nsum(nums,i+1,n-1,target-nums[i]):
li.append(nums[i])
res.append(li)
return res
def fourSum(self, nums,target):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
num_len=len(nums)
if num_len<4:
return []
nums.sort()
return self.nsum(nums,0,4,target)
res_list=[]
hash_dict={}
for m in xrange(num_len-3):
if 4*nums[m]>target:
return res_list
for i in xrange(m+1,num_len-2):
start=i+1
end=num_len-1
while start<end:
if nums[m]+nums[i]+nums[start]+nums[end]==target:
if not hash_dict.has_key((nums[m],nums[i],nums[start],nums[end])):
res_list.append([nums[m],nums[i],nums[start],nums[end]])
hash_dict[(nums[m],nums[i],nums[start],nums[end])]=1
start+=1
end-=1
elif nums[m]+nums[i]+nums[start]+nums[end]<target:
start+=1
elif nums[m]+nums[i]+nums[start]+nums[end]>target:
end-=1
return res_list
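# --- Usage note (added) ---
# fourSum() already returns from the recursive nsum() call above, so the
# hash-based two-pointer block after that return is unreachable leftover code.
# Quick check of the recursive path (values inside each quadruplet come out in
# descending order because the lists are built innermost-first):
if __name__ == "__main__":
    s = Solution()
    print(s.fourSum([1, 0, -1, 0, -2, 2], 0))
    # expected: [[2, 1, -1, -2], [2, 0, 0, -2], [1, 0, 0, -1]]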
| [
"[email protected]"
] | |
0e10bb119dad92159c84d65e61fbe16a80bca333 | 10f0193389a161c447061d06a87c4fae8fc31bb5 | /huobi/model/orderupdatenew.py | 51c567837fd5cfda68b30d5a52efdf3ac769b7be | [
"Apache-2.0"
] | permissive | neosun100/huobi_Python | 1d9fca2c24673076516d582c263445c17626bd8e | 70b280f751e6159b76f0cc43251896a754c1b559 | refs/heads/master | 2021-01-14T16:47:30.471992 | 2020-02-24T08:45:40 | 2020-02-24T08:45:40 | 242,685,209 | 0 | 0 | Apache-2.0 | 2020-02-24T08:38:33 | 2020-02-24T08:38:32 | null | UTF-8 | Python | false | false | 2,896 | py | from huobi.model.constant import *
class OrderUpdateNew:
"""
The detail order information.
:member
        match_id: The match id for the order.
order_id: The order id.
symbol: The symbol, like "btcusdt".
state: The order state: submitted, partial-filled, cancelling, filled, canceled.
role: value is taker or maker
price: The limit price of limit order.
order_type: The order type, possible values are: buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-limit-maker, sell-limit-maker.
filled_amount: The amount which has been filled.
filled_cash_amount: The filled total in quote currency.
unfilled_amount: The amount which is unfilled.
"""
def __init__(self):
self.match_id = 0
self.order_id = 0
self.symbol = ""
self.state = OrderState.INVALID
self.role = ""
self.price = 0.0
self.filled_amount = 0.0
self.filled_cash_amount = 0.0
self.unfilled_amount = 0.0
self.client_order_id = ""
self.order_type = OrderType.INVALID
@staticmethod
def json_parse(json_data):
order_upd = OrderUpdateNew()
order_upd.match_id = json_data.get_int("match-id")
order_upd.order_id = json_data.get_int("order-id")
order_upd.symbol = json_data.get_string("symbol")
order_upd.state = json_data.get_string("order-state")
order_upd.role = json_data.get_string("role")
order_upd.price = json_data.get_float("price")
order_upd.order_type = json_data.get_string("order-type")
order_upd.filled_amount = json_data.get_float("filled-amount")
order_upd.filled_cash_amount = json_data.get_float("filled-cash-amount")
order_upd.unfilled_amount = json_data.get_float("unfilled-amount")
order_upd.client_order_id = json_data.get_string("client-order-id")
return order_upd
def print_object(self, format_data=""):
from huobi.base.printobject import PrintBasic
PrintBasic.print_basic(self.match_id, format_data + "Match Id")
PrintBasic.print_basic(self.order_id, format_data + "Order Id")
PrintBasic.print_basic(self.symbol, format_data + "Symbol")
PrintBasic.print_basic(self.state, format_data + "Order State")
PrintBasic.print_basic(self.role, format_data + "Role")
PrintBasic.print_basic(self.price, format_data + "Price")
PrintBasic.print_basic(self.filled_amount, format_data + "Filled Amount")
PrintBasic.print_basic(self.filled_cash_amount, format_data + "Filled Cash Amount")
PrintBasic.print_basic(self.unfilled_amount, format_data + "Unfilled Amount")
PrintBasic.print_basic(self.client_order_id, format_data + "Client Order Id")
PrintBasic.print_basic(self.order_type, format_data + "Order Type")
| [
"[email protected]"
] | |
1ad8e97dd542ba0a2310670381e40b114cef0bc8 | cd67fbaec6ba71fdd10ac8dd23e034d7df706aa3 | /bootstrap/urls.py | 1db8a20ee1f67439cdb7324c6c4e7e6e93525396 | [] | no_license | ssyctlm/feb27 | 009310d09bc866d7432576d4a867b63591009cbb | 8c666cf584e1589d06be16ba8b1266cb27646f39 | refs/heads/master | 2020-04-25T15:16:55.957159 | 2019-03-19T13:41:07 | 2019-03-19T13:41:07 | 172,873,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from django.urls import path
from .views import (
index_view,
about_view,
services_view,
contact_view
)
# app_name = 'articles'
urlpatterns = [
path('',index_view,name='home'),
path('about',about_view,name = 'about'),
path('services',services_view,name = 'services'),
path('contact',contact_view,name = 'contact'),
]
| [
"none"
] | none |
720efd4d9680f6c52878542ad045aff577e5aa38 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoBTag/Configuration/test/test_cfg.py | 7fc7d9770ab2228f60b31accaf28c59b6b4e80a7 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 843 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("GeometryTest")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.FakeConditions_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/relval/2008/5/20/RelVal-RelValTTbar-1211209682-FakeConditions-2nd/0000/08765709-5826-DD11-9CE8-000423D94700.root')
)
process.RECO = cms.OutputModule("PoolOutputModule",
process.AODSIMEventContent,
fileName = cms.untracked.string('reco.root')
)
process.p1 = cms.Path(process.btagging)
process.p = cms.EndPath(process.RECO)
| [
"[email protected]"
] | |
f9348d48ee596bc1cd89fca043043b4b52b931d2 | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/impairment/profile/accumulateandburst/accumulateandburst.py | 091ec9d83b6e627d30bda819aaea8952ba46111d | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 7,559 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AccumulateAndBurst(Base):
"""Accumulates packets in a queue and transmit groups of packets as a burst. It can only be used on a profile if delayVariation and customDelayVariation are disabled.
The AccumulateAndBurst class encapsulates a required accumulateAndBurst resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'accumulateAndBurst'
def __init__(self, parent):
super(AccumulateAndBurst, self).__init__(parent)
@property
def BurstSize(self):
"""Represents the burst octet size. The default value is 1014.
Returns:
number
"""
return self._get_attribute('burstSize')
@BurstSize.setter
def BurstSize(self, value):
self._set_attribute('burstSize', value)
@property
def BurstSizeUnit(self):
"""The burst size unit is either megabytes or kilobytes. The default unit is kilobytes.
Returns:
str(kilobytes|kKilobytes|kMegabytes|megabytes)
"""
return self._get_attribute('burstSizeUnit')
@BurstSizeUnit.setter
def BurstSizeUnit(self, value):
self._set_attribute('burstSizeUnit', value)
@property
def BurstTimeout(self):
"""The burst timeout.The default value is 5 seconds.
Returns:
str
"""
return self._get_attribute('burstTimeout')
@BurstTimeout.setter
def BurstTimeout(self, value):
self._set_attribute('burstTimeout', value)
@property
def BurstTimeoutUnit(self):
"""Seconds(default) / milliseconds / mm:ss.fff time format.
Returns:
str(kMilliseconds|kSeconds|kTimeFormat|milliseconds|seconds|timeFormat)
"""
return self._get_attribute('burstTimeoutUnit')
@BurstTimeoutUnit.setter
def BurstTimeoutUnit(self, value):
self._set_attribute('burstTimeoutUnit', value)
@property
def Enabled(self):
"""If true, received packets are queued and transmitted in bursts.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def InterBurstGap(self):
"""Tail to head (default) / Head to head.
Returns:
str(headToHead|kHeadToHead|kTailToHead|tailToHead)
"""
return self._get_attribute('interBurstGap')
@InterBurstGap.setter
def InterBurstGap(self, value):
self._set_attribute('interBurstGap', value)
@property
def InterBurstGapValue(self):
"""The InterBurst gap value. The default value is 20 ms.
Returns:
number
"""
return self._get_attribute('interBurstGapValue')
@InterBurstGapValue.setter
def InterBurstGapValue(self, value):
self._set_attribute('interBurstGapValue', value)
@property
def InterBurstGapValueUnit(self):
"""Seconds / milliseconds (default).
Returns:
str(kMilliseconds|kSeconds|milliseconds|seconds)
"""
return self._get_attribute('interBurstGapValueUnit')
@InterBurstGapValueUnit.setter
def InterBurstGapValueUnit(self, value):
self._set_attribute('interBurstGapValueUnit', value)
@property
def PacketCount(self):
"""Represents the burst packet count. The default value is 1000 packets.
Returns:
number
"""
return self._get_attribute('packetCount')
@PacketCount.setter
def PacketCount(self, value):
self._set_attribute('packetCount', value)
@property
def QueueAutoSize(self):
"""Gets the automatically calculated queue size when queueAutoSizeEnable is true or zero when queueAutoSizeEnable is false.
Returns:
number
"""
return self._get_attribute('queueAutoSize')
@property
def QueueAutoSizeEnabled(self):
"""Automatically calculate queue size. The default value is true.
Returns:
bool
"""
return self._get_attribute('queueAutoSizeEnabled')
@QueueAutoSizeEnabled.setter
def QueueAutoSizeEnabled(self, value):
self._set_attribute('queueAutoSizeEnabled', value)
@property
def QueueSize(self):
"""The accumulate-and-burst queue size expressed in MB. The default value is 1.
Returns:
number
"""
return self._get_attribute('queueSize')
@QueueSize.setter
def QueueSize(self, value):
self._set_attribute('queueSize', value)
def update(self, BurstSize=None, BurstSizeUnit=None, BurstTimeout=None, BurstTimeoutUnit=None, Enabled=None, InterBurstGap=None, InterBurstGapValue=None, InterBurstGapValueUnit=None, PacketCount=None, QueueAutoSizeEnabled=None, QueueSize=None):
"""Updates a child instance of accumulateAndBurst on the server.
Args:
BurstSize (number): Represents the burst octet size. The default value is 1014.
BurstSizeUnit (str(kilobytes|kKilobytes|kMegabytes|megabytes)): The burst size unit is either megabytes or kilobytes. The default unit is kilobytes.
BurstTimeout (str): The burst timeout.The default value is 5 seconds.
BurstTimeoutUnit (str(kMilliseconds|kSeconds|kTimeFormat|milliseconds|seconds|timeFormat)): Seconds(default) / milliseconds / mm:ss.fff time format.
Enabled (bool): If true, received packets are queued and transmitted in bursts.
InterBurstGap (str(headToHead|kHeadToHead|kTailToHead|tailToHead)): Tail to head (default) / Head to head.
InterBurstGapValue (number): The InterBurst gap value. The default value is 20 ms.
InterBurstGapValueUnit (str(kMilliseconds|kSeconds|milliseconds|seconds)): Seconds / milliseconds (default).
PacketCount (number): Represents the burst packet count. The default value is 1000 packets.
QueueAutoSizeEnabled (bool): Automatically calculate queue size. The default value is true.
QueueSize (number): The accumulate-and-burst queue size expressed in MB. The default value is 1.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
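# --- Usage sketch (added for illustration only) ---
# AccumulateAndBurst is a required child of an impairment profile, so it is
# reached through the profile rather than created directly.  Assuming an
# already-connected ixnetwork_restpy session (the attribute chain below mirrors
# this package's path and is an assumption, not verified against a live chassis):
#
#   profile = ixnetwork.Impairment.Profile.find()[0]
#   aab = profile.AccumulateAndBurst
#   aab.update(Enabled=True, PacketCount=500,
#              BurstTimeout='10', BurstTimeoutUnit='seconds',
#              QueueAutoSizeEnabled=True)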
| [
"[email protected]"
] | |
2582be4a1861d40ece795a0beb0a30470be1b7b5 | 1d49c3b135c04a58ca25fd9d8e7e073daa26ff51 | /steam/pipelines.py | cefa2f6f0193df72b56a0b20af253b1ae532b675 | [] | no_license | clioo/steam-scraping | 08dabb1968080e1951cc9a1dac0279865fa24e8f | 9f401e331dcecc51238fa1c52e9c6ffb4c6a483b | refs/heads/master | 2022-10-05T23:33:16.544375 | 2020-06-05T17:42:56 | 2020-06-05T17:42:56 | 269,166,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # -*- coding: utf-8 -*-
class SteamPipeline(object):
def process_item(self, item, spider):
return item
| [
"[email protected]"
] | |
531a249844fca891544a673ad19ecb26f8145614 | 9cb43a47faef0d3f5c7a6986cb2b21a0a89b6972 | /file-operations-02/clonefootage.py | 5e3b060fc83ff787e47778f2f1c91a95909bb04b | [] | no_license | fsiddi/generic-tools | 3e0f2c7ecaf469dcb8f173e191cd7d891fff8bc6 | 432463ec468a695551d7093c4851d5248f1d7764 | refs/heads/master | 2021-03-12T19:56:53.373367 | 2013-01-13T14:17:15 | 2013-01-13T14:17:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | import subprocess
import os
import shutil
FOLDER_SRC = "/Users/fsiddi/Desktop/clonefootage/footage_src"
FOLDER_DST = "/Users/fsiddi/Desktop/clonefootage/footage_dst"
for dirname, dirnames, filenames in os.walk(FOLDER_SRC):
for filename in filenames:
if "linear_hd" in dirname:
filename_src = os.path.join(dirname, filename)
dirname_dst = dirname.replace(FOLDER_SRC, FOLDER_DST)
''''if filename.endswith(".png"):
if not os.path.exists(dirname_dst):
os.makedirs(dirname_dst)
filename_jpg = filename.replace(".png", ".jpg")
filename_dst = os.path.join(dirname_dst, filename_jpg)
print filename_src + " >> " + filename_dst
elif filename.endswith(".jpg"):
if not os.path.exists(dirname_dst):
os.makedirs(dirname_dst)
filename_dst = os.path.join(dirname_dst, filename)
print filename_src + " >> " + filename_dst'''
if filename.endswith(".exr"):
if not os.path.exists(dirname_dst):
#pass
os.makedirs(dirname_dst)
filename_dst = os.path.join(dirname_dst, filename)
if not os.path.exists(filename_dst):
print filename_src + " >> " + filename_dst
shutil.copy(filename_src, filename_dst)
else:
print "skipping " + filename_src
else:
pass
#subprocess.call(["convert", filename_src, "-resize", "1280x1280", filename_dst])
else:
print "skipping " + dirname | [
"[email protected]"
] | |
a2ac5030018622f024f8ca2435442ccd4f597f21 | 8ffc07a5240be5e6bb3106b20e11aee38cb8808a | /syloga/utils/symbols.py | aaf3a96c6a1d1b704aa3654833e56b44a3d8a098 | [
"MIT"
] | permissive | xaedes/python-symbolic-logic-to-gate | 315a242d2000123bf09ea15a439dc6437ea457cb | a0dc9be9e04290008cf709fac789d224ab8c14b0 | refs/heads/main | 2023-09-02T15:37:53.750722 | 2021-11-08T12:51:51 | 2021-11-08T12:51:51 | 425,550,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
from syloga.ast.core import Symbol
from syloga.ast.containers import Tuple
def symbols(string):
return Tuple(*map(Symbol,string.split(" ")))
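# --- Example (added): build several Symbols from one space-separated string ---
# Assumes syloga's Tuple container prints and iterates like its sympy counterpart.
if __name__ == "__main__":
    syms = symbols("a b c")
    print(syms)   # a Tuple wrapping Symbol('a'), Symbol('b'), Symbol('c')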
| [
"[email protected]"
] | |
fc6b16aacd2c5eb9924c98c120e85092a8d4ec26 | 233b2958c853dc57dfa5d54caddbc1520dcc35c8 | /ava/cmds/pod.py | 51fe03691017ccf91af0b199173ecdc252918607 | [] | no_license | eavatar/ava.node | 6295ac6ed5059ebcb6ce58ef6e75adf1bfa24ed7 | 71e3304d038634ef13f44d245c3838d276a275e6 | refs/heads/master | 2021-01-19T06:13:01.127585 | 2015-06-03T03:10:59 | 2015-06-03T03:10:59 | 33,645,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | # -*- coding: utf-8 -*-
"""
Command for managing local pod directory.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import shutil
import click
from ava.runtime import environ
from .cli import cli
@cli.group()
def pod():
""" Pod management.
"""
pass
@pod.command()
@click.argument("folder", type=click.Path(exists=False))
def init(folder):
"""
Constructs the skeleton of directories if it not there already.
:return:
"""
if os.path.exists(folder):
click.echo("Folder %s is not empty!" % folder, err=True)
return
os.makedirs(folder)
src_dir = environ.pod_dir()
# copy files from base_dir to user_dir
subdirs = os.listdir(src_dir)
for d in subdirs:
src_path = os.path.join(src_dir, d)
dst_path = os.path.join(folder, d)
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
@pod.command()
def open():
""" Open Pod folder in a file explorer or the like.
"""
click.launch(environ.pod_dir()) | [
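# --- CLI usage sketch (added; the console-script name is an assumption) ---
# With the `cli` group above installed as an `ava` entry point, the two
# subcommands defined here would be invoked as:
#
#   ava pod init ~/my-pod    # copy the skeleton from environ.pod_dir() into a new folder
#   ava pod open             # open the active pod folder in the system file browser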
"[email protected]"
] | |
64765314abb3f1daa11e18f0f3f06465242daf37 | ed0ead8389adb7cd81ade57f972afea7de896ffc | /haffa/Badge.py | 14d280ec8b20140e125365d399cdae00e046556e | [] | no_license | rblack42/haffa | 4e55664e1274b9ceec25fdfbc4603a03592229c0 | 526814e4024c1289cb2a79115d173772c82b5a88 | refs/heads/master | 2022-11-18T14:00:52.443335 | 2020-07-12T13:28:55 | 2020-07-12T13:28:55 | 273,238,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,706 | py | from svgelements import *
from Shapes import paths, boxes
from svgelements import Path
SVGDIR = "svg"
class Badge(object):
def __init__(self, height):
""" load shape and position data"""
print("Generating SVG file for height %d" % height)
self.paths = paths
self.boxes = boxes
def normalize(self, path):
""" normalize path to height of 1000"""
box = path.bbox()
x1, y1, x2, y2 = box
dx = x2 - x1
dy = y2 - y1
scale = 1000 / dy
t = "translate(%s,%s)" % (-x1, -y1)
s = "scale(%s)" % scale
tp = path * t
sp = tp * s
return sp
def transform(self, shape, x, y, w, h):
print("transforming", x, y, w, h)
bbox = shape.bbox()
print(bbox)
x1, y1, x2, y2 = bbox
bdx = x2 - x1
bdy = y2 - y1
scalex = w/bdx
scaley = h/bdy
print(bdx, bdy)
s = 'scale(%s,%s)' % (scalex, scaley)
t = 'translate(%s,%s)' % (x, y)
print(s, t)
sc = shape * s
tc = sc * t
return tc
def gen_raw_svg(self):
"""generate standard view of shapes"""
for s in self.paths:
shape_path = paths[s]
sp = Path(shape_path)
sp = self.normalize(sp)
bb = sp.bbox()
x1, y1, x2, y2 = bb
dx = x2 - x1
dy = y2 - y1
sp = self.transform(sp, 20, 20, dx*0.6, dy*0.6)
d = sp.d()
db = "M 20,20 h %s v %s H 20 V 20 Z" % (dx*0.6, dy*0.6)
svg = """<svg width="%d" height="%d"
xmlns="http://www.w3.org/2000/svg" >""" % (dx, dy)
svg += """ <path style="fill:none"
stroke="black" stroke-width="3" d="%s" />""" % db
svg += """
<path style="fill:none" stroke="red" stroke-width="3" d="%s" />""" % d
svg += """"
</svg>"""
fname = "%s/raw_%s.svg" % (SVGDIR, s)
with open(fname, "w") as fout:
fout.write(svg)
def gen_placement(self):
cx1, cy1, cx2, cy2 = self.boxes["canvas"]
width = cx2 - cx1 + 10
height = cy2 - cy1 + 10
svg = """<svg width="%d" height="%d"
xmlns="http://www.w3.org/2000/svg"
>""" % (width, height)
for b in self.boxes:
if b == "canvas":
continue
shape = b
if len(b) == 2:
shape = shape[0]
print("placing ", b, " with shape: ", shape)
path = self.paths[shape]
x1, y1, x2, y2 = self.boxes[b]
w = x2 - x1
h = y2 - y1
print(x1, y1, x2, y2, w, h)
sp = Path(path)
sp = self.normalize(sp)
sp = self.transform(sp, x1, y1, w, h)
print("shape box:", sp.bbox())
d = sp.d()
svg += """
<rect x="%d" y="%d"
width="%d" height="%d"
stroke="black" stroke-width="2"
fill="none" />""" % (x1, y1, w, h)
svg += """
<path style="fill:none" stroke="red" stroke-width="3" d="%s" />""" % d
svg += "</svg>"
with open("svg/layout.svg", "w") as fout:
fout.write(svg)
def get_logo_placement(self, size):
"""calculate scale and x,y to fit in circle of radius=size"""
x1, y1, x2, y2 = boxes["canvas"]
width = x2 - x1
height = y2 - y1
ar = width / height
if __name__ == "__main__":
    l = Badge(1000)  # the class defined above is Badge; there is no Logo in this module
heart = paths["heart"]
bbox = boxes["heart"]
print(heart)
p = Path(heart)
print(p.d())
print(p.bbox())
x1, y1, x2, y2 = bbox
print(x1, y1, x2, y2)
l.gen_raw_svg()
l.gen_placement()
| [
"[email protected]"
] | |
acb8348fecf068802da63c27c4edb3dfd4a38d12 | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_i_ospfv3_neighbor_configuration.py | 63449143673653979dfb3179399a45495a40dc62 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.i_ospfv3_neighbor_configuration import IOspfv3NeighborConfiguration # noqa: E501
from swagger_client.rest import ApiException
class TestIOspfv3NeighborConfiguration(unittest.TestCase):
"""IOspfv3NeighborConfiguration unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIOspfv3NeighborConfiguration(self):
"""Test IOspfv3NeighborConfiguration"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.i_ospfv3_neighbor_configuration.IOspfv3NeighborConfiguration() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
f7e0ca4f8faa4f0f72846a1c2ba9af5e5b7bac40 | 2f36dc886195b67fd6fe0de48984e0268b3e0d71 | /invoke_retry/code/src/zato/invoke_retry/__init__.py | 762926718385ed9c1c066b15d87f47485bebbe93 | [] | no_license | aek/zato-labs | 84d52b3d1eea515c91fa6e7d4a439af3ee35ca05 | 302e40b8825f8fba5e3ea065280af742904fb25b | refs/heads/master | 2020-12-26T08:17:38.664195 | 2014-02-16T21:30:27 | 2014-02-16T21:30:27 | 16,894,545 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,393 | py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
from traceback import format_exc
# anyjson
from anyjson import dumps, loads
# bunch
from bunch import Bunch
# gevent
from gevent import sleep, spawn, spawn_later
# Zato
from zato.common import ZatoException
from zato.server.service import Service
def _retry_failed_msg(so_far, retry_repeats, service_name, retry_seconds, orig_cid, e):
return '({}/{}) Retry failed for:[{}], retry_seconds:[{}], orig_cid:[{}], e:[{}]'.format(
so_far, retry_repeats, service_name, retry_seconds, orig_cid, format_exc(e))
def _retry_limit_reached_msg(retry_repeats, service_name, retry_seconds, orig_cid):
return '({}/{}) Retry limit reached for:[{}], retry_seconds:[{}], orig_cid:[{}]'.format(
retry_repeats, retry_repeats, service_name, retry_seconds, orig_cid)
class NeedsRetry(ZatoException):
def __init__(self, cid, inner_exc):
self.cid = cid
self.inner_exc = inner_exc
def __repr__(self):
return '<{} at {} cid:[{}] inner_exc:[{}]>'.format(
self.__class__.__name__, hex(id(self)), self.cid, format_exc(self.inner_exc) if self.inner_exc else None)
class RetryFailed(ZatoException):
def __init__(self, remaining, inner_exc):
self.remaining = remaining
self.inner_exc = inner_exc
def __repr__(self):
return '<{} at {} remaining:[{}] inner_exc:[{}]>'.format(
self.__class__.__name__, hex(id(self)), self.remaining, format_exc(self.inner_exc) if self.inner_exc else None)
class _InvokeRetry(Service):
name = 'zato.labs._invoke-retry'
def _retry(self, remaining):
try:
response = self.invoke(self.req_bunch.target, *self.req_bunch.args, **self.req_bunch.kwargs)
except Exception, e:
msg = _retry_failed_msg(
(self.req_bunch.retry_repeats-remaining)+1, self.req_bunch.retry_repeats,
self.req_bunch.target, self.req_bunch.retry_seconds, self.req_bunch.orig_cid, e)
self.logger.info(msg)
raise RetryFailed(remaining-1, e)
else:
return response
def _notify_callback(self, is_ok):
callback_request = {
'ok': is_ok,
'orig_cid': self.req_bunch.orig_cid,
'target': self.req_bunch.target,
'retry_seconds': self.req_bunch.retry_seconds,
'retry_repeats': self.req_bunch.retry_repeats,
'context': self.req_bunch.callback_context
}
self.invoke_async(self.req_bunch.callback, dumps(callback_request))
def _on_retry_finished(self, g):
""" A callback method invoked when a retry finishes. Will decide whether it should be
attempted to retry the invocation again or give up notifying the uses via callback
service if retry limit is reached.
"""
# Was there any exception caught when retrying?
e = g.exception
if e:
# Can we retry again?
if e.remaining:
g = spawn_later(self.req_bunch.retry_seconds, self._retry, e.remaining)
g.link(self._on_retry_finished)
# Reached the limit, warn users in logs, notify callback service and give up.
else:
msg = _retry_limit_reached_msg(self.req_bunch.retry_repeats,
self.req_bunch.target, self.req_bunch.retry_seconds, self.req_bunch.orig_cid)
self.logger.warn(msg)
self._notify_callback(False)
# Let the callback know it's all good
else:
self._notify_callback(True)
def handle(self):
# Convert to bunch so it's easier to read everything
self.req_bunch = Bunch(loads(self.request.payload))
# Initial retry linked to a retry callback
g = spawn(self._retry, self.req_bunch.retry_repeats)
g.link(self._on_retry_finished)
class InvokeRetry(Service):
""" Provides invoke_retry service that lets one invoke service with parametrized
retries.
"""
name = 'zato.labs.invoke-retry'
def _get_retry_settings(self, target, **kwargs):
async_fallback = kwargs.get('async_fallback')
callback = kwargs.get('callback')
callback_context = kwargs.get('callback_context')
retry_repeats = kwargs.get('retry_repeats')
retry_seconds = kwargs.get('retry_seconds')
retry_minutes = kwargs.get('retry_minutes')
if async_fallback:
items = ('callback', 'retry_repeats')
for item in items:
value = kwargs.get(item)
if not value:
msg = 'Could not invoke [{}], {}:[{}] was not given'.format(target, item, value)
self.logger.error(msg)
raise ValueError(msg)
if retry_seconds and retry_minutes:
msg = 'Could not invoke [{}], only one of retry_seconds:[{}] and retry_minutes:[{}] can be given'.format(
target, retry_seconds, retry_minutes)
self.logger.error(msg)
raise ValueError(msg)
if not(retry_seconds or retry_minutes):
msg = 'Could not invoke [{}], exactly one of retry_seconds:[{}] or retry_minutes:[{}] must be given'.format(
target, retry_seconds, retry_minutes)
self.logger.error(msg)
raise ValueError(msg)
try:
self.server.service_store.name_to_impl_name[callback]
except KeyError, e:
msg = 'Service:[{}] does not exist, e:[{}]'.format(callback, format_exc(e))
self.logger.error(msg)
raise ValueError(msg)
# Get rid of arguments our superclass doesn't understand
for item in('async_fallback', 'callback', 'callback_context', 'retry_repeats', 'retry_seconds', 'retry_minutes'):
kwargs.pop(item, True)
# Note that internally we use seconds only.
return async_fallback, callback, callback_context, retry_repeats, retry_seconds or retry_minutes * 60, kwargs
def _invoke_async_retry(self, target, retry_repeats, retry_seconds, orig_cid, callback, callback_context, args, kwargs):
# Request to invoke the background service with ..
retry_request = {
'target': target,
'retry_repeats': retry_repeats,
'retry_seconds': retry_seconds,
'orig_cid': orig_cid,
'callback': callback,
'callback_context': callback_context,
'args': args,
'kwargs': kwargs
}
return self.invoke_async(_InvokeRetry.get_name(), dumps(retry_request))
def invoke_async_retry(self, target, *args, **kwargs):
async_fallback, callback, callback_context, retry_repeats, retry_seconds, kwargs = self._get_retry_settings(target, **kwargs)
return self._invoke_async_retry(target, retry_repeats, retry_seconds, self.cid, callback, callback_context, args, kwargs)
def invoke_retry(self, target, *args, **kwargs):
async_fallback, callback, callback_context, retry_repeats, retry_seconds, kwargs = self._get_retry_settings(target, **kwargs)
# Let's invoke the service and find out if it works, maybe we don't need
# to retry anything.
try:
result = self.invoke(target, *args, **kwargs)
except Exception, e:
msg = 'Could not invoke:[{}], cid:[{}], e:[{}]'.format(target, self.cid, format_exc(e))
self.logger.warn(msg)
# How we handle the exception depends on whether the caller wants us
# to block or prefers if we retry in background.
if async_fallback:
# .. invoke the background service and return CID to the caller.
cid = self._invoke_async_retry(target, retry_repeats, retry_seconds, self.cid, callback, callback_context, args, kwargs)
raise NeedsRetry(cid, e)
# We are to block while repeating
else:
# Repeat the given number of times sleeping for as many seconds as we are told
remaining = retry_repeats
result = None
while remaining > 0:
try:
result = self.invoke(target, *args, **kwargs)
except Exception, e:
msg = _retry_failed_msg((retry_repeats-remaining)+1, retry_repeats, target, retry_seconds, self.cid, e)
self.logger.info(msg)
sleep(retry_seconds)
remaining -= 1
# OK, give up now, there's nothing more we can do
if not result:
msg = _retry_limit_reached_msg(retry_repeats, target, retry_seconds, self.cid)
self.logger.warn(msg)
raise ZatoException(None, msg)
else:
# All good, simply return the response
return result
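# --- Usage sketch (added; service and callback names are hypothetical) ---
# A user service subclasses InvokeRetry and either blocks while retrying in-line
# or, with async_fallback=True, hands the retries to the background
# _InvokeRetry service and gets notified through the callback service:
#
#   class MyService(InvokeRetry):
#       def handle(self):
#           response = self.invoke_retry('my.target.service', self.request.payload,
#               retry_repeats=5, retry_seconds=10,      # five attempts, 10 s apart
#               async_fallback=True,                    # raise NeedsRetry instead of blocking
#               callback='my.callback.service',         # receives ok/orig_cid/context
#               callback_context={'note': 'any JSON-serializable value'})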
| [
"[email protected]"
] | |
0030fcf22dbc77c896437faa18ec33a89a2f6c56 | 29625c33dc9642d984d7cf68763d57a9de62743e | /Bot/cogs/snipe.py | cd84e9a5d14d06e388ec6620f7a1eaee678360d7 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | DevRKJha/EpicBot | 7fe508d828ad6cde087213cdd1fbb3a480529905 | 33eb8b7d5ee1120865da91b9a31dc559657c318c | refs/heads/main | 2023-06-07T14:12:26.975039 | 2021-07-04T09:51:53 | 2021-07-04T09:51:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | import discord
from discord.ext import commands
class Snipe(commands.Cog):
def __init__(self, client):
self.client = client
self.client.sniped_messages = {}
self.client.edit_sniped_messages = {}
@commands.Cog.listener()
async def on_message_delete(self, message):
if message.author.bot:
return
self.client.sniped_messages[message.guild.id, message.channel.id] = (
message.content, message.author, message.channel.name,
message.created_at, message.attachments)
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if before.author.bot:
return
self.client.edit_sniped_messages[before.guild.id, before.channel.id] = (
before.content,
after.content,
before.author,
before.channel.name
)
@commands.command(aliases=['s'])
async def snipe(self, ctx):
try:
contents, author, channel_name, time, attachments = self.client.sniped_messages[
ctx.guild.id, ctx.channel.id]
files = ""
for file in attachments:
files += f"[{file.filename}]({file.proxy_url})" + "\n"
embed = discord.Embed(
description=contents, color=0x00FFFF, timestamp=time)
embed.set_author(
name=f"{author.name}#{author.discriminator}",
icon_url=author.avatar_url)
embed.add_field(
name="Attachments",
value=files[:-1] if len(attachments) != 0 else "None"
)
embed.set_footer(text=f"Deleted in #{channel_name}")
await ctx.send(embed=embed)
except:
await ctx.send("No messages were deleted here.")
@commands.command(aliases = ['es'])
async def editsnipe(self, ctx):
try:
before_content, after_content, author, channel_name = self.client.edit_sniped_messages[ctx.guild.id, ctx.channel.id]
embed = discord.Embed(description = f"**Before:**\n{before_content}\n\n**After:**\n{after_content}", color=0x00FFFF)
embed.set_author(name=f"{author.name}#{author.discriminator}", icon_url=author.avatar_url)
embed.set_footer(text=f"Edited in #{channel_name}")
await ctx.send(embed=embed)
except:
await ctx.send("No messages were edited here.")
def setup(client):
client.add_cog(Snipe(client))
| [
"[email protected]"
] | |
bdc0e3f4bf90ef80c3e1cbf6474771ad81912cc5 | be1762141886e27e2e542324ffb4650546aee58d | /setup.py | 24ce1ea64205555595d2b03b54e784c4b012fea5 | [] | no_license | rgleason/pypilot | 71c2aad9a9894c84a1a9819078887ea041ff0e7b | 66eabcc63a11c96b84f58588c87b6ef710ed5826 | refs/heads/master | 2023-06-12T09:26:42.846470 | 2021-06-29T06:38:37 | 2021-06-29T06:39:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,879 | py | #!/usr/bin/env python
#
# Copyright (C) 2017 Sean D'Epagnier
#
# This Program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
import sys
import os, os.path
if sys.version_info[0] < 3:
print('pypilot requires python version 3. python version is', sys.version)
exit(1)
if not os.path.exists('deps'):
import dependencies
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
linebuffer_module = Extension('pypilot/linebuffer/_linebuffer',
sources=['pypilot/linebuffer/linebuffer.cpp', 'pypilot/linebuffer/linebuffer.i'],
extra_compile_args=['-Wno-unused-result'],
swig_opts=['-c++']
)
arduino_servo_module = Extension('pypilot/arduino_servo/_arduino_servo',
sources=['pypilot/arduino_servo/arduino_servo.cpp', 'pypilot/arduino_servo/arduino_servo_eeprom.cpp', 'pypilot/arduino_servo/arduino_servo.i'],
extra_compile_args=['-Wno-unused-result'],
swig_opts=['-c++']
)
ugfx_defs = ['-DWIRINGPI']
try:
import RPi.GPIO
ugfx_libraries=['wiringPi']
except:
try:
import OPi.GPIO
ugfx_libraries=['wiringPi']
except:
print('no RPi.GPIO library for ugfx')
ugfx_libraries=[]
ugfx_defs = []
ugfx_module = Extension('pypilot/hat/ugfx/_ugfx',
sources=['hat/ugfx/ugfx.cpp',
'hat/ugfx/ugfx.i'],
extra_compile_args=['-Wno-unused-result'] + ugfx_defs,
libraries=ugfx_libraries,
swig_opts=['-c++'] + ugfx_defs
)
locale_files = []
for walk in os.walk('hat/locale'):
path, dirs, files = walk
path = path[len('hat/'):]
for file in files:
if file[len(file)-3:] == '.mo':
locale_files.append(os.path.join(path, file))
from pypilot import version
packages = ['pypilot', 'pypilot/pilots', 'pypilot/arduino_servo', 'ui', 'hat', 'web', 'pypilot/linebuffer', 'hat/ugfx']
try:
from setuptools import find_packages
packages = find_packages()
except:
pass
# ensure all packages are under pypilot
package_dirs = {}
for package in list(packages):
if not package.startswith('pypilot'):
packages.remove(package)
packages.append('pypilot.'+package)
package_dirs['pypilot.'+package] = package.replace('.', '/')
setup (name = 'pypilot',
version = version.strversion,
description = 'pypilot sailboat autopilot',
license = 'GPLv3',
author="Sean D'Epagnier",
url='http://pypilot.org/',
packages=packages,
package_dir=package_dirs,
ext_modules = [arduino_servo_module, linebuffer_module, ugfx_module],
package_data={'pypilot.hat': ['font.ttf', 'static/*', 'templates/*'] + locale_files,
'pypilot.ui': ['*.png', '*.mtl', '*.obj'],
'pypilot.web': ['static/*', 'templates/*']},
entry_points={
'console_scripts': [
'pypilot=pypilot.autopilot:main',
'pypilot_boatimu=pypilot.boatimu:main',
'pypilot_servo=pypilot.servo:main',
'pypilot_web=pypilot.web.web:main',
'pypilot_hat=pypilot.hat.hat:main',
'pypilot_control=pypilot.ui.autopilot_control:main',
'pypilot_calibration=pypilot.ui.autopilot_calibration:main',
'pypilot_client=pypilot.client:main',
'pypilot_scope=pypilot.ui.scope_wx:main',
'pypilot_client_wx=pypilot.ui.client_wx:main'
]
}
)
| [
"[email protected]"
] | |
73bc81c737025f384a2d55f27dbb83a3292b5dc9 | ba9387ad04a79e5e89204b2f292d01323c7198ad | /backend/chat_user_profile/migrations/0001_initial.py | 89ad1019538532877afb9c0211c55783492d29f1 | [] | no_license | crowdbotics-apps/message52-19836 | b9ec7d032a1548ba71153c443486a3b7c38da5f9 | fde8c1c0b0de9e939e156e29bb2cb4dce1607cad | refs/heads/master | 2022-12-04T23:48:19.002965 | 2020-08-28T02:37:50 | 2020-08-28T02:37:50 | 290,932,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | # Generated by Django 2.2.15 on 2020-08-28 02:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=20)),
('pin', models.CharField(max_length=100)),
('photo', models.URLField()),
('status', models.CharField(max_length=50)),
('birthdate', models.DateField()),
('gender', models.CharField(max_length=1)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('last_login', models.DateTimeField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='VerificationCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=255)),
('is_verified', models.BooleanField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_verified', models.DateTimeField()),
('sent_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='verificationcode_sent_to', to='chat_user_profile.Profile')),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_blocked', models.BooleanField()),
('is_favorite', models.BooleanField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('added_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contact_added_by', to=settings.AUTH_USER_MODEL)),
('added_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contact_added_profile', to='chat_user_profile.Profile')),
],
),
]
| [
"[email protected]"
] | |
016c75557647665c5a3773b8cf354ade5c11502f | 941c912f44beff33a072e086c1f561f6cdd64626 | /LeetCode/codes/22.py | 84f0db4955118accd480b9d684a7ae03a363e1dc | [] | no_license | adreena/MyStudyCorner | 3a13a743769ed144965b767f547c16df4d0fa0dd | 355c0dbd32ad201800901f1bcc110550696bc96d | refs/heads/master | 2023-02-20T07:39:32.391421 | 2021-01-25T01:46:21 | 2021-01-25T01:46:21 | 255,104,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # time catalan numbers (2n n)*1/n
# space: catalan numbers
from typing import List
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
self.outputs = []
def helper(n_left, n_right, output):
if n_left == 0 and n_right == 0:
self.outputs.append(output)
else:
if n_left>0:
helper(n_left-1, n_right, output+'(')
if n_right>n_left:
helper(n_left, n_right-1, output+')')
helper(n,n,'')
return self.outputs | [
"[email protected]"
] | |
c114e9e4c5fbe43f5efbc36d8ddc04c35dd32490 | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_08_Code/4375OS_08_12_Series.py | f279f6cc3587504d87af31fda1b21a119cea0200 | [] | no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | """
Name : 4375OS_08_12_Series.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : [email protected]
[email protected]
"""
import pandas as pd
from numpy.random import randn
x = pd.date_range('1/1/2013', periods=252)
data = pd.Series(randn(len(x)), index=x)
print data.head()
print data.tail()
| [
"[email protected]"
] | |
5f78d5f22130ef95b5451dbb67e83853d93a80b0 | a6566ebc69ed5e7a17e2091bdb10e7b6523eefc9 | /py/notifier/config.py | 49ae8e99337c9f08bbbe648c1ba901d19f2924d8 | [
"MIT"
] | permissive | mabotech/mabo.task | 916e71650b45a24bb3852206a3755a7fd0342e47 | 96752a5ae94349a46e3b6f9369cc0933d5e37be0 | refs/heads/master | 2020-06-05T13:05:02.768838 | 2015-11-29T08:18:10 | 2015-11-29T08:18:10 | 23,750,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py |
import toml
from singleton import Singleton
class Config(object):
    __metaclass__ = Singleton
def __init__(self):
conf_fn = "conf.toml"
with open(conf_fn) as conf_fh:
toml_str = conf_fh.read()
self.conf = toml.loads(toml_str)
def get_conf(self):
return self.conf | [
"[email protected]"
] | |
a08866fdeb02d1584aca3775017ebe2a118292d7 | 04252676935223f4d03eff0393ba921cb00be1e5 | /api/__init__.py | 59d9087892b08ac0854226a2346b05ea5f5dff99 | [] | no_license | kongp3/cross4u | 4083aec3e5fe9de1d351d25609cbdf996df6abe3 | 8bd4dcfe8ae8fee5a3f169428b138b1294633da0 | refs/heads/master | 2020-11-29T23:11:17.838675 | 2019-12-30T06:56:20 | 2019-12-30T06:56:20 | 230,235,707 | 0 | 0 | null | 2019-12-26T09:33:09 | 2019-12-26T09:31:52 | Python | UTF-8 | Python | false | false | 1,419 | py | # -*- coding: utf-8 -*-
import traceback
from flask import jsonify
from functools import wraps
class RestResponse(object):
""" 标准的接口Response类, 所有的api必须返回这个类的对象, 以便统一处理返回 """
def __init__(self,):
pass
    def fail(self, code=500, message="Server Got An Exception"):
d = {'meta': {
'success': False, 'status_code': code,
'message': message
}}
json_response = jsonify(d, )
return json_response
def success(self, code=200, data=None):
d = {'meta': {
'success': True, 'status_code': code,
'message': "Requset Successes"
}, 'data': data}
json_response = jsonify(d)
return json_response
def error_handler(f):
"""
    Handle exceptions in one place and return a uniform error payload; this adds some hidden coupling,
    but it has been acceptable so far.
:param f:
:return:
"""
@wraps(f)
def decorated_function(*args, **kwargs):
response = RestResponse()
try:
result = f(response=response, *args, **kwargs)
return result
except ValueError as e:
traceback.print_exc(limit=5)
            return response.fail(400, str(e))
except Exception as e:
traceback.print_exc(limit=5)
            return response.fail(500, message=str(e))
return decorated_function
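
# Illustrative usage sketch (the view below is hypothetical and not part of this module):
# error_handler injects a fresh RestResponse as the `response` keyword argument and turns
# uncaught exceptions into the uniform JSON error envelope defined above.
@error_handler
def example_view(item_id, response=None):
    if item_id <= 0:
        raise ValueError("item_id must be positive")  # handled as a 400 payload by error_handler
    return response.success(data={"id": item_id})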
| [
"kongp3@outlook"
] | kongp3@outlook |
f63660d7a58a51a6d96d21c74bf21e35e3469584 | 6bfda75657070e177fa620a43c917096cbd3c550 | /kubernetes/test/test_v1_job_status.py | f929582f971cf175bfa94a502d44fde0352fba4f | [
"Apache-2.0"
] | permissive | don41382/client-python | 8e7e747a62f9f4fc0402eea1a877eab1bb80ab36 | e69d4fe204b98f7d7ee3ada3996b4f5fbceae5fe | refs/heads/master | 2021-01-19T23:15:50.172933 | 2017-04-18T18:00:48 | 2017-04-18T18:00:48 | 88,943,866 | 0 | 0 | null | 2017-04-21T05:19:52 | 2017-04-21T05:19:52 | null | UTF-8 | Python | false | false | 827 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_job_status import V1JobStatus
class TestV1JobStatus(unittest.TestCase):
""" V1JobStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1JobStatus(self):
"""
Test V1JobStatus
"""
model = kubernetes.client.models.v1_job_status.V1JobStatus()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4e2a5aac3f2d8596ac600f55307f6e113b1f375b | 71da259f71428648d4285b1b4863ec2b7641e58c | /ecom/website/filters.py | 561533e588086d70c2830bed74278b2c4dbe38da | [] | no_license | rafimuhammad01/e-com | 8a58d0ccff27516da260b41c180c703fa22e76b3 | 75d451bfc10075090d88d5a16dbd03f626ff72ef | refs/heads/master | 2023-02-18T08:48:26.144483 | 2021-01-15T08:56:54 | 2021-01-15T08:56:54 | 291,625,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import django_filters
from .models import Product
class ProductFilter(django_filters.FilterSet) :
class Meta:
model = Product
fields = {
'price' : ['lt', 'gt'],
'review__rate' : ['iexact']
}
class ProductSearch(django_filters.FilterSet) :
class Meta:
model = Product
fields = {
'name' : ['icontains'],
} | [
"[email protected]"
] | |
6d5a2f9b2ddc3bfe891a3c7d27a364e5c4cb78eb | cdbb11473dc8d34767a5916f9f85cb68eb2ca3f2 | /sde/migrations/0036_auto_20180729_1518.py | a313a94deec723d4c8b6e96c080fc6923b8eec30 | [] | no_license | skyride/evestats | fb2a1a248952771731dcfecadab7d02b1f08cd4b | 4bd2153f65c084b478272513733dcc78f9a0ef98 | refs/heads/master | 2020-03-23T13:50:19.216870 | 2018-08-05T19:19:47 | 2018-08-05T19:19:47 | 141,640,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | # Generated by Django 2.0.7 on 2018-07-29 15:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sde', '0035_auto_20180729_1456'),
]
operations = [
migrations.RemoveField(
model_name='attributetype',
name='unit_id',
),
migrations.AddField(
model_name='attributetype',
name='unit',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sde.Unit'),
),
]
| [
"[email protected]"
] | |
693b5d23fcf762329e9caaaf81cb5e80096e70d6 | f26d67e3e9f8b90e5d6243279a1c2ce87fa41d46 | /src/prodstats/cq/tasks.py | f48b077503fffa1d9e9d88a31617f973023e977b | [
"MIT"
] | permissive | OCB-DS/prodstats | cf554e3abee651463e9f81606d4b633f464658a7 | 4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824 | refs/heads/master | 2022-11-25T15:30:06.988683 | 2020-08-02T16:08:05 | 2020-08-02T16:08:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,803 | py | import asyncio
import itertools
from datetime import timedelta
from typing import Coroutine, Dict, List, Union
import pandas as pd
from celery.utils.log import get_task_logger
from celery.utils.time import humanize_seconds
import calc.prod # noqa
import config as conf
import cq.signals # noqa
import cq.util
import db.models
import ext.metrics as metrics
import util
from collector import IHSClient
from const import HoleDirection, IHSPath, Provider
from cq.worker import celery_app
from executors import BaseExecutor, GeomExecutor, ProdExecutor, WellExecutor # noqa
logger = get_task_logger(__name__)
RETRY_BASE_DELAY = 15
# TODO: add retries
# TODO: tenacity?
# TODO: asynchronously fracture failed batches
# TODO: circuit breakers?
# TODO: add task meta
@celery_app.task
def log():
"""Print some log messages"""
logger.warning("task-check")
@celery_app.task
def smoke_test():
""" Verify an arbitrary Celery task can run """
return "verified"
def run_executors(
hole_dir: HoleDirection,
api14s: List[str] = None,
api10s: List[str] = None,
executors: List[BaseExecutor] = None,
batch_size: int = None,
log_vs: float = None,
log_hs: float = None,
):
executors = executors or [WellExecutor, GeomExecutor, ProdExecutor]
batch_size = batch_size or conf.TASK_BATCH_SIZE
if api14s is not None:
id_name = "api14s"
ids = api14s
elif api10s is not None:
id_name = "api10s"
ids = api10s
else:
raise ValueError("One of [api14s, api10s] must be specified")
# TODO: move chunking to run_executor?
for idx, chunk in enumerate(util.chunks(ids, n=batch_size)):
for executor in executors:
kwargs = {
"hole_dir": hole_dir,
"executor_name": executor.__name__,
id_name: chunk,
}
countdown = cq.util.spread_countdown(idx, vs=log_vs, hs=log_hs)
logger.info(
f"({executor.__name__}[{hole_dir.value}]) submitting task: {id_name}={len(chunk)} countdown={countdown}" # noqa
)
run_executor.apply_async(
args=[],
kwargs=kwargs,
countdown=countdown,
ignore_result=False,
routing_key=hole_dir,
)
@celery_app.task(is_eager=True)
def post_heartbeat():
""" Send heartbeat to metrics backend"""
return metrics.post_heartbeat()
@celery_app.task
def run_executor(hole_dir: HoleDirection, executor_name: str, **kwargs):
# logger.warning(f"running {executor_name=} {hole_dir=} {kwargs=}")
executor = globals()[executor_name]
count, dataset = executor(hole_dir).run(**kwargs)
@celery_app.task
def run_next_available(
hole_dir: Union[HoleDirection, str], force: bool = False, **kwargs
):
""" Run next available area """
# TODO: set task meta
hole_dir = HoleDirection(hole_dir)
async def coro():
# await db.startup()
# hole_dir = HoleDirection.H
# TODO: move to Router
if hole_dir == HoleDirection.H:
ids_path = IHSPath.well_h_ids
else:
ids_path = IHSPath.well_v_ids
area_obj, attr, is_ready, cooldown_hours = await db.models.Area.next_available(
hole_dir
)
utcnow = util.utcnow()
prev_run = getattr(area_obj, attr)
if is_ready or force:
api14s: List[str] = await IHSClient.get_ids_by_area(
path=ids_path, area=area_obj.area
            ) # pull from IDMaster once implemented
# api14s = api14s[:10]
run_executors(hole_dir=hole_dir, api14s=api14s, **kwargs)
await area_obj.update(**{attr: utcnow}).apply()
prev_run = (
prev_run.strftime(util.dt.formats.no_seconds) if prev_run else None
)
utcnow = utcnow.strftime(util.dt.formats.no_seconds)
print(
f"({db.models.Area.__name__}[{hole_dir}]) updated {area_obj.area}.{attr}: {prev_run} -> {utcnow}" # noqa
)
else:
next_run_in_seconds = (
(prev_run + timedelta(hours=cooldown_hours)) - utcnow
).total_seconds()
print(
f"({db.models.Area.__name__}[{hole_dir}]) Skipping {area_obj.area} next available for run in {humanize_seconds(next_run_in_seconds)}" # noqa
) # noqa
return util.aio.async_to_sync(coro())
@celery_app.task()
def sync_area_manifest(): # FIXME: change to use Counties endpoint (and add Counties endpoint to IHS service :/) # noqa
""" Ensure the local list of areas is up to date """
loop = asyncio.get_event_loop()
async def wrapper(path: IHSPath, hole_dir: HoleDirection) -> List[Dict]:
records: List[Dict] = []
areas = await IHSClient.get_areas(path=path, name_only=False)
records = [
{"area": area["name"], "providers": [Provider.IHS]} for area in areas
]
return records
coros: List[Coroutine] = []
for args in [
(IHSPath.well_h_ids, HoleDirection.H),
(IHSPath.well_v_ids, HoleDirection.V),
(IHSPath.prod_h_ids, HoleDirection.H),
(IHSPath.prod_v_ids, HoleDirection.V),
]:
coros.append(wrapper(*args))
results = loop.run_until_complete(asyncio.gather(*coros))
inbound_df = pd.DataFrame(list(itertools.chain(*results))).set_index("area")
inbound_areas = inbound_df.groupby(level=0).first().sort_index()
existing_areas = util.aio.async_to_sync(db.models.Area.df()).sort_index()
# get unique area names that dont already exist
for_insert = inbound_areas[~inbound_areas.isin(existing_areas)].dropna()
# for_insert["h_last_run_at"] = util.utcnow()
if for_insert.shape[0] > 0:
coro = db.models.Area.bulk_upsert(
for_insert,
update_on_conflict=True,
reset_index=True,
conflict_constraint=db.models.Area.constraints["uq_areas_area"],
)
affected = loop.run_until_complete(coro)
logger.info(
f"({db.models.Area.__name__}) synchronized manifest: added {affected} areas"
)
else:
logger.info(f"({db.models.Area.__name__}) synchronized manifest: no updates")
@celery_app.task
def sync_known_entities(hole_dir: HoleDirection):
hole_dir = HoleDirection(hole_dir)
if hole_dir == HoleDirection.H:
path = IHSPath.well_h_ids
else:
path = IHSPath.well_v_ids
areas: List[Dict] = util.aio.async_to_sync(IHSClient.get_areas(path=path))
for idx, area in enumerate(areas):
sync_known_entities_for_area.apply_async(
args=(hole_dir, area), kwargs={}, countdown=idx + 30
)
@celery_app.task
def sync_known_entities_for_area(hole_dir: HoleDirection, area: str):
async def wrapper(hole_dir: HoleDirection, area: str):
hole_dir = HoleDirection(hole_dir)
index_cols = ["entity_id", "entity_type"]
if hole_dir == HoleDirection.H:
path = IHSPath.well_h_ids
else:
path = IHSPath.well_v_ids
# fetch ids from remote service
ids = await IHSClient.get_ids_by_area(path, area=area)
df = pd.Series(ids, name="entity_id").to_frame()
df["ihs_last_seen_at"] = util.utcnow()
df["entity_type"] = "api14"
df = df.set_index(index_cols)
# query matching records existing in the known_entities model
objs: List[db.models.KnownEntity] = await db.models.KnownEntity.query.where(
db.models.KnownEntity.entity_id.in_(ids)
).gino.all()
obj_df = pd.DataFrame([x.to_dict() for x in objs]).set_index(index_cols)
fresh = pd.DataFrame(index=obj_df.index, columns=obj_df.columns)
# merge the records, prioritizing new values from the remote service
combined = fresh.combine_first(df).combine_first(obj_df)
combined = combined.drop(columns=["created_at", "updated_at"])
# persist the new records
await db.models.KnownEntity.bulk_upsert(combined, batch_size=1000)
util.aio.async_to_sync(wrapper(hole_dir, area))
@celery_app.task
def run_for_apilist(
hole_dir: HoleDirection,
api14s: List[str] = None,
api10s: List[str] = None,
**kwargs,
):
    run_executors(HoleDirection(hole_dir), api14s=api14s, api10s=api10s, **kwargs)
@celery_app.task
def run_driftwood(hole_dir: HoleDirection, **kwargs):
hole_dir = HoleDirection(hole_dir)
executors = [WellExecutor, GeomExecutor, ProdExecutor]
if hole_dir == HoleDirection.H:
api14s = [
"42461409160000",
"42383406370000",
"42461412100000",
"42461412090000",
"42461411750000",
"42461411740000",
"42461411730000",
"42461411720000",
"42461411600000",
"42461411280000",
"42461411270000",
"42461411260000",
"42383406650000",
"42383406640000",
"42383406400000",
"42383406390000",
"42383406380000",
"42461412110000",
"42383402790000",
]
elif hole_dir == HoleDirection.V:
api14s = [
"42461326620001",
"42461326620000",
"42461328130000",
"42461343960001",
"42461352410000",
"42383362120000",
"42383362080000",
"42383362090000",
"42383374140000",
"42383374130000",
"42383362060000",
]
else:
raise ValueError(f"Invalid hole direction: {hole_dir=}")
run_executors(hole_dir, api14s=api14s, executors=executors, **kwargs)
if __name__ == "__main__":
from db import db
util.aio.async_to_sync(db.startup())
import db.models
import loggers
import cq.tasks
from const import HoleDirection
loggers.config()
hole_dir = HoleDirection.H
# cq.tasks.sync_area_manifest.apply_async()
# cq.tasks.run_next_available(HoleDirection.H, log_vs=10, log_hs=None)
api14s = [
"42475014800000",
"42475014810000",
"42475014810001",
"42475014820000",
"42475014820001",
"42475014830000",
"42475014840000",
"42475014850000",
"42475014860000",
"42475014860001",
"42475014860002",
"42475014870000",
"42475014870001",
"42475014880000",
"42475014880001",
"42475014890000",
"42475014890001",
"42475014900000",
"42475014900001",
"42475014900002",
"42475014910000",
"42475014910001",
"42475014920000",
"42475014920001",
"42475014920002",
]
holedir = HoleDirection.V
async def run_wells(holedir: HoleDirection, api14s: List[str]):
wexec = WellExecutor(holedir)
wellset = await wexec.download(api14s=api14s)
wellset = await wexec.process(wellset)
await wexec.persist(wellset)
async def run_geoms(holedir: HoleDirection, api14s: List[str]):
gexec = GeomExecutor(holedir)
geomset = await gexec.download(api14s=api14s)
geomset = await gexec.process(geomset)
await gexec.persist(geomset)
loggers.config(formatter="funcname")
async def run_production(holedir: HoleDirection, api14s: List[str]):
pexec = ProdExecutor(holedir)
prodset = await pexec.download(api14s=api14s)
prodset = await pexec.process(prodset)
await pexec.persist(prodset)
async def async_wrapper():
hole_dir = HoleDirection.H
IHSPath.well_h_ids
sync_known_entities_for_area(hole_dir, "tx-upton")
| [
"[email protected]"
] | |
045c865fc678eba2750f62646d81f6c24d5e15cb | 7e93b1c33045b4c03054f42b6a2b800279b12a9b | /core/cache/backends/redis/compressors/base.py | 8d9e74deabf56d11d14abf6ab944ca71c3f9526c | [
"MIT"
] | permissive | anthill-arch/framework | 6f8036980667843f2be1414850255cf6a10e2dcd | a6c238a62ae9c3fb319d12e77f7e9047aab75e8d | refs/heads/master | 2020-05-09T06:01:31.186830 | 2019-08-23T13:52:43 | 2019-08-23T13:52:43 | 180,988,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | class BaseCompressor(object):
def __init__(self, options):
self._options = options
def compress(self, value):
raise NotImplementedError
def decompress(self, value):
raise NotImplementedError
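
# A minimal concrete subclass, sketched for illustration; the zlib-based compressor below is
# an assumption for demonstration and is not part of the original package.
import zlib


class ZlibCompressor(BaseCompressor):
    def compress(self, value):
        # `value` is expected to be bytes
        return zlib.compress(value)

    def decompress(self, value):
        return zlib.decompress(value)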
| [
"[email protected]"
] | |
f6c7e47f18fecf5204af653cae821b0dbc934729 | 3a60b8935f809e300405214a66d949f0042e7e46 | /src/map/tile.py | de575e0a0d16478ca90b3e9f8119073989688f77 | [] | no_license | stellarlib/centaurus | e71fe5c98b94e8e575d00e32f55ba39fe71799e6 | 896ae73165f3f44dfb87378ef2635d447ccbccae | refs/heads/master | 2020-08-29T00:02:47.294370 | 2020-07-06T20:06:02 | 2020-07-06T20:06:02 | 217,860,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | from random import randint
from src.map.hex_map_properties import EdgeID
class Tile(object):
n = 15
GRASS, WOODS, WATER, ROCKS, SAND, CLAY, ROAD, WALL, EXIT,\
EXIT0, EXIT1, EXIT2, EXIT3, EXIT4, EXIT5 = range(n)
OPEN = {GRASS, ROAD}
IMPASSABLE = {WALL, ROCKS, WATER}
OBSTACLE = {WALL, ROCKS}
SLOWS_CHARGE = {WOODS, WATER, SAND, CLAY}
DEADLY = {WATER}
SHELTERED = {WOODS, WALL, ROCKS, WATER}
EXIT_TILES = {EXIT, EXIT0, EXIT1, EXIT2, EXIT3, EXIT4, EXIT5}
IMPASSABLE.update(EXIT_TILES)
OBSTACLE.update(EXIT_TILES)
SHELTERED.update(EXIT_TILES)
EDGE_ID_TO_EXIT = {
EdgeID.Ae: EXIT0,
EdgeID.Be: EXIT1,
EdgeID.Ce: EXIT2,
EdgeID.De: EXIT3,
EdgeID.Ee: EXIT4,
EdgeID.Fe: EXIT5,
}
@classmethod
def random_tile(cls):
return randint(0, cls.n-1)
@classmethod
def is_open(cls, t):
        return t in cls.OPEN
@classmethod
def is_passable(cls, t):
return t not in cls.IMPASSABLE
@classmethod
def is_targetable(cls, t):
return t not in cls.SHELTERED
@classmethod
def is_obstacle(cls, t):
return t in cls.OBSTACLE
@classmethod
def is_slowing(cls, t):
return t in cls.SLOWS_CHARGE
@classmethod
def is_deadly(cls, t):
return t in cls.DEADLY
| [
"[email protected]"
] | |
be1a846751a9b1a0055e8a81bbfe3ca6df2c437e | 173f6c30ec4e5e3eb99ecf493e9019828270f5d3 | /main.py | ac93f58ecffa8176285a3e5857c06808694f3832 | [
"MIT"
] | permissive | DrLuke/FEMM-bode | 14d1468d7f623a0fa874b9bb7d25b0feb9eb79f3 | 851397f914fbb114707ffa1202f9c3ac3a37bc37 | refs/heads/master | 2021-01-19T16:48:37.216579 | 2017-07-05T13:27:22 | 2017-07-05T13:27:22 | 88,288,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,234 | py | import subprocess
import matplotlib as mpl
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import re
import math, cmath
from scipy.interpolate import griddata
import os
from femm import FEMM, FEMMfem, FEMMans
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QAction, QFileDialog, QSizePolicy
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
import sys
import gui
import math
import appdirs
import json
from pathlib import Path
def customexcepthook(type, value, traceback):
print(traceback.print_exc())
raise(Exception())
sys.excepthook = customexcepthook
class FEMMCanvas(FigureCanvasQTAgg):
def __init__(self, fig):
fig = matplotlib.figure.Figure(figsize=(4, 4), dpi=100)
self.axes = fig.add_subplot(111)
super(FEMMCanvas, self).__init__(fig)
self.lastdrawnfreq = None
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
#self.updateGeometry()
self.draw_idle()
def updateFEMM(self, freq: float, solutions: dict):
keys = list(solutions.keys())
freqdist = [abs(x - freq) for x in keys]
idx = freqdist.index(min(freqdist))
if not self.lastdrawnfreq == keys[idx]:
solution = solutions[keys[idx]]
self.lastdrawnfreq = keys[idx]
self.axes.clear()
self.axes.imshow(solution["imdata"], extent=(math.floor(solution["ans"].x.min()), math.ceil(solution["ans"].x.max()), math.floor(solution["ans"].y.min()), math.ceil(solution["ans"].y.max())))
self.draw()
self.repaint()
class BodeCanvas(FigureCanvasQTAgg):
def __init__(self, fig):
fig = matplotlib.figure.Figure(figsize=(4, 4), dpi=100)
self.axes = fig.add_subplot(111)
self.axes.set_xlabel("Frequency [Hz]")
self.axes.set_ylabel("Magnetic Flux Dampening [dB]")
self.axes2 = self.axes.twinx()
self.axes2.set_ylabel("Magnetic Flux Phase [radians]")
super(BodeCanvas, self).__init__(fig)
self.lastdrawnfreq = None
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
#self.updateGeometry()
self.draw_idle()
def updateBode(self, freqs: list, bodevalues: list):
dcval = None
if freqs[0] == 0:
freqs = freqs[1:]
dcval = bodevalues[0]
bodevalues = bodevalues[1:]
ampli = list(map(abs, bodevalues))
if dcval is not None:
for i in range(len(ampli)):
ampli[i] = 20*math.log10(abs(ampli[i]) / abs(dcval))
phase = list(map(cmath.phase, bodevalues))
for i in range(len(phase)):
phase[i] = phase[i] / (2*math.pi) * 360
print(phase)
print(freqs)
self.axes.clear()
self.axes.grid()
self.axes.semilogx(freqs, ampli, "b")
self.axes2.clear()
self.axes2.semilogx(freqs, phase, "r-")
self.draw()
self.repaint()
class FEMMSolutionManager:
def __init__(self, canvas: FEMMCanvas, bodecanvas: BodeCanvas, ui: gui.Ui_MainWindow, femmfile: FEMMfem, config: dict):
self.canvas = canvas
self.bodecanvas = bodecanvas
self.ui = ui
self.femmfile = femmfile
self.config = config
# Initialize UI
self.ui.generateButton.setEnabled(True)
self.ui.freqSlider.setEnabled(False)
self.ui.freqSpinBox.setEnabled(False)
# initialize some healthy values
self.minfreq = self.ui.minfreqSpinBox.value()
self.maxfreq = self.ui.maxfreqSpinBox.value()
self.decadesteps = self.ui.decadestepsSpinBox.value()
self.viewfreq = self.minfreq
self.ui.freqSlider.setMinimum(math.floor(math.log10(self.minfreq) * 100))
self.ui.freqSlider.setMaximum(math.ceil(math.log10(self.maxfreq) * 100))
# Signals
self.ui.minfreqSpinBox.valueChanged.connect(self.minmaxchange)
self.ui.maxfreqSpinBox.valueChanged.connect(self.minmaxchange)
self.ui.decadestepsSpinBox.valueChanged.connect(self.stepchange)
self.ui.freqSlider.valueChanged.connect(self.freqsliderchange)
self.ui.freqSpinBox.valueChanged.connect(self.freqspinboxchange)
self.ui.generateButton.pressed.connect(self.gensolutions)
# Matplotlib Signals
self.mplsignal = self.canvas.mpl_connect("button_press_event", self.canvasClicked)
self.solutions = {}
def minmaxchange(self, value):
self.minfreq = self.ui.minfreqSpinBox.value()
self.maxfreq = self.ui.maxfreqSpinBox.value()
self.ui.freqSlider.setMinimum(math.floor(math.log10(self.minfreq)*100))
self.ui.freqSlider.setMaximum(math.ceil(math.log10(self.maxfreq)*100))
def stepchange(self, value):
# This is a bit more complicated. When the step size changes, all previous solutions have to be discarded.
self.decadesteps = self.ui.decadestepsSpinBox.value()
self.solutions = {} # Dump all solutions when the stepsize is changes, otherwise data recycling will become too complicated
self.ui.generateButton.setEnabled(True)
self.ui.freqSlider.setEnabled(False)
self.ui.freqSpinBox.setEnabled(False)
def freqsliderchange(self, value):
if self.ui.freqSlider.hasFocus():
self.ui.freqSpinBox.setValue(10**(value/100))
self.canvas.updateFEMM(10**(value/100), self.solutions)
def freqspinboxchange(self, value):
if self.ui.freqSpinBox.hasFocus():
self.ui.freqSlider.setValue(math.log10(value)*100)
self.canvas.updateFEMM(value, self.solutions)
def genlogrange(self):
start = math.log10(self.minfreq)
stop = math.log10(self.maxfreq)
num = int((stop-start)*self.decadesteps)
if num <= 0:
num = 1
return np.concatenate((np.array([0]), np.logspace(start, stop, num, endpoint=True))) # Add frequency 0 aswell
def gensolutions(self):
logrange = self.genlogrange()
with open(os.path.join(self.config["cdrivepath"], "TEMP.lua"), "w") as f:
f.write("open(\"C:\\TEMP.FEM\"); mi_setfocus(\"TEMP.FEM\"); mi_analyze(); quit(1)")
for freq in logrange:
if freq not in self.solutions:
with open(os.path.join(self.config["cdrivepath"], "TEMP.FEM"), "w") as f:
f.write(self.femmfile.setfreq(freq))
# TODO: Let wine (optionally) run in fake screenbuffer for maximum efficiency
subprocess.call(["wine", self.config["femmexe"], "C:\\TEMP.FEM", "-lua-script=C:\\TEMP.lua"])
ans = FEMMans.readans(os.path.join(self.config["cdrivepath"], "TEMP.ans"))
self.solutions[freq] = {"ans": ans, "imdata": ans.generateimdata(100)}
os.remove(os.path.join(self.config["cdrivepath"], "TEMP.lua"))
os.remove(os.path.join(self.config["cdrivepath"], "TEMP.FEM"))
os.remove(os.path.join(self.config["cdrivepath"], "TEMP.ans"))
self.ui.generateButton.setEnabled(False)
self.ui.freqSlider.setEnabled(True)
self.ui.freqSpinBox.setEnabled(True)
def canvasClicked(self, event):
xcoord = event.xdata
ycoord = event.ydata
freqs = []
vals = []
for freq in self.solutions:
freqs.append(freq)
vals.append(self.solutions[freq]["ans"].getValueAtPoint(xcoord, ycoord))
self.bodecanvas.updateBode(freqs, vals)
class bodewindow(QMainWindow):
def __init__(self, config, *args, **kwargs):
self.config = config
super(bodewindow, self).__init__(*args, **kwargs)
self.ui = gui.Ui_MainWindow()
self.ui.setupUi(self)
self.setupView()
self.currentFEM = None
# Solutions Manager
## Will act whenever you change frequency range and step size to calculate new solutions as required
self.FEMMSolutionManager = None
# Menubar signals
self.ui.actionLoad_FEM_File.triggered.connect(self.selectFEM) # Open *.FEM file
self.ui.actionExit.triggered.connect(self.close) # Exit Action
def setupView(self):
self.FEMMFig = matplotlib.figure.Figure(figsize=(100, 100), dpi=300)
self.FEMMCanvas = FEMMCanvas(self.FEMMFig)
self.FEMMTab = self.ui.tabWidget.addTab(self.FEMMCanvas, "FEMM Project Display")
self.bodeFig = matplotlib.figure.Figure(figsize=(100, 100), dpi=300)
self.bodeCanvas = BodeCanvas(self.bodeFig)
self.bodeTab = self.ui.tabWidget.addTab(self.bodeCanvas, "Bode Plot")
def selectFEM(self):
fileDialog = QFileDialog()
fileDialog.setDefaultSuffix("FEM")
fileDialog.setNameFilters(["FEMM Project File (*.FEM *.fem)", "FEMM Solution File (*.ans)", "Any files (*)"])
if fileDialog.exec():
path = fileDialog.selectedFiles()[0] # selectedFiles returns a list of selected files, but we only take the first
if os.path.exists(path):
self.currentFEM = FEMMfem(path=path)
self.FEMMSolutionManager = FEMMSolutionManager(self.FEMMCanvas, self.bodeCanvas, self.ui, self.currentFEM, self.config)
def main():
configdir = appdirs.user_config_dir("FEMMBode")
if not os.path.isdir(configdir): # Create configuration dir if it doesn't exist
os.makedirs(configdir)
if os.path.exists(os.path.join(configdir, "preferences.json")): # Check if config file exists, load if true
with open(os.path.join(configdir, "preferences.json")) as f:
config = json.load(f)
else: # Create blank config file if false
config = {"cdrivepath": os.path.join(os.path.expanduser("~/.wine/drive_c")),
"femmexe": "C:\\\\femm42\\\\bin\\\\femm.exe"}
with open(os.path.join(configdir, "preferences.json"), "w") as f:
json.dump(config, f, indent=4, sort_keys=True)
app = QApplication(sys.argv)
mainwindow = bodewindow(config)
mainwindow.show()
retcode = app.exec_()
sys.exit(retcode)
if __name__ == "__main__":
a = FEMM()
main()
| [
"[email protected]"
] | |
df6b3cdaa09073a8075e928c7ef9df25a4f7150f | 1f3a2cee3654c11586b30151fd9a3fc6fd705c0a | /deep_learning/pic.py | 5488840c452ac9667371b0542085d8a928c76634 | [] | no_license | 2020668/AI | 24ff42cefacac3f397e7a6f54dda7e9d741f2e03 | 3e1a78a6348e453b0f3e8862a784105620a22bc1 | refs/heads/master | 2020-12-09T02:03:55.311209 | 2020-02-10T15:36:26 | 2020-02-10T15:36:26 | 231,186,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import tensorflow as tf
print(tf.__version__)
# prints the installed version, e.g. '2.0.0-alpha0'
print(tf.test.is_gpu_available())
# prints True when the GPU-enabled installation is working correctly
| [
"[email protected]"
] | |
b70b66f6096c21bc356db17ef7a9da3c02eaf719 | f8bb2d5287f73944d0ae4a8ddb85a18b420ce288 | /python/basic/for/format_obj.py | df47968c9dd34f209133f19983ab9c23ed9b0fe2 | [] | no_license | nishizumi-lab/sample | 1a2eb3baf0139e9db99b0c515ac618eb2ed65ad2 | fcdf07eb6d5c9ad9c6f5ea539046c334afffe8d2 | refs/heads/master | 2023-08-22T15:52:04.998574 | 2023-08-20T04:09:08 | 2023-08-20T04:09:08 | 248,222,555 | 8 | 20 | null | 2023-02-02T09:03:50 | 2020-03-18T12:14:34 | C | UTF-8 | Python | false | false | 46 | py | for 変数 in オブジェクト:
処理
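
# The template above reads "for <variable> in <iterable>:" followed by the loop body ("process").
# A concrete, runnable instance of the same pattern (the example values are illustrative):
for fruit in ["apple", "banana", "cherry"]:
    print(fruit)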
| [
"[email protected]"
] | |
9ba384d7416217505108520e70e49bd802012c66 | 7b3009e019e081667df67c6b41328b5db632b898 | /instances/shadows_of_infinity.py | 34a98b9e889adc772c3ce344061ead7226547267 | [
"MIT"
] | permissive | frostburn/multibranch-mandelbrot | d1e2cc6bce6ab8f065b678fb2133bd3057b832d5 | 84e4887ffc90a5338ae448ced6f62fcf40bc11a1 | refs/heads/master | 2023-08-02T18:20:56.671175 | 2021-09-28T09:57:58 | 2021-09-28T09:57:58 | 287,219,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import os
import sys
sys.path.insert(0, os.path.abspath('.'))
from pylab import *
from mandelbrot import mandelbrot, mandelbrot_generic, buddhabrot
import nonescaping
import classic
from coloring import red_lavender, black_multi_edge, rainbow, gray, sepia, subharmonics, creature, white_multi_edge
import color_gradients
from scipy.ndimage import gaussian_filter
def make_picture_frame(rgb, dither=1.0/256.0):
if dither:
rgb = [channel + random(channel.shape)*dither for channel in rgb]
frame = stack(rgb, axis=-1)
frame = clip(frame, 0.0, 1.0)
return frame
if __name__ == '__main__':
scale = 10
# Instagram
width, height = 108*scale, 108*scale
anti_aliasing = 2
num_samples = 1<<25
max_iter = 1<<10
min_iter = 1<<9
zoom = -1.7
rotation = -pi*0.5
x, y = -0.2, 0.001
def circle_factory(theta, delta, radius=1.0, spread=0.5, x=0.0, y=0.08):
def circle(num_samples):
phi = rand(num_samples) - rand(num_samples)
phi = theta + delta * phi
r = radius + randn(num_samples) * spread
return x + cos(phi) * r + 1j * (y + sin(phi) * r)
return circle
offset = 0.5
delta = 3.5
exposures = []
num_layers = 1
for i in range(num_layers):
sub_exposures = [
(3*i+min_iter, 3*i+max_iter, circle_factory(offset + j*2*pi/3, delta)) for j in range(3)
]
exposures.extend(sub_exposures)
def color_map(exposed):
e = exposed[0]*0.0
result = array([e, e, e])
for i in range(num_layers):
for j in range(3):
result[j] += (3*scale**2*exposed[i*3 + j] * num_samples**-0.9)**0.78
return result
image = buddhabrot(width, height, x, y, zoom, rotation, -2, 1, num_samples, exposures, color_map, anti_aliasing=anti_aliasing, bailout=1e300)
imsave("/tmp/out.png", make_picture_frame(image))
| [
"[email protected]"
] | |
5dd88af5ae5e82c13194560776281ec8a542cab7 | 93ad65a519037b2a6c9363f356a00b3e51350537 | /djR/conf.py | c80052a26cce28b6dc8ddf5b5a232ed373f0e040 | [
"MIT"
] | permissive | JheanMarllos/django-R | 6ccc9b42dbca50c803c740315fbeda136be1ad9c | 3c1f8adfa2a16ad9cf9856e4dd7cd889e7a3c229 | refs/heads/master | 2020-05-30T12:42:49.200732 | 2019-06-02T11:37:49 | 2019-06-02T11:37:49 | 189,741,022 | 0 | 0 | MIT | 2019-06-01T14:16:22 | 2019-06-01T14:16:22 | null | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
from django.conf import settings
RETHINKDB_HOST = getattr(settings, 'RETHINKDB_HOST', 'localhost')
RETHINKDB_PORT = getattr(settings, 'RETHINKDB_PORT', 28015)
RETHINKDB_USER = getattr(settings, 'RETHINKDB_USER', None)
RETHINKDB_PASSWORD = getattr(settings, 'RETHINKDB_PASSWORD', None)
DEFAULT_DB = getattr(settings, 'R_DEFAULT_DB', None)
VERBOSE = getattr(settings, 'R_VERBOSE', False) | [
"[email protected]"
] | |
9782f416c9447c9bea34e745ec11be24c68003db | 8dbba1dc3b0a9cb3972e6fee6f41459d6fa56d78 | /ch09/ex9-10.py | b644f50c93a1b0bc17981d880b07bc7f5e71550d | [] | no_license | freebz/Foundations-for-Analytics-with-Python | 8da8308981538266e8e982ffcd080657058144ca | 736b2075e339a679905071b39201e6a575f59229 | refs/heads/master | 2020-03-15T21:53:43.100954 | 2018-05-06T18:07:59 | 2018-05-06T18:07:59 | 132,363,669 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import numpy as np
from numpy import concatenate, vstack, r_
array_concat = np.concatenate([array1, array2], axis=0)
array_concat = np.vstack((array1, array2))
array_concat = np.r_[array1, array2]
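
# Self-contained demonstration of the three equivalent calls above; the book snippet leaves
# array1/array2 undefined, so the example inputs here are illustrative assumptions.
array1 = np.array([[1, 2], [3, 4]])
array2 = np.array([[5, 6], [7, 8]])
stacked = np.concatenate([array1, array2], axis=0)
assert (stacked == np.vstack((array1, array2))).all()
assert (stacked == np.r_[array1, array2]).all()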
| [
"[email protected]"
] | |
f3409674f6082e19e2cdbb91ddc6cc1956ae779f | 9aea1b19a8681b4c6b15d628a080982fb2d98b39 | /mianJing111111/Google/Implement Queue using Stacks.py | 2e144185623f255bcbf62dc1b0ca3271002fcff4 | [] | no_license | yzl232/code_training | ee7612efc6f166742fcf48e1af715f57a624d3aa | fc165027c3d7b1fec58ebfad2f9ada275a6b8c03 | refs/heads/master | 2021-01-21T04:32:02.522931 | 2016-07-01T21:35:29 | 2016-07-01T21:35:29 | 26,989,266 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # encoding=utf-8
'''
In this method, in en-queue operation, the new element is entered at the top of stack1. In de-queue operation, if stack2 is empty then all the elements are moved to stack2 and finally top of stack2 is returned.
enQueue(q, x)
1) Push x to stack1 (assuming size of stacks is unlimited).
deQueue(q)
1) If both stacks are empty then error.
2) If stack2 is empty
   While stack1 is not empty, push everything from stack1 to stack2.
3) Pop the element from stack2 and return it.
'''
# Asked in Google interviews.
class queue:
def __init__(self):
self.stack1 = []
self.stack2 = []
def enqueue(self, x):
self.stack1.append(x)
def dequeue(self):
if not self.stack1 and not self.stack2: raise ValueError()
if not self.stack2:
while self.stack1: self.stack2.append(self.stack1.pop())
        return self.stack2.pop()
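
# A minimal usage sketch (illustrative; not part of the original notes): items come back
# out in FIFO order even though both internal containers are LIFO stacks.
if __name__ == '__main__':
    q = queue()
    for value in (1, 2, 3):
        q.enqueue(value)
    print(q.dequeue())  # 1
    q.enqueue(4)
    print(q.dequeue())  # 2
    print(q.dequeue())  # 3
    print(q.dequeue())  # 4
 | [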
"[email protected]"
] | |
96697e0a1210d4821564472422964ebcc50a0e3b | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/zulip/2015/12/run-dev.py | 67b1614d07af8c64d03977bec514a539b207ca73 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 3,852 | py | #!/usr/bin/env python2.7
import optparse
import subprocess
import signal
import traceback
import sys
import os
from twisted.internet import reactor
from twisted.web import proxy, server, resource
# Monkey-patch twisted.web.http to avoid request.finish exceptions
# https://trac.zulip.net/ticket/1728
from twisted.web.http import Request
orig_finish = Request.finish
def patched_finish(self):
if self._disconnected:
return
return orig_finish(self)
Request.finish = patched_finish
if 'posix' in os.name and os.geteuid() == 0:
raise RuntimeError("run-dev.py should not be run as root.")
parser = optparse.OptionParser(r"""
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which serves to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
""")
parser.add_option('--test',
action='store_true', dest='test',
help='Use the testing database and ports')
parser.add_option('--interface',
action='store', dest='interface',
default='127.0.0.1', help='Set the IP or hostname for the proxy to listen on')
(options, args) = parser.parse_args()
base_port = 9991
manage_args = ''
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
else:
settings_module = "zproject.settings"
manage_args = ['--settings=%s' % (settings_module,)]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
proxy_port = base_port
django_port = base_port+1
tornado_port = base_port+2
webpack_port = base_port+3
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Pass --nostatic because we configure static serving ourselves in
# zulip/urls.py.
cmds = [['./tools/compile-handlebars-templates', 'forever'],
['./tools/webpack', 'watch'],
['python', 'manage.py', 'rundjango'] +
manage_args + ['localhost:%d' % (django_port,)],
['python', 'manage.py', 'runtornado'] +
manage_args + ['localhost:%d' % (tornado_port,)],
['./tools/run-dev-queue-processors'] + manage_args,
['env', 'PGHOST=localhost', # Force password authentication using .pgpass
'./puppet/zulip/files/postgresql/process_fts_updates']]
for cmd in cmds:
subprocess.Popen(cmd)
class Resource(resource.Resource):
def getChild(self, name, request):
# Assume an HTTP 1.1 request
proxy_host = request.requestHeaders.getRawHeaders('Host')
request.requestHeaders.setRawHeaders('X-Forwarded-Host', proxy_host)
if (request.uri in ['/json/get_events'] or
request.uri.startswith('/json/events') or
request.uri.startswith('/api/v1/events') or
request.uri.startswith('/sockjs')):
return proxy.ReverseProxyResource('localhost', tornado_port, '/'+name)
elif (request.uri.startswith('/webpack') or
request.uri.startswith('/socket.io')):
return proxy.ReverseProxyResource('localhost', webpack_port, '/'+name)
return proxy.ReverseProxyResource('localhost', django_port, '/'+name)
try:
reactor.listenTCP(proxy_port, server.Site(Resource()), interface=options.interface)
reactor.run()
except:
# Print the traceback before we get SIGTERM and die.
traceback.print_exc()
raise
finally:
# Kill everything in our process group.
os.killpg(0, signal.SIGTERM)
| [
"[email protected]"
] | |
b4b48833b14eeae1819479c4994e066e45300d1c | d0dccd8b1c31c0256dca3472719acab561661aa9 | /events/views.py | 8f52985d56e7d9d48486c2516ac1ab2f8b850635 | [] | no_license | cjredmond/GrouperApp | 5fe97271bc275e570d2e3565c2bb5233ce34a79d | aba431c7def9173150e24686dbbb87685d25ed24 | refs/heads/master | 2020-03-19T21:43:12.609648 | 2018-06-29T16:17:10 | 2018-06-29T16:17:10 | 136,947,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django.shortcuts import render
from django.views.generic import *
from django.views.generic.edit import *
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Event
from .forms import EventCreateForm
from group.models import Entity
class EventCreateView(CreateView):
model = Event
form_class = EventCreateForm
def form_valid(self,form,**kwargs):
instance = form.save(commit=False)
instance.entity = Entity.objects.get(slug=self.kwargs['slug'])
return super().form_valid(form)
def get_success_url(self):
return reverse('landing_view')
| [
"[email protected]"
] | |
06072e8a7f63c5d0535c8e97d3d3590ec3ef64bc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_backup.py | 9eff4b2688d253805fa8fdb41115f611870c9543 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py |
# class header
class _BACKUP():
def __init__(self,):
self.name = "BACKUP"
self.definitions = [u'(someone or something that provides) support or help, or something that you have arranged in case your main plans, equipment, etc. go wrong: ', u'a copy of information held on a computer that is stored separately from the computer: ', u'a player who plays when the person who usually plays is not available: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
8fdf3accccfac6904b4799b77cccadf2bfc83862 | 42516b0348936e257d04113c2e632dc72ba58e91 | /test_env/test_suit_ui_file_explorer/test_suit_ui_file_explorer_case06.py | d446ea461909499285fd138a157ee129cd48ee84 | [] | no_license | wwlwwlqaz/Qualcomm | 2c3a225875fba955d771101f3c38ca0420d8f468 | a04b717ae437511abae1e7e9e399373c161a7b65 | refs/heads/master | 2021-01-11T19:01:06.123677 | 2017-04-05T07:57:21 | 2017-04-05T07:57:21 | 79,292,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,124 | py | #coding=utf-8
import settings.common as SC
from test_case_base import TestCaseBase
from logging_wrapper import log_test_case, take_screenshot
from test_case_base import TestCaseBase
from qrd_shared.case import *
import fs_wrapper
from case_utility import *
import settings.common as SC
from utility_wrapper import *
############################################
#author:
# [email protected]
#function:
#        copy and paste items across folders in FileExplorer
#precondition:
#
#step:
# 1.goto FileExplorer;
# if not, goto step4
# 2.try to create a new folder;
# if not, goto step4
#        3.confirm whether new folder is created correctly;
# if not, goto step4
# 4.exit to end case
############################################
import os,re,string,subprocess,shlex
from test_suit_ui_file_explorer import *
class test_suit_ui_file_explorer_case06(TestCaseBase):
tag = 'ui_file_explorer_case06'
def test_case_main(self, case_results):
case_flag = False
#
# STEP 1: goto work_dir in FileExplorer
#
work_dir = '/Phone storage/DCIM/Camera'
number = preprocess(self.tag,work_dir,floor=3)
goto_dir(work_dir,'Folder')
#
# STEP 2: choose items to copy
#
try:
(index_list,num1) = random_index_list_in_folder(work_dir,'.jpg')
log_test_case(self.tag,"num1=%s want to copy %s photos"%(str(num1),str(len(index_list)+1)))
first_name = get_view_text_by_id(VIEW_TEXT_VIEW,'text')
click_textview_by_id('text',waitForView=1, clickType=LONG_CLICK)
name_list = []
for i in range(len(index_list)):
click_textview_by_index(index_list[i])
name_list.append(get_view_text_by_index(VIEW_TEXT_VIEW,index_list[i]))
name_list.insert(0, first_name)
click_textview_by_desc('Copy',isScrollable=0)
except:
take_screenshot()
cur_path = get_view_text_by_index(VIEW_TEXT_VIEW,0)
log_test_case(self.tag, "during COPY: something wrong, maybe no item in " + cur_path)
set_cannot_continue()
#
# STEP 3: goto destination in FileExplorer
#
if can_continue():
destination = '/Phone storage/Download'
goto_dir(destination,'Folder',go_from_home_screen=False)
#
# STEP 4: copy items to destination
#
if can_continue():
try:
click_button_by_text('Paste',waitForView=1)
except:
take_screenshot()
cur_path = get_view_text_by_index(VIEW_TEXT_VIEW,0)
log_test_case(self.tag, "during COPY: no 'Paste' in " + cur_path)
set_cannot_continue()
# check
if can_continue():
goto_dir(destination,'Folder',go_from_home_screen=True)
cur_path = get_view_text_by_index(VIEW_TEXT_VIEW,0)
flag = True
for i in range(len(name_list)):
if search_text('%s'%name_list[i],searchFlag=TEXT_MATCHES_REGEX):
try:scroll_to_top()
except:pass
continue
else:
flag = False
break
if flag is True:
case_flag = True
else:
log_test_case(self.tag, "failed copy %s"%name_list[i] +'in '+ cur_path)
#
# STEP 5: exit
#
exit_cur_case(self.tag)
log_test_case(self.tag, "case_flag = "+str(case_flag))
if case_flag:
qsst_log_case_status(STATUS_SUCCESS, "" , SEVERITY_HIGH)
else:
qsst_log_case_status(STATUS_FAILED, "copy and paste items cross folder is failed", SEVERITY_HIGH)
case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], can_continue()))
| [
"[email protected]"
] | |
fc7d7b27a526a43db9c9b511ae29a4442acf81d4 | 0fb2e09c0629cf47045881d7eecc125f674230e5 | /pps_webapp/main/views.py | bf6d96c2869c358792ae6771da7c09201b547904 | [] | no_license | satwik77/phenopacket-scraper-webapp | ea24ad2cc2fbd988e12df1178be5ba940c8a9859 | 4382c2a4e501448e7bfd68c7826a3c4c5ab39a26 | refs/heads/master | 2021-01-17T09:33:07.188192 | 2016-08-23T17:24:20 | 2016-08-23T17:24:20 | 61,695,575 | 0 | 0 | null | 2016-06-22T06:45:50 | 2016-06-22T06:45:50 | null | UTF-8 | Python | false | false | 1,984 | py | from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.shortcuts import render
from django.contrib import auth
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import requests
import pprint
api_url= 'http://localhost:8001/api/'
@csrf_exempt
def home(request):
if request.POST:
choice = str(request.POST['choice'])
url = str(request.POST['url'])
data = ""
if choice == '1':
get_data={'url' : str(url)}
response = requests.get(api_url+ 'scrape', params = get_data)
if response.status_code == 200:
response_data = response.json()
abstract = response_data['Abstract']
title = str(response_data['Title'])
hpo_terms = response_data['HPO Terms']
data+= "Title:\n" + title + "\n"
data+="Abstract:\n" + abstract + "\n"
data+="HPO Terms:\n"
for term in hpo_terms:
data += str(term) + "\n"
if choice == '2':
get_data={'url' : str(url)}
response = requests.get(api_url+ 'annotate', params = get_data)
if response.status_code == 200:
response_data = response.json()
data = {}
data["annotated_terms"] = response_data['Annotated HPO Terms']
data["annotated_abstract"] = response_data['Annotated Abstract']
data= pprint.pformat(data, indent=4)
if choice == '3':
get_data={'url' : str(url)}
response = requests.get(api_url+ 'phenopacket', params = get_data)
if response.status_code == 200:
response_data = response.json()
phenopacket = response_data['phenopacket']
data = phenopacket
return HttpResponse(data)
return render(request, 'main/index.html')
| [
"[email protected]"
] | |
31a0c3c321b124e25d22c7584aa8ccbc4ed0ae04 | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/PyQt4/QtNetwork/__init__.py | 499f277c4fd5601ad24160f4fb960e5e5fc2f65f | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib64/python2.6/site-packages/PyQt4/QtNetwork.so
# by generator 1.136
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
# no functions
# classes
from QAbstractNetworkCache import QAbstractNetworkCache
from QAbstractSocket import QAbstractSocket
from QAuthenticator import QAuthenticator
from QFtp import QFtp
from QHostAddress import QHostAddress
from QHostInfo import QHostInfo
from QHttp import QHttp
from QHttpHeader import QHttpHeader
from QHttpRequestHeader import QHttpRequestHeader
from QHttpResponseHeader import QHttpResponseHeader
from QLocalServer import QLocalServer
from QLocalSocket import QLocalSocket
from QNetworkAccessManager import QNetworkAccessManager
from QNetworkAddressEntry import QNetworkAddressEntry
from QNetworkCacheMetaData import QNetworkCacheMetaData
from QNetworkCookie import QNetworkCookie
from QNetworkCookieJar import QNetworkCookieJar
from QNetworkDiskCache import QNetworkDiskCache
from QNetworkInterface import QNetworkInterface
from QNetworkProxy import QNetworkProxy
from QNetworkProxyFactory import QNetworkProxyFactory
from QNetworkProxyQuery import QNetworkProxyQuery
from QNetworkReply import QNetworkReply
from QNetworkRequest import QNetworkRequest
from QSsl import QSsl
from QSslCertificate import QSslCertificate
from QSslCipher import QSslCipher
from QSslConfiguration import QSslConfiguration
from QSslError import QSslError
from QSslKey import QSslKey
from QTcpSocket import QTcpSocket
from QSslSocket import QSslSocket
from QTcpServer import QTcpServer
from QUdpSocket import QUdpSocket
from QUrlInfo import QUrlInfo
| [
"[email protected]"
] | |
4d4ecc1e1bddc6ac36317f8f1c3f8dc07d77ef43 | 8e79de4b73998dd0ee1dae4881784a2b12410615 | /Bite_83/test_timezone.py | 48b2574919d1328895aa94b5cdc1f6966ae66c3b | [
"MIT"
] | permissive | alehpineda/bitesofpy | e6eb7c9413cf407a12643efece01bef5457e5dcb | bfd319a606cd0b7b9bfb85a3e8942872a2d43c48 | refs/heads/master | 2021-07-15T19:59:35.061049 | 2020-09-25T17:49:32 | 2020-09-25T17:49:32 | 209,878,791 | 0 | 0 | MIT | 2020-09-06T00:11:45 | 2019-09-20T20:49:51 | Python | UTF-8 | Python | false | false | 1,060 | py | from datetime import datetime
from timezone import what_time_lives_pybites
def test_what_time_lives_pybites_spanish_summertime():
# AUS is 8 hours ahead of ES
naive_utc_dt = datetime(2018, 4, 27, 22, 55, 0)
aus_dt, es_dt = what_time_lives_pybites(naive_utc_dt)
assert aus_dt.year == 2018
assert aus_dt.month == 4
assert aus_dt.day == 28
assert aus_dt.hour == 8
assert aus_dt.minute == 55
assert es_dt.year == 2018
assert es_dt.month == 4
assert es_dt.day == 28
assert es_dt.hour == 0
assert es_dt.minute == 55
def test_what_time_lives_pybites_spanish_wintertime():
# AUS is 10 hours ahead of ES
naive_utc_dt = datetime(2018, 11, 1, 14, 10, 0)
aus_dt, es_dt = what_time_lives_pybites(naive_utc_dt)
assert aus_dt.year == 2018
assert aus_dt.month == 11
assert aus_dt.day == 2
assert aus_dt.hour == 1
assert aus_dt.minute == 10
assert es_dt.year == 2018
assert es_dt.month == 11
assert es_dt.day == 1
assert es_dt.hour == 15
assert es_dt.minute == 10
| [
"[email protected]"
] | |
acb8cafa74645f1560e286301b9c4b31274498d0 | b72e42f7f15ea8d359512cc0fe524f5407f358e5 | /CS50_web_dev/src/src8/airline1/airline/urls.py | f9e85ea4cf69f88b0dfd879f4bc3a158aa887856 | [
"MIT"
] | permissive | ChuaCheowHuan/web_app_DPTH | ec9f96d66c69ebd7e04df8d4b92578a3aaa7e392 | dd901e6359fe76f15b69701c53f76666c3219173 | refs/heads/master | 2021-06-18T11:31:10.959634 | 2020-07-23T04:04:52 | 2020-07-23T04:04:52 | 205,556,446 | 0 | 0 | MIT | 2021-06-10T21:55:14 | 2019-08-31T14:42:35 | HTML | UTF-8 | Python | false | false | 797 | py | """airline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('flights.urls')),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
5d396f8a619172ddd3f61c1c285aedc696426ca7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03001/s613586641.py | 5dadc4795c529eb1e7ffe05c54da04dc2de9168e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import sys
sys.setrecursionlimit(10**6)
w, h, x, y = map(int, input().split())
ans1 = w*h/2
ans2 = 0
if x == w/2 and y == h/2:
ans2 = 1
print(ans1, ans2)
| [
"[email protected]"
] | |
2ca6dd6d9e283d56848cb08dedccbc18699489cf | f40c65a649206261d7255eb3132ea67963f13a17 | /src/wait.py | d830074cea069e62e268c87b9f7ee5afbff4750b | [
"MIT"
] | permissive | fcurella/gh-status-check | 2ec47ca212bfe471cda97d1ae0c1ee41f16420e3 | 1fdb5f7be1dcdb9f2338839ad55ad7c9188b159b | refs/heads/main | 2022-12-22T18:35:00.543508 | 2020-10-01T17:46:35 | 2020-10-01T17:46:35 | 297,731,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,852 | py | import asyncio
import logging
import os
import sys
import aiohttp
from asgiref.sync import sync_to_async
from github import Github
REPOSITORY = os.environ["GITHUB_REPOSITORY"]
SHA = os.environ["GITHUB_SHA"]
EVENT = os.environ["GITHUB_EVENT_NAME"]
EVENT_PATH = os.environ["GITHUB_EVENT_PATH"]
TOKEN = os.environ["INPUT_GITHUBTOKEN"]
IGNORECONTEXTS = os.environ["INPUT_IGNORECONTEXTS"].split(',')
IGNOREACTIONS = os.environ["INPUT_IGNOREACTIONS"].split(',')
INTERVAL = float(os.environ["INPUT_CHECKINTERVAL"])
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def poll_checks(session, repo, ref):
headers = {
"Content-Type": "application/vnd.github.antiope-preview+json",
"Authorization": f"token {TOKEN}",
}
url = f"https://api.github.com/repos/{repo}/commits/{ref}/check-runs"
async with session.get(url, headers=headers, raise_for_status=True) as resp:
data = await resp.json()
check_runs = [
check_run for check_run in data["check_runs"]
if check_run["name"] not in IGNOREACTIONS
]
logger.info(
"Checking %s actions: %s",
len(check_runs),
", ".join([check_run["name"] for check_run in check_runs])
)
for check_run in check_runs:
name, status = check_run["name"], check_run["status"]
logger.info("%s: %s", name, status)
if status != "completed":
return False
return True
async def poll_statuses(commit):
combined_status = await sync_to_async(commit.get_combined_status)()
statuses = [
status for status in combined_status.statuses
if status.context not in IGNORECONTEXTS
]
logger.info(
"Checking %s statuses: %s",
len(statuses),
", ".join([status.context for status in statuses])
)
for status in statuses:
context, state = status.context, status.state
logger.info("%s: %s", context, state)
if state != "success":
return False
return True
async def main():
g = Github(TOKEN)
repo = await sync_to_async(g.get_repo)(REPOSITORY)
commit = await sync_to_async(repo.get_commit)(sha=SHA)
results = [False, False]
async with aiohttp.ClientSession() as session:
while False in results:
results = await asyncio.gather(
poll_statuses(commit),
poll_checks(session, REPOSITORY, SHA),
return_exceptions=False,
)
if False in results:
logger.info("Checking again in %s seconds", INTERVAL)
await asyncio.sleep(INTERVAL)
return results
if __name__ == "__main__":
try:
asyncio.run(main())
print("::set-output name=status::success")
except:
print("::set-output name=status::failure")
raise
| [
"[email protected]"
] | |
76dfebb655f45b53d778e40b6ae290fc76785090 | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Intermediate/C239/__init__.py | cf2036a70032f331ee1707580700ecc8e93ea54f | [] | no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 71 | py | #! python3
from r_DailyProgrammer.Intermediate.C239.main import main
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
d7804ea8b7f2ecc0bd38927b3992aa58daadc478 | af9b7a00b55aac5eaed58592cf8a9d69e659a076 | /learning_log/learning_logs/forms.py | da3fa007a63b1f3e2886d67e9cb7c2ee946bc820 | [] | no_license | dujiaojingyu/Django-Practice | bc246d2283a8f994567756b4e391ea167359620b | cab5db123eb97bd424a84fae24629cc0e1be4652 | refs/heads/master | 2020-03-25T17:33:25.597885 | 2018-08-08T08:38:09 | 2018-08-08T08:38:09 | 143,983,344 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | __author__ = "Narwhale"
from django import forms
from .models import Topic
class TopicForm(forms.ModelForm):
class Meta:
model = Topic
fields = ['text']
labels = {'text':''}
| [
"[email protected]"
] | |
e277cd2671d5de63fd0453d59feb4fde97ffbdaf | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/google/s5_getparser_20210216045646.py | c901cc9e70acfeeb7542185d4f3f24d9c669f62e | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,991 | py | import shutil
from fonduer.parser.preprocessors import html_doc_preprocessor
from sqlalchemy import exc
import pdftotree
import re
from sen_parser_usable import *
from config import config
import json
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import shutil
import mimetypes
import re
from io import BytesIO
import json
import uuid
import sys
import logging
import errno
from os import walk
from fonduer.parser.models import Document, Sentence, Table
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser import Parser
from pprint import pprint
from fonduer import Meta, init_logging
from fonduer.candidates import CandidateExtractor
from fonduer.candidates import MentionNgrams
from fonduer.candidates import MentionExtractor
from fonduer.candidates.models import Mention
from fonduer.candidates.models import mention_subclass
from fonduer.candidates.models import candidate_subclass
from fonduer.candidates.matchers import RegexMatchSpan, DictionaryMatch, LambdaFunctionMatcher, Intersect, Union
from fonduer.features import Featurizer
import inspect
import matchers as matchers
from extract_html import *
PII_KEYLIST = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/model/pii-keylist.json'
PARALLEL = 4 # assuming a quad-core machine
# ATTRIBUTE = "ns8s_invoice_poc_stage"
# check that the databases mentioned below already exist
getdbref = __import__('s1_2_getdbref')
# Will return <module '1_2_getdbref' from '/home/dsie/Developer/sandbox/3ray/server/backend/python/kbc_process/1_2_getdbref.py'>
# pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/'
# docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/'
# pdf_path = json.loads(sys.argv[1])['pdf_path']
# docs_path = json.loads(sys.argv[1])['html_path']
# job_id = json.loads(sys.argv[1])['job_id']
# exc_context = 'email_id'
# doc_context = 'mock'
# exc_context = json.loads(sys.argv[1])['context'] if len(sys.argv) > 0 and json.loads(sys.argv[1])['context'] is not None else None
# doc_context = json.loads(sys.argv[1])['doc_name'] if len(sys.argv) > 0 and json.loads(sys.argv[1])['doc_name'] is not None else None
# exc_context = 'phone_number'
pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/efca2facee5f8df9/pdf/'
docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/efca2facee5f8df9/html/'
job_id = 'efca2facee5f8df9'
exc_context = None
doc_context = None
# Configure logging for Fonduer
init_logging(log_dir="logs", level=logging.ERROR)
max_docs = 1000
PARALLEL = 4
doc_preprocessor = None
execution_stack = ["1. Get session object..."]
try:
session = getdbref.get_session()
sessType = type(session) # Will return <class 'sqlalchemy.orm.session.Session'>
execution_stack.append("Done.")
execution_stack.append("2. Processing layout...")
except Exception as session_exception:
logging.error(f'{execution_stack}, session = getdbref.get_session(), {session_exception}')
except exc.SQLAlchemyError as sql_exception:
logging.error(f'{execution_stack}, session = getdbref.get_session(), {sql_exception}')
def do_prepare_mentions_batch(candidate_mentions, config):
# for index, data in enumerate(config):
for index, data in config.items():
mention_subclass_list = list()
max_ngrams = None
for key in data.keys():
if key == 'Candidates':
for c in data.get(key):
# if c not in candidate_mentions.keys(): #TODO verify this condition
# candidate_mentions[c] = {
# "mention_names": [],
# "mention_ngrams": [],
# "mention_matchers": [],
# "mention_subclass": [],
# "max_ngrams": [],
# "throttler_function": []
# }
candidate_mentions[c]['mention_names'].append(data['MentionName'])
candidate_mentions[c]['mention_ngrams'].append(data['MentionNGrams'])
candidate_mentions[c]['mention_matchers'].append(matchers.matcher[data.get('Context')])
if 'mention_subclass' in candidate_mentions[c].keys():
candidate_mentions[c]['mention_subclass'].append(mention_subclass(data['MentionName']))
else:
candidate_mentions[c]['mention_subclass'] = [mention_subclass(data['MentionName'])]
if 'max_ngrams' in candidate_mentions[c].keys():
candidate_mentions[c]['max_ngrams'].append(MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams')))
else:
candidate_mentions[c]['max_ngrams'] = [MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams'))]
# candidate_mentions[c]['throttler_function'] = data.get('ThrottlerFunctions')[0].get('tf')
candidate_mentions[c]['throttler_function'] = [{data.get('ThrottlerFunctions')[0].get('tf')}]
return candidate_mentions
def do_prepare_mentions(candidate_mentions, config, context):
mention_subclass_list = list()
max_ngrams = None
ctx = {
"mention_names": [],
"mention_ngrams": [],
"mention_matchers": [],
"mention_subclass": [],
"max_ngrams": [],
"throttler_function": None
}
ctx['mention_names'].append(config[context].get('MentionName'))
ctx['mention_ngrams'].append(config[context]['MentionNGrams'])
ctx['mention_matchers'].append(matchers.matcher[config[context].get('Context')])
ctx['mention_subclass'].append(mention_subclass(config[context]['MentionName']))
ctx['max_ngrams'].append(MentionNgrams(n_max=config[context].get('MaxNGrams')))
ctx['throttler_function'] = config[context].get('ThrottlerFunctions')[0].get('tf')
candidate_mentions[context] = ctx
return candidate_mentions
def do_train(candidate_mentions):
from sqlalchemy import desc
docs = session.query(Document).order_by(Document.name).all()
# docs = session.query(Document).order_by(desc(Document.id)).limit(1)
total_mentions = session.query(Mention).count()
splits = (1, 0.0, 0.0)
train_cands = []
for candidate_key in candidate_mentions.keys():
train_docs = set()
dev_docs = set()
test_docs = set()
'''print('Mention Subclass {}, Ngrams {} and Matchers {}'
.format(candidate_mentions[candidate_key]['mention_subclass'],
candidate_mentions[candidate_key]['max_ngrams'],
candidate_mentions[candidate_key]['mention_matchers']))
'''
mention_extractor = MentionExtractor(session, candidate_mentions[candidate_key]['mention_subclass'], candidate_mentions[candidate_key]['max_ngrams'], candidate_mentions[candidate_key]['mention_matchers'])
mention_extractor.apply(docs, clear=False, parallelism=PARALLEL, progress_bar=False)
# mention_extractor.apply(docs)
candidate_mentions[candidate_key]['candidate_subclass'] = candidate_subclass(candidate_key, candidate_mentions[candidate_key].get('mention_subclass'), table_name=candidate_mentions[candidate_key]['mention_names'][0])
candidate_extractor = CandidateExtractor(session, [candidate_mentions[candidate_key]['candidate_subclass']], throttlers=[candidate_mentions[candidate_key]['throttler_function']])
data = [(doc.name, doc) for doc in docs]
data.sort(key=lambda x: x[0])
for i, (doc_name, doc) in enumerate(data):
train_docs.add(doc)
for i, docs in enumerate([train_docs, dev_docs, test_docs]):
candidate_extractor.apply(docs, split=i, parallelism=PARALLEL)
train_cands = candidate_extractor.get_candidates(split = 0)
train_cands.append(candidate_extractor.get_candidates(split = 0))
candidate_mentions[candidate_key]['train_cands'] = candidate_extractor.get_candidates(split = 0)
for index, item in enumerate(candidate_mentions[candidate_key]['train_cands']):
if len(item) > 0:
featurizer = Featurizer(session, [candidate_mentions[candidate_key]['candidate_subclass']])
featurizer.apply(split=0, train=True, parallelism=PARALLEL)
F_train = featurizer.get_feature_matrices(candidate_mentions[candidate_key]['train_cands'])
# %time featurizer.apply(split=0, train=True, parallelism=PARALLEL)
# %time F_train = featurizer.get_feature_matrices(candidate_mentions[candidate_key]['train_cands'])
else:
candidate_mentions[candidate_key]['train_cands'].pop(index)
# candidate[candidate_key]['train_cands'] = train_cands
return candidate_mentions
def do_process_get_candidates(candidate_mentions=None):
train_cands = do_train(candidate_mentions)
return train_cands
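# Generator wrapper: exhausts `generator`, captures the value it returns via
# `yield from`, and hands that value to `func` once iteration finishes.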
def handle_return(generator, func):
contextInfoDict = yield from generator
func(contextInfoDict)
def get_context_async(sm, document_context='', search_context=''):
pass
# star_char_index = sm.char_start
# end_char_index = sm.char_end
# star_char_index = sm['applicant_name_context'].char_start
# end_char_index = sm['applicant_name_context'].char_end
# contextInfoDictionary = {
# 'label': {
# # 'spanMention': sm['applicant_name_context'],
# 'document': sm[search_context].sentence.document.name,
# 'documentId': sm[search_context].sentence.document.id,
# 'sentence': sm[search_context].sentence.text,
# 'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
# 'startChar': star_char_index,
# 'endChar': end_char_index
# },
# 'value': {
# # 'spanMention': sm['applicant_name_context'],
# 'document': sm[search_context].sentence.document.name,
# 'documentId': sm[search_context].sentence.document.id,
# 'sentence': sm[search_context].sentence.text,
# 'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
# 'startChar': star_char_index,
# 'endChar': end_char_index
# }
# }
# yield contextInfoDictionary
def print_values(value):
print('returned: {}'.format(json.dumps(value)))
def do_get_docs_values(candidates=None, document_context=None, search_context=None):
'''
"<class 'fonduer.parser.models.document.Document'>"
"<class 'fonduer.parser.models.section.Section'>"
"<class 'fonduer.parser.models.sentence.Sentence'>"
"<class 'fonduer.candidates.models.span_mention.SpanMention'>"
"<class 'fonduer.candidates.models.mention.ApplicationNameLabel'>"
'''
train_cands = None
docs_and_values = []
all_docs_and_values = []
# print(document_context, search_context)
search_types = ['all_docs_and_pii', 'all_doc_and_'+search_context, 'all_pii_for_'+document_context, search_context+'_for_'+document_context]
search_type = ''
if document_context == None and search_context == None:
'''Entire KB'''
search_type = search_types[0]
elif document_context == None and search_context is not None:
''' Send entire KB '''
search_type = search_types[1]
elif document_context is not None and search_context == None:
''' Send KB for document'''
search_type = search_types[2]
else:
''' Send KB for match in Doc'''
search_type = search_types[3]
for index, item in enumerate(candidates):
train_cands = candidates.get(item).get('train_cands')
if train_cands is not None:
for instances in train_cands:
for candidate in instances:
for key, value in enumerate(candidate):
# all_docs_and_values.append({
docs_and_values.append({
"documentName": value.context.sentence.document.name,
"page": value.context.sentence.page,
"piiFound": value.context.sentence.text
})
for item in all_docs_and_values:
if search_type == 0:
docs_and_values.append(item)
elif search_type == 1:
'''
search_context is already filtered, hence do not filter any document
'''
docs_and_values.append(item)
elif search_type == 2:
'''
only filter document name
'''
docs_and_values.append(item) if item.get("documentName") in document_context else None
else:
'''
search_type is 3
search_context is already filtered, hence only filter document_name
'''
docs_and_values.append(item) if item.get("documentName") in document_context else None
# logging.info(f'docs_and_values: {docs_and_values}')
return docs_and_values
def train_and_test_experiment(document_context=None, context_label='', user=0, pdf_path=''):
'''
document_context: optional document name used to filter results
context_label: key in `config` naming the PII context to extract
user: id of the requesting user (not used in this function)
pdf_path: path to the source PDF documents (not used in this function)
'''
candidate_mentions = do_prepare_mentions({}, config, context_label)
candidates = do_process_get_candidates(candidate_mentions)
results = []
if candidates is not None:
span_mention = None
span_mention_list = do_get_docs_values(candidates, document_context, context_label)
if len(span_mention_list) > 0:
span_mention = span_mention_list[0]
returned_contexts = handle_return(get_context_async(span_mention, document_context, context_label), print_values)
for x in returned_contexts:
results.append(x)
else:
# TODO
pass
return results
def train_and_test(document_context=None, context_label='', user=0, pdf_path=''):
'''
document_context: optional document name used to filter results
context_label: key in `config` naming the PII context to extract
user: id of the requesting user (not used in this function)
pdf_path: path to the source PDF documents (not used in this function)
'''
candidate_mentions = do_prepare_mentions({}, config, context_label)
# candidate_mentions = do_prepare_mentions_batch({}, config)
candidates = do_process_get_candidates(candidate_mentions)
results = []
if candidates is not None:
results = do_get_docs_values(candidates, document_context, context_label)
return results
_, _, filenames = next(walk(pdf_path))
exc_context_list = config.keys()
for fn in filenames:
fn = fn.split('.')[0]
for ec in exc_context_list:
combined_results = train_and_test(document_context=fn, context_label=ec)
print(json.dumps({"result": combined_results, "job_id": job_id})) | [
"{[email protected]}"
] | |
8af1f2b9b43cf26c7d092f16479f3b479eed5d23 | 90f52d0348aa0f82dc1f9013faeb7041c8f04cf8 | /wxPython3.0 Docs and Demos/wxPython/samples/wxPIA_book/Chapter-10/popupmenu.py | 5226849ca7224afab2ef1c1e69a3aae5158a74d5 | [] | no_license | resource-jason-org/python-wxPythonTool | 93a25ad93c768ca8b69ba783543cddf7deaf396b | fab6ec3155e6c1ae08ea30a23310006a32d08c36 | refs/heads/master | 2021-06-15T10:58:35.924543 | 2017-04-14T03:39:27 | 2017-04-14T03:39:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1,
"Popup Menu Example")
self.panel = p = wx.Panel(self)
menu = wx.Menu()
exit = menu.Append(-1, "Exit")
self.Bind(wx.EVT_MENU, self.OnExit, exit)
menuBar = wx.MenuBar()
menuBar.Append(menu, "Menu")
self.SetMenuBar(menuBar)
wx.StaticText(p, -1,
"Right-click on the panel to show a popup menu",
(25,25))
self.popupmenu = wx.Menu()
for text in "one two three four five".split():
item = self.popupmenu.Append(-1, text)
self.Bind(wx.EVT_MENU, self.OnPopupItemSelected, item)
p.Bind(wx.EVT_CONTEXT_MENU, self.OnShowPopup)
def OnShowPopup(self, event):
pos = event.GetPosition()
pos = self.panel.ScreenToClient(pos)
self.panel.PopupMenu(self.popupmenu, pos)
def OnPopupItemSelected(self, event):
item = self.popupmenu.FindItemById(event.GetId())
text = item.GetText()
wx.MessageBox("You selected item '%s'" % text)
def OnExit(self, event):
self.Close()
if __name__ == "__main__":
app = wx.App()
frame = MyFrame()
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
41a58d08aeb1f5f3ee5fbd1e3067dbcc9eefbc43 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-3540.py | a875472b054493c30d3909c9a4a41429a81f0434 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,742 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
0709557c1f679fa1a41d7157bfe2c991f6adadfc | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/NTWS-AP-IF-MIB.py | d9da09616db8ef8ddc0d2db88e651ab9fd3c63d5 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,408 | py | #
# PySNMP MIB module NTWS-AP-IF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NTWS-AP-IF-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:16:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
IANAifType, = mibBuilder.importSymbols("IANAifType-MIB", "IANAifType")
NtwsApSerialNum, = mibBuilder.importSymbols("NTWS-AP-TC", "NtwsApSerialNum")
ntwsMibs, = mibBuilder.importSymbols("NTWS-ROOT-MIB", "ntwsMibs")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Counter64, IpAddress, iso, Bits, Integer32, TimeTicks, Counter32, ObjectIdentity, ModuleIdentity, MibIdentifier, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "IpAddress", "iso", "Bits", "Integer32", "TimeTicks", "Counter32", "ObjectIdentity", "ModuleIdentity", "MibIdentifier", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "NotificationType")
MacAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "TextualConvention", "DisplayString")
ntwsApIfMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16))
ntwsApIfMib.setRevisions(('2008-11-20 00:01',))
if mibBuilder.loadTexts: ntwsApIfMib.setLastUpdated('200811200001Z')
if mibBuilder.loadTexts: ntwsApIfMib.setOrganization('Nortel Networks')
class NtwsApInterfaceIndex(TextualConvention, Unsigned32):
status = 'current'
displayHint = 'd'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 1024)
ntwsApIfMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1))
ntwsApIfTable = MibTable((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1), )
if mibBuilder.loadTexts: ntwsApIfTable.setStatus('current')
ntwsApIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1), ).setIndexNames((0, "NTWS-AP-IF-MIB", "ntwsApIfApSerialNum"), (0, "NTWS-AP-IF-MIB", "ntwsApIfIndex"))
if mibBuilder.loadTexts: ntwsApIfEntry.setStatus('current')
ntwsApIfApSerialNum = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 1), NtwsApSerialNum())
if mibBuilder.loadTexts: ntwsApIfApSerialNum.setStatus('current')
ntwsApIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 2), NtwsApInterfaceIndex())
if mibBuilder.loadTexts: ntwsApIfIndex.setStatus('current')
ntwsApIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfName.setStatus('current')
ntwsApIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 4), IANAifType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfType.setStatus('current')
ntwsApIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfMtu.setStatus('current')
ntwsApIfHighSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfHighSpeed.setStatus('current')
ntwsApIfMac = MibTableColumn((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 1, 1, 1, 7), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntwsApIfMac.setStatus('current')
ntwsApIfConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2))
ntwsApIfCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 1))
ntwsApIfGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 2))
ntwsApIfCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 1, 1)).setObjects(("NTWS-AP-IF-MIB", "ntwsApIfBasicGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntwsApIfCompliance = ntwsApIfCompliance.setStatus('current')
ntwsApIfBasicGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 45, 6, 1, 4, 16, 2, 2, 1)).setObjects(("NTWS-AP-IF-MIB", "ntwsApIfName"), ("NTWS-AP-IF-MIB", "ntwsApIfType"), ("NTWS-AP-IF-MIB", "ntwsApIfMtu"), ("NTWS-AP-IF-MIB", "ntwsApIfHighSpeed"), ("NTWS-AP-IF-MIB", "ntwsApIfMac"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntwsApIfBasicGroup = ntwsApIfBasicGroup.setStatus('current')
mibBuilder.exportSymbols("NTWS-AP-IF-MIB", ntwsApIfApSerialNum=ntwsApIfApSerialNum, ntwsApIfConformance=ntwsApIfConformance, ntwsApIfCompliance=ntwsApIfCompliance, PYSNMP_MODULE_ID=ntwsApIfMib, ntwsApIfName=ntwsApIfName, ntwsApIfMib=ntwsApIfMib, ntwsApIfHighSpeed=ntwsApIfHighSpeed, NtwsApInterfaceIndex=NtwsApInterfaceIndex, ntwsApIfBasicGroup=ntwsApIfBasicGroup, ntwsApIfEntry=ntwsApIfEntry, ntwsApIfMac=ntwsApIfMac, ntwsApIfIndex=ntwsApIfIndex, ntwsApIfMtu=ntwsApIfMtu, ntwsApIfType=ntwsApIfType, ntwsApIfTable=ntwsApIfTable, ntwsApIfCompliances=ntwsApIfCompliances, ntwsApIfMibObjects=ntwsApIfMibObjects, ntwsApIfGroups=ntwsApIfGroups)
| [
"[email protected]"
] | |
fbe0979bb9bfd1111ac0cd12f14a2aecde30e551 | 892266713e500efa5ac04e1b8de812200410c956 | /devset.py | cd8b6e2d344c504aedbc001fde9be6ebc8fc85de | [
"BSD-2-Clause"
] | permissive | martinphellwig/django-g11n | 972eb95128637ec0b21efabad6b40ba02c30356c | 94eb9da7d7027061873cd44356fdf3378cdb3820 | refs/heads/master | 2020-08-29T12:24:04.687019 | 2016-10-10T15:54:32 | 2016-10-10T15:54:32 | 218,030,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | #! /usr/bin/env python
"""
Developer Reset.
"""
import os
APP = 'django_g11n'
DIR = os.path.dirname(os.path.abspath(__file__))
def get_last_migration_file():
"Fetch the latest migration file."
_ = os.path.join(DIR, APP, 'migrations')
_ = [os.path.join(_, item) for item in os.listdir(_) if not item.startswith('_')]
_.sort()
if len(_) > 0:
return _[-1]
else:
return None
def modify_migration():
"Modify migration, add pylint disable line."
path = get_last_migration_file()
if path is None:
return
text = '# pylint: disable=invalid-name, missing-docstring, line-too-long\n'
with open(path, 'r+') as file_open:
data = file_open.readlines()
data.insert(1, text)
file_open.seek(0)
file_open.write(''.join(data))
def execute_shell(command, prefix='python manage.py', pipe=None):
"Execute shell python manage.py"
import subprocess
cmd = prefix + ' ' + command
if pipe is not None:
cmd = pipe + ' | ' + cmd
subprocess.call(cmd, shell=True)
def add_superuser(username, password):
"Add superuser"
from django.contrib.auth.models import User
user = User(username=username)
user.set_password(password)
user.is_superuser = True
user.is_staff = True
user.save()
return user
def remove_db():
"remove the db if it exists"
_ = os.path.join(DIR, 'db.sqlite3')
if os.path.exists(_):
os.remove(_)
def remove_last_migration():
"remove last migration file."
_ = get_last_migration_file()
if _ is not None:
os.remove(_)
def add_migrations():
"set up the new migrations and migrate"
execute_shell('makemigrations ' + APP)
execute_shell('makemigrations')
execute_shell('migrate')
modify_migration()
def main():
"Executed when this is the interface module"
remove_db()
remove_last_migration()
add_migrations()
#
# This will run a shell which imports this file as a module, this means
# we can execute things in a Django environment.
execute_shell('shell', pipe='echo "import devset"')
#
execute_shell('runserver')
def as_module():
"Executed when this is imported."
add_superuser('admin', 'admin')
if __name__ == '__main__':
main()
else:
as_module()
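# Note: the pipe trick used in main() is equivalent to running
#   echo "import devset" | python manage.py shell
# from a shell, which imports this module inside a configured Django
# environment so as_module() (and add_superuser) run with database access.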
| [
"martin@localhost"
] | martin@localhost |
e4cf4ab1bde5d12816c7568855788438a2c3bde5 | b7620d0f1a90390224c8ab71774b9c906ab3e8e9 | /aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/SetAutoScaleConfigRequest.py | 3e59df6b987590270e37c30aed6f6b4716a2eea1 | [
"Apache-2.0"
] | permissive | YaoYinYing/aliyun-openapi-python-sdk | e9c62940baee1a35b9ec4a9fbd1e4eb0aaf93b2f | e9a93cc94bd8290d1b1a391a9cb0fad2e6c64627 | refs/heads/master | 2022-10-17T16:39:04.515562 | 2022-10-10T15:18:34 | 2022-10-10T15:18:34 | 117,057,304 | 0 | 0 | null | 2018-01-11T06:03:02 | 2018-01-11T06:03:01 | null | UTF-8 | Python | false | false | 10,448 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class SetAutoScaleConfigRequest(RpcRequest):
    def __init__(self):
        RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'SetAutoScaleConfig')
        self.set_method('GET')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ImageId(self): # String
        return self.get_query_params().get('ImageId')

    def set_ImageId(self, ImageId): # String
        self.add_query_param('ImageId', ImageId)

    def get_SpotPriceLimit(self): # Float
        return self.get_query_params().get('SpotPriceLimit')

    def set_SpotPriceLimit(self, SpotPriceLimit): # Float
        self.add_query_param('SpotPriceLimit', SpotPriceLimit)

    def get_ExcludeNodes(self): # String
        return self.get_query_params().get('ExcludeNodes')

    def set_ExcludeNodes(self, ExcludeNodes): # String
        self.add_query_param('ExcludeNodes', ExcludeNodes)

    def get_ExtraNodesGrowRatio(self): # Integer
        return self.get_query_params().get('ExtraNodesGrowRatio')

    def set_ExtraNodesGrowRatio(self, ExtraNodesGrowRatio): # Integer
        self.add_query_param('ExtraNodesGrowRatio', ExtraNodesGrowRatio)

    def get_ShrinkIdleTimes(self): # Integer
        return self.get_query_params().get('ShrinkIdleTimes')

    def set_ShrinkIdleTimes(self, ShrinkIdleTimes): # Integer
        self.add_query_param('ShrinkIdleTimes', ShrinkIdleTimes)

    def get_GrowTimeoutInMinutes(self): # Integer
        return self.get_query_params().get('GrowTimeoutInMinutes')

    def set_GrowTimeoutInMinutes(self, GrowTimeoutInMinutes): # Integer
        self.add_query_param('GrowTimeoutInMinutes', GrowTimeoutInMinutes)

    def get_ClusterId(self): # String
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self, ClusterId): # String
        self.add_query_param('ClusterId', ClusterId)

    def get_EnableAutoGrow(self): # Boolean
        return self.get_query_params().get('EnableAutoGrow')

    def set_EnableAutoGrow(self, EnableAutoGrow): # Boolean
        self.add_query_param('EnableAutoGrow', EnableAutoGrow)

    def get_EnableAutoShrink(self): # Boolean
        return self.get_query_params().get('EnableAutoShrink')

    def set_EnableAutoShrink(self, EnableAutoShrink): # Boolean
        self.add_query_param('EnableAutoShrink', EnableAutoShrink)

    def get_SpotStrategy(self): # String
        return self.get_query_params().get('SpotStrategy')

    def set_SpotStrategy(self, SpotStrategy): # String
        self.add_query_param('SpotStrategy', SpotStrategy)

    def get_MaxNodesInCluster(self): # Integer
        return self.get_query_params().get('MaxNodesInCluster')

    def set_MaxNodesInCluster(self, MaxNodesInCluster): # Integer
        self.add_query_param('MaxNodesInCluster', MaxNodesInCluster)

    def get_ShrinkIntervalInMinutes(self): # Integer
        return self.get_query_params().get('ShrinkIntervalInMinutes')

    def set_ShrinkIntervalInMinutes(self, ShrinkIntervalInMinutes): # Integer
        self.add_query_param('ShrinkIntervalInMinutes', ShrinkIntervalInMinutes)

    def get_Queuess(self): # RepeatList
        return self.get_query_params().get('Queues')

    def set_Queuess(self, Queues): # RepeatList
        for depth1 in range(len(Queues)):
            if Queues[depth1].get('QueueName') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.QueueName', Queues[depth1].get('QueueName'))
            if Queues[depth1].get('SystemDiskLevel') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.SystemDiskLevel', Queues[depth1].get('SystemDiskLevel'))
            if Queues[depth1].get('InstanceTypes') is not None:
                for depth2 in range(len(Queues[depth1].get('InstanceTypes'))):
                    if Queues[depth1].get('InstanceTypes')[depth2].get('VSwitchId') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.InstanceTypes.' + str(depth2 + 1) + '.VSwitchId', Queues[depth1].get('InstanceTypes')[depth2].get('VSwitchId'))
                    if Queues[depth1].get('InstanceTypes')[depth2].get('SpotStrategy') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.InstanceTypes.' + str(depth2 + 1) + '.SpotStrategy', Queues[depth1].get('InstanceTypes')[depth2].get('SpotStrategy'))
                    if Queues[depth1].get('InstanceTypes')[depth2].get('ZoneId') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.InstanceTypes.' + str(depth2 + 1) + '.ZoneId', Queues[depth1].get('InstanceTypes')[depth2].get('ZoneId'))
                    if Queues[depth1].get('InstanceTypes')[depth2].get('InstanceType') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.InstanceTypes.' + str(depth2 + 1) + '.InstanceType', Queues[depth1].get('InstanceTypes')[depth2].get('InstanceType'))
                    if Queues[depth1].get('InstanceTypes')[depth2].get('SpotPriceLimit') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.InstanceTypes.' + str(depth2 + 1) + '.SpotPriceLimit', Queues[depth1].get('InstanceTypes')[depth2].get('SpotPriceLimit'))
            if Queues[depth1].get('EnableAutoGrow') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.EnableAutoGrow', Queues[depth1].get('EnableAutoGrow'))
            if Queues[depth1].get('HostNameSuffix') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.HostNameSuffix', Queues[depth1].get('HostNameSuffix'))
            if Queues[depth1].get('SpotPriceLimit') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.SpotPriceLimit', Queues[depth1].get('SpotPriceLimit'))
            if Queues[depth1].get('EnableAutoShrink') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.EnableAutoShrink', Queues[depth1].get('EnableAutoShrink'))
            if Queues[depth1].get('SpotStrategy') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.SpotStrategy', Queues[depth1].get('SpotStrategy'))
            if Queues[depth1].get('DataDisks') is not None:
                for depth2 in range(len(Queues[depth1].get('DataDisks'))):
                    if Queues[depth1].get('DataDisks')[depth2].get('DataDiskDeleteWithInstance') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.DataDisks.' + str(depth2 + 1) + '.DataDiskDeleteWithInstance', Queues[depth1].get('DataDisks')[depth2].get('DataDiskDeleteWithInstance'))
                    if Queues[depth1].get('DataDisks')[depth2].get('DataDiskEncrypted') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.DataDisks.' + str(depth2 + 1) + '.DataDiskEncrypted', Queues[depth1].get('DataDisks')[depth2].get('DataDiskEncrypted'))
                    if Queues[depth1].get('DataDisks')[depth2].get('DataDiskKMSKeyId') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.DataDisks.' + str(depth2 + 1) + '.DataDiskKMSKeyId', Queues[depth1].get('DataDisks')[depth2].get('DataDiskKMSKeyId'))
                    if Queues[depth1].get('DataDisks')[depth2].get('DataDiskSize') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.DataDisks.' + str(depth2 + 1) + '.DataDiskSize', Queues[depth1].get('DataDisks')[depth2].get('DataDiskSize'))
                    if Queues[depth1].get('DataDisks')[depth2].get('DataDiskCategory') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.DataDisks.' + str(depth2 + 1) + '.DataDiskCategory', Queues[depth1].get('DataDisks')[depth2].get('DataDiskCategory'))
                    if Queues[depth1].get('DataDisks')[depth2].get('DataDiskPerformanceLevel') is not None:
                        self.add_query_param('Queues.' + str(depth1 + 1) + '.DataDisks.' + str(depth2 + 1) + '.DataDiskPerformanceLevel', Queues[depth1].get('DataDisks')[depth2].get('DataDiskPerformanceLevel'))
            if Queues[depth1].get('MinNodesInQueue') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.MinNodesInQueue', Queues[depth1].get('MinNodesInQueue'))
            if Queues[depth1].get('MaxNodesPerCycle') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.MaxNodesPerCycle', Queues[depth1].get('MaxNodesPerCycle'))
            if Queues[depth1].get('SystemDiskCategory') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.SystemDiskCategory', Queues[depth1].get('SystemDiskCategory'))
            if Queues[depth1].get('MaxNodesInQueue') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.MaxNodesInQueue', Queues[depth1].get('MaxNodesInQueue'))
            if Queues[depth1].get('SystemDiskSize') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.SystemDiskSize', Queues[depth1].get('SystemDiskSize'))
            if Queues[depth1].get('QueueImageId') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.QueueImageId', Queues[depth1].get('QueueImageId'))
            if Queues[depth1].get('InstanceType') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.InstanceType', Queues[depth1].get('InstanceType'))
            if Queues[depth1].get('HostNamePrefix') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.HostNamePrefix', Queues[depth1].get('HostNamePrefix'))
            if Queues[depth1].get('MinNodesPerCycle') is not None:
                self.add_query_param('Queues.' + str(depth1 + 1) + '.MinNodesPerCycle', Queues[depth1].get('MinNodesPerCycle'))

    def get_GrowIntervalInMinutes(self): # Integer
        return self.get_query_params().get('GrowIntervalInMinutes')

    def set_GrowIntervalInMinutes(self, GrowIntervalInMinutes): # Integer
        self.add_query_param('GrowIntervalInMinutes', GrowIntervalInMinutes)

    def get_GrowRatio(self): # Integer
        return self.get_query_params().get('GrowRatio')

    def set_GrowRatio(self, GrowRatio): # Integer
        self.add_query_param('GrowRatio', GrowRatio)
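

# Editor's illustrative sketch (not part of the generated SDK file): a minimal
# way a request class like this is typically driven through the core AcsClient.
# The credentials, region, cluster id and queue values below are placeholders
# and assumptions, not values taken from this file.
def _example_set_auto_scale_config():
    from aliyunsdkcore.client import AcsClient  # assumes aliyun-python-sdk-core is installed

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = SetAutoScaleConfigRequest()
    request.set_ClusterId('ehpc-hz-xxxxxxxx')   # hypothetical cluster id
    request.set_EnableAutoGrow(True)
    request.set_EnableAutoShrink(True)
    request.set_MaxNodesInCluster(16)
    request.set_Queuess([{'QueueName': 'workq', 'EnableAutoGrow': True}])
    # do_action_with_exception sends the request and returns the raw response body
    return client.do_action_with_exception(request)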
| [
"[email protected]"
] | |
7db90b76ad8b3755f314e61da0f7b4ddf29bd341 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /lambda单行表达式_0.py | dd751c9cc43d2361811c60ac8ee87e8da1b77fb7 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | GB18030 | Python | false | false | 500 | py | #!/usr/bin/python
# -*- coding: cp936 -*-
b = [x for x in range(2, 100) if not [y for y in range(2, int(x**0.5) + 1) if not x % y]]
print("All primes under 100:", b)
c = [y for y in range(2, 36)]
print('All of 2--35:', c)
b = [x for x in range(2, 24) if True]
print('All of 2--23:', b)
d = [x for x in range(2, 24) if False]
print('Nothing returned: ', d)
d = [x for x in range(1, 25) if x % 2]
print('Odd numbers:', d)
d = [x for x in range(1, 25) if not x % 5]
print('Multiples of 5:', d)
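
# Editor's illustrative addition: the filename refers to lambda one-liners, while
# the lines above use list comprehensions. A sketch of the same prime filter with
# filter() and a lambda (using the corrected square-root bound) would be:
primes_via_lambda = list(filter(lambda x: all(x % y for y in range(2, int(x**0.5) + 1)), range(2, 100)))
print('Primes under 100 via filter/lambda:', primes_via_lambda)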
| [
"[email protected]"
] | |
748b1a4c649433f18bc779c59fa3d4da540bf330 | bd185738ea6a74d1e76d9fc9d8cbc59f94990842 | /onadata/libs/pagination.py | f3aaf30a3bad15075443aa054f66f133a9d41638 | [
"BSD-2-Clause"
] | permissive | aondiaye/myhelpline | c4ad9e812b3a13c6c3c8bc65028a3d3567fd6a98 | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | refs/heads/master | 2020-12-22T05:32:59.576519 | 2019-10-29T08:52:55 | 2019-10-29T08:52:55 | 236,683,448 | 1 | 0 | NOASSERTION | 2020-01-28T07:50:18 | 2020-01-28T07:50:17 | null | UTF-8 | Python | false | false | 206 | py | from rest_framework.pagination import PageNumberPagination
class StandardPageNumberPagination(PageNumberPagination):
    page_size = 1000
    page_size_query_param = 'page_size'
    max_page_size = 10000
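
# Editor's illustrative sketch: how a pagination class like this is usually wired
# into Django REST Framework, either globally or per view. The dotted path follows
# this file's location; the viewset name is a placeholder, not part of the project.
#
#   # settings.py
#   REST_FRAMEWORK = {
#       'DEFAULT_PAGINATION_CLASS': 'onadata.libs.pagination.StandardPageNumberPagination',
#   }
#
#   # or per view/viewset
#   class ExampleViewSet(viewsets.ModelViewSet):
#       pagination_class = StandardPageNumberPagination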
| [
"[email protected]"
] | |
42533e87831e34941babde24267e52e9219a54f1 | 6fa13067f1f5f50a48f7a535184c8abfb0334012 | /old/fall2019/lecture8/sqlite_example2.py | e6d430e24ea7aacf5ae9ecffb2af1c1309060823 | [] | no_license | mkzia/eas503 | 89193b889c39fc5dbc81217e1c6c3d2581b6929d | 4d7b548cc7fa8e938842d390f3df710c23d5f8fb | refs/heads/master | 2023-09-04T06:56:49.796298 | 2023-09-01T02:05:16 | 2023-09-01T02:05:16 | 205,002,120 | 70 | 123 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | import os
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        conn.execute("PRAGMA foreign_keys = 1")
    except Error as e:
        print(e)
    return conn


def create_table(conn, create_table_sql):
    try:
        c = conn.cursor()
        c.execute(create_table_sql)
    except Error as e:
        print(e)


def insert_depts(conn, values):
    sql = ''' INSERT INTO Departments(DepartmentName)
              VALUES(?) '''
    cur = conn.cursor()
    cur.execute(sql, values)
    return cur.lastrowid


def insert_student(conn, values):
    sql = ''' INSERT INTO Students(StudentName, DepartmentId, DateOfBirth)
              VALUES(?,?,?) '''
    cur = conn.cursor()
    cur.execute(sql, values)
    return cur.lastrowid


def select_all_students(conn):
    cur = conn.cursor()
    cur.execute("""SELECT * FROM Students INNER JOIN Departments USING(DepartmentId);""")
    rows = cur.fetchall()
    for row in rows:
        print(row)
    return rows


db_file = 'sample_data_py.db'
if os.path.exists(db_file):
    os.remove(db_file)

create_table_departments_sql = """ CREATE TABLE [Departments] (
    [DepartmentId] INTEGER NOT NULL PRIMARY KEY,
    [DepartmentName] NVARCHAR(50) NULL
); """

create_table_students_sql = """ CREATE TABLE [Students] (
    [StudentId] INTEGER PRIMARY KEY NOT NULL,
    [StudentName] NVARCHAR(50) NOT NULL,
    [DepartmentId] INTEGER NULL,
    [DateOfBirth] DATE NULL,
    FOREIGN KEY(DepartmentId) REFERENCES Departments(DepartmentId)
); """

conn = create_connection(db_file)

depts = ('IT', 'Physics', 'Arts', 'Math')
students = (
    ('Michael', 1, '1998-10-12'),
    ('John', 1, '1998-10-12'),
    ('Jack', 1, '1998-10-12'),
    ('Sara', 2, '1998-10-12'),
    ('Sally', 2, '1998-10-12'),
    ('Jena', None, '1998-10-12'),
    ('Nancy', 2, '1998-10-12'),
    ('Adam', 3, '1998-10-12'),
    ('Stevens', 3, '1998-10-12'),
    ('George', None, '1998-10-12')
)

with conn:
    create_table(conn, create_table_departments_sql)
    create_table(conn, create_table_students_sql)
    for values in depts:
        insert_depts(conn, (values, ))
    for values in students:
        insert_student(conn, values)
    select_all_students(conn)
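

# Editor's illustrative addition (a sketch, not part of the lecture script): a
# parameterized lookup complementing select_all_students above. The department
# name in the trailing comment is just an example value.
def select_students_by_department(conn, department_name):
    cur = conn.cursor()
    cur.execute(
        """SELECT StudentName, DepartmentName
           FROM Students INNER JOIN Departments USING(DepartmentId)
           WHERE DepartmentName = ?;""",
        (department_name,))
    return cur.fetchall()


# e.g. print(select_students_by_department(conn, 'Physics'))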
| [
"[email protected]"
] | |
37c1c3091247a88ff307abacfcd63fbc7b304bb5 | 8cce087dfd5c623c2f763f073c1f390a21838f0e | /projects/versioneer/test.py | 32fd79d45d8ce3d9b488d3feae6bca952e983ac1 | [
"Unlicense"
] | permissive | quinn-dougherty/python-on-nix | b2ae42761bccf7b3766999b27a4674310e276fd8 | 910d3f6554acd4a4ef0425ebccd31104dccb283c | refs/heads/main | 2023-08-23T11:57:55.988175 | 2021-09-24T05:55:00 | 2021-09-24T05:55:00 | 414,799,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | import versioneer
| [
"[email protected]"
] | |
fbcc54fea5b182b3e2383026e517dcaa50974606 | f20516958c39123f204e2bc442c91df7df1cc34a | /amqpstorm/exchange.py | 865a03bd8e75475a400c5bdf1d4068945cb5fa0b | [
"BSD-3-Clause"
] | permissive | bradparks/ReadableWebProxy | 3c2732cff64007afa8318b5b159616a529068322 | 81fbce3083471126942d2e2a298dba9eaf1092b1 | refs/heads/master | 2020-05-29T11:48:40.189530 | 2016-08-25T15:17:14 | 2016-08-25T15:17:14 | 66,568,996 | 0 | 0 | null | 2016-08-25T15:13:39 | 2016-08-25T15:13:39 | null | UTF-8 | Python | false | false | 5,689 | py | """AMQP-Storm Channel.Exchange."""
import logging
from pamqp.specification import Exchange as pamqp_exchange
from amqpstorm import compatibility
from amqpstorm.base import Handler
from amqpstorm.exception import AMQPInvalidArgument
LOGGER = logging.getLogger(__name__)
class Exchange(Handler):
"""AMQP Channel.exchange"""
__slots__ = []
def declare(self, exchange='', exchange_type='direct', passive=False,
durable=False, auto_delete=False, arguments=None):
"""Declare an Exchange.
:param str exchange:
:param str exchange_type:
:param bool passive:
:param bool durable:
:param bool auto_delete:
:param dict arguments:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
elif not compatibility.is_string(exchange_type):
raise AMQPInvalidArgument('exchange_type should be a string')
elif not isinstance(passive, bool):
raise AMQPInvalidArgument('passive should be a boolean')
elif not isinstance(durable, bool):
raise AMQPInvalidArgument('durable should be a boolean')
elif not isinstance(auto_delete, bool):
raise AMQPInvalidArgument('auto_delete should be a boolean')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
declare_frame = pamqp_exchange.Declare(exchange=exchange,
exchange_type=exchange_type,
passive=passive,
durable=durable,
auto_delete=auto_delete,
arguments=arguments)
return self._channel.rpc_request(declare_frame)
def delete(self, exchange='', if_unused=False):
"""Delete an Exchange.
:param str exchange:
:param bool if_unused:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
delete_frame = pamqp_exchange.Delete(exchange=exchange,
if_unused=if_unused)
return self._channel.rpc_request(delete_frame)
def bind(self, destination='', source='', routing_key='',
arguments=None):
"""Bind an Exchange.
:param str destination:
:param str source:
:param str routing_key:
:param dict arguments:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
bind_frame = pamqp_exchange.Bind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(bind_frame)
def unbind(self, destination='', source='', routing_key='',
arguments=None):
"""Unbind an Exchange.
:param str destination:
:param str source:
:param str routing_key:
:param dict arguments:
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
unbind_frame = pamqp_exchange.Unbind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(unbind_frame)
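

# Editor's illustrative sketch (not part of amqpstorm itself): the Exchange
# handler above is normally reached through an open channel as channel.exchange.
# Host, credentials, exchange/queue names and the routing key are placeholders.
def _example_declare_and_bind():
    import amqpstorm  # assumes the amqpstorm package is importable as a whole

    with amqpstorm.Connection('127.0.0.1', 'guest', 'guest') as connection:
        with connection.channel() as channel:
            channel.exchange.declare(exchange='logs', exchange_type='topic',
                                     durable=True)
            channel.queue.declare(queue='log.errors', durable=True)
            channel.queue.bind(queue='log.errors', exchange='logs',
                               routing_key='log.error.#')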
| [
"[email protected]"
] | |
9372d896b7050b2587a7d13762a113a2e9af5b33 | cfa2417f07259e512a1bbface4f1f4ccd66502c6 | /test/test_Likelihood/test_LensLikelihood/test_base_lens_likelihood.py | 6ecb7556a125e7d02dc4226cdb44037390dd4b9d | [
"BSD-3-Clause"
] | permissive | jiwoncpark/hierArc | 3779439533d3c9c5fe2e687f4bdf737dfc7673e8 | 3f31c0ae7540387fe98f778035d415c3cff38756 | refs/heads/master | 2021-05-18T21:32:45.590675 | 2020-12-23T00:01:01 | 2020-12-23T00:01:01 | 251,431,028 | 0 | 0 | NOASSERTION | 2020-03-30T21:20:08 | 2020-03-30T21:20:08 | null | UTF-8 | Python | false | false | 4,045 | py | import numpy as np
import pytest
import unittest
from hierarc.Likelihood.LensLikelihood.base_lens_likelihood import LensLikelihoodBase
class TestLensLikelihood(object):
    def setup(self):
        np.random.seed(seed=41)
        self.z_lens = 0.8
        self.z_source = 3.0
        num_samples = 10000
        ddt_samples = np.random.normal(1, 0.1, num_samples)
        dd_samples = np.random.normal(1, 0.1, num_samples)

        self.likelihood_type_list = ['DdtGaussian',
                                     'DdtDdKDE',
                                     'DdtDdGaussian',
                                     'DsDdsGaussian',
                                     'DdtLogNorm',
                                     'IFUKinCov',
                                     'DdtHist',
                                     'DdtHistKDE',
                                     'DdtHistKin',
                                     'DdtGaussKin',
                                     'Mag',
                                     'TDMag']
        self.kwargs_likelihood_list = [{'ddt_mean': 1, 'ddt_sigma': 0.1},
                                       {'dd_samples': dd_samples, 'ddt_samples': ddt_samples, 'kde_type': 'scipy_gaussian', 'bandwidth': 1},
                                       {'ddt_mean': 1, 'ddt_sigma': 0.1, 'dd_mean': 1, 'dd_sigma': 0.1},
                                       {'ds_dds_mean': 1, 'ds_dds_sigma': 0.1},
                                       {'ddt_mu': 1, 'ddt_sigma': 0.1},
                                       {'sigma_v_measurement': [1], 'j_model': [1], 'error_cov_measurement': [[1]], 'error_cov_j_sqrt': [[1]]},
                                       {'ddt_samples': ddt_samples},
                                       {'ddt_samples': ddt_samples},
                                       {'ddt_samples': ddt_samples, 'sigma_v_measurement': [1], 'j_model': [1], 'error_cov_measurement': [[1]], 'error_cov_j_sqrt': [[1]]},
                                       {'ddt_mean': 1, 'ddt_sigma': 0.1, 'sigma_v_measurement': [1], 'j_model': [1], 'error_cov_measurement': [[1]], 'error_cov_j_sqrt': [[1]]},
                                       {'amp_measured': [1], 'cov_amp_measured': [[1]], 'mag_model': [1], 'cov_model': [[1]]},
                                       {'time_delay_measured': [1.], 'cov_td_measured': [[1.]], 'amp_measured': [1., 1.], 'cov_amp_measured': [[1., 0], [0, 1.]], 'fermat_diff': [1.], 'mag_model': [1., 1.], 'cov_model': np.ones((3, 3))}
                                       ]

    def test_log_likelihood(self):
        for i, likelihood_type in enumerate(self.likelihood_type_list):
            likelihood = LensLikelihoodBase(z_lens=self.z_lens, z_source=self.z_source, likelihood_type=likelihood_type,
                                            **self.kwargs_likelihood_list[i])
            print(likelihood_type)
            logl = likelihood.log_likelihood(ddt=1, dd=1, aniso_scaling=None, sigma_v_sys_error=1, mu_intrinsic=1)
            print(logl)
            assert logl > -np.inf

    def test_predictions_measurements(self):
        for i, likelihood_type in enumerate(self.likelihood_type_list):
            likelihood = LensLikelihoodBase(z_lens=self.z_lens, z_source=self.z_source, likelihood_type=likelihood_type,
                                            **self.kwargs_likelihood_list[i])
            ddt_measurement = likelihood.ddt_measurement()
            likelihood.sigma_v_measurement(sigma_v_sys_error=0)
            likelihood.sigma_v_prediction(ddt=1, dd=1, aniso_scaling=1)
            assert len(ddt_measurement) == 2


class TestRaise(unittest.TestCase):

    def test_raise(self):
        with self.assertRaises(ValueError):
            LensLikelihoodBase(z_lens=0.5, z_source=2, likelihood_type='BAD')
        with self.assertRaises(ValueError):
            likelihood = LensLikelihoodBase(z_lens=0.5, z_source=2, likelihood_type='DdtGaussian',
                                            **{'ddt_mean': 1, 'ddt_sigma': 0.1})
            likelihood.likelihood_type = 'BAD'
            likelihood.log_likelihood(ddt=1, dd=1)


if __name__ == '__main__':
    pytest.main()
| [
"[email protected]"
] | |
f8aa9cc771efab36e523016cc18be7dd92b8bf88 | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /test/test_study_group_values_controller_api.py | 671f7e874460bcd47617d26a420f26a608131ef4 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import rcc
from rcc.api.study_group_values_controller_api import StudyGroupValuesControllerApi # noqa: E501
from rcc.rest import ApiException
class TestStudyGroupValuesControllerApi(unittest.TestCase):
"""StudyGroupValuesControllerApi unit test stubs"""
def setUp(self):
self.api = rcc.api.study_group_values_controller_api.StudyGroupValuesControllerApi() # noqa: E501
def tearDown(self):
pass
def test_create11(self):
"""Test case for create11
Create new Study Group Value for current Study based on auth token provided # noqa: E501
"""
pass
def test_delete8(self):
"""Test case for delete8
Delete Study Group Value for current Study based on auth token provided # noqa: E501
"""
pass
def test_get_details8(self):
"""Test case for get_details8
Get specified Study Group Value details # noqa: E501
"""
pass
def test_get_list9(self):
"""Test case for get_list9
Get list of all Study Group Values for specified Study # noqa: E501
"""
pass
def test_update10(self):
"""Test case for update10
Update Study Group Value for current Study based on auth token provided # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] |