blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3..616) | content_id (string, len 40) | detected_licenses (sequence, len 0..112) | license_type (string, 2 classes) | repo_name (string, len 5..115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3..10.2M) | extension (string, 188 classes) | content (string, len 3..10.2M) | authors (sequence, len 1) | author_id (string, len 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d2af27c7b63e8f21fc7c713d6004cfdb8063ea9 | 820a8e7ec541299f315ac43ddb3b41236e11cd33 | /demo/streaming/message_based_client.py | 8bba3e3493dd7f6aadd1d443706b2ee614e2f6f3 | [
"Apache-2.0"
] | permissive | hpsaturn/Autobahn | 5caba163ee976e8ddedadfb1a79139ba6014861b | f7bd44433f227130901440e768073e2afbf410bf | refs/heads/master | 2021-01-17T22:09:02.484645 | 2011-11-01T18:27:57 | 2011-11-01T18:27:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from ranstring import randomByteString
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol
MESSAGE_SIZE = 1 * 2**20
class MessageBasedHashClientProtocol(WebSocketClientProtocol):
"""
    Message-based WebSockets client that generates a stream of random octets
    sent to the WebSockets server as a sequence of messages. The server will
    respond to us with the SHA-256 computed over each message. When
    we receive a response, we repeat by sending a new message.
"""
def sendOneMessage(self):
data = randomByteString(MESSAGE_SIZE)
self.sendMessage(data, binary = True)
def onOpen(self):
self.count = 0
self.sendOneMessage()
def onMessage(self, message, binary):
print "Digest for message %d computed by server: %s" % (self.count, message)
self.count += 1
self.sendOneMessage()
if __name__ == '__main__':
factory = WebSocketClientFactory()
factory.protocol = MessageBasedHashClientProtocol
reactor.connectTCP("localhost", 9000, factory)
reactor.run()
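# Note (added for clarity): this client assumes a companion hashing server from
# the same streaming demo is listening on localhost:9000; every 1 MiB binary
# message is answered with its SHA-256 digest, which triggers the next message.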
| [
"[email protected]"
] | |
a8a95539dac6b0b456a25ccbafca9321dd5c8b20 | 5e8832e7a49e121c4db1f57d036fe39b4250246a | /347_top_k_frequent_elements.py | f3f54cdd069c2acf438ed7c5694b526627821a0d | [] | no_license | shaniavina/Leetcode_Python | 9e80477794cd80e00a399d65b76088eea41d80d1 | 185bf1542265f5f4feca2e937d1d36a7bb4a5d2b | refs/heads/master | 2022-10-12T10:56:23.476219 | 2022-09-21T01:53:40 | 2022-09-21T01:53:40 | 52,979,850 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import collections
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
frq = collections.defaultdict(list)
for key, cnt in collections.Counter(nums).items():
frq[cnt].append(key)
res = []
for times in reversed(range(len(nums) + 1)):
res.extend(frq[times])
if len(res) >= k:
return res[:k]
return res[:k]
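# Illustrative check (added for clarity):
#   Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2)  ->  [1, 2]
# Keys are bucketed by their frequency, then collected from the highest count
# down until at least k elements are gathered.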
| [
"[email protected]"
] | |
097321d7ebb305770869f9fd631a4837eeeec702 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/mayavi/enthought/mayavi/tools/sources.py | 2dcd4601cae2528189b5d19a6fb7ba72ba533b83 | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,570 | py | """
Data sources classes and their associated functions for mlab.
"""
# Author: Gael Varoquaux <[email protected]>
# Prabhu Ramachandran
# Copyright (c) 2007-2010, Enthought, Inc.
# License: BSD Style.
import operator
import numpy as np
from enthought.traits.api import (HasTraits, Instance, CArray, Either,
Bool, on_trait_change, NO_COMPARE)
from enthought.tvtk.api import tvtk
from enthought.tvtk.common import camel2enthought
from enthought.mayavi.sources.array_source import ArraySource
from enthought.mayavi.core.registry import registry
import tools
from engine_manager import engine_manager
__all__ = [ 'vector_scatter', 'vector_field', 'scalar_scatter',
'scalar_field', 'line_source', 'array2d_source', 'grid_source',
'open', 'triangular_mesh_source', 'vertical_vectors_source',
]
################################################################################
# A subclass of CArray that will accept floats and do a np.atleast_1d
################################################################################
class CArrayOrNumber(CArray):
def validate( self, object, name, value):
if operator.isNumberType(value):
value = np.atleast_1d(value)
return CArray.validate(self, object, name, value)
################################################################################
# `MlabSource` class.
################################################################################
class MlabSource(HasTraits):
"""
This class represents the base class for all mlab sources. These
classes allow a user to easily update the data without having to
recreate the whole pipeline.
"""
# The TVTK dataset we manage.
dataset = Instance(tvtk.DataSet)
# The Mayavi data source we manage.
m_data = Instance(HasTraits)
########################################
# Private traits.
# Disable the update when data is changed.
_disable_update = Bool(False)
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Function to create the data from input arrays etc.
This is to be used when the size of the arrays change or the
first time when the data is created. This regenerates the data
structures and will be slower in general.
"""
raise NotImplementedError()
def update(self):
"""Update the visualization.
This is to be called after the data of the visualization has
changed.
"""
if not self._disable_update:
self.dataset.modified()
md = self.m_data
if md is not None:
if hasattr(md, '_assign_attribute'):
md._assign_attribute.update()
md.data_changed = True
def set(self, trait_change_notify=True, **traits):
"""Shortcut for setting object trait attributes.
This is an overridden method that will make changing multiple
traits easier. This method is to be called when the arrays have
changed content but not in shape/size. In that case one must
call the `reset` method.
Parameters
----------
trait_change_notify : Boolean
If **True** (the default), then each value assigned may generate a
trait change notification. If **False**, then no trait change
notifications will be generated. (see also: trait_setq)
traits : list of key/value pairs
Trait attributes and their values to be set
Returns
-------
self
The method returns this object, after setting attributes.
"""
try:
self._disable_update = True
super(MlabSource, self).set(trait_change_notify, **traits)
finally:
self._disable_update = False
if trait_change_notify:
self.update()
return self
######################################################################
# Non-public interface.
######################################################################
def _m_data_changed(self, ds):
if not hasattr(ds, 'mlab_source'):
ds.add_trait('mlab_source', Instance(MlabSource))
ds.mlab_source = self
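# Typical usage pattern (comment added for clarity; the `mlab.surf` call below
# is illustrative, any mlab factory that attaches an `mlab_source` works):
#   s = mlab.surf(x, y, z)
#   s.mlab_source.scalars = new_z           # in-place update of one trait
#   s.mlab_source.set(x=x, scalars=new_z)   # several traits, single redraw
#   s.mlab_source.reset(x=x2, y=y2, z=z2)   # use when the array shapes change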
ArrayOrNone = Either(None, CArray, comparison_mode=NO_COMPARE)
ArrayNumberOrNone = Either(None, CArrayOrNumber, comparison_mode=NO_COMPARE)
################################################################################
# `MGlyphSource` class.
################################################################################
class MGlyphSource(MlabSource):
"""
This class represents a glyph data source for Mlab objects and
allows the user to set the x, y, z, scalar/vector attributes.
"""
# The x, y, z and points of the glyphs.
x = ArrayNumberOrNone
y = ArrayNumberOrNone
z = ArrayNumberOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayNumberOrNone
# The u, v, w components of the vector and the vectors.
u = ArrayNumberOrNone
v = ArrayNumberOrNone
w = ArrayNumberOrNone
vectors = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First convert numbers to arrays.
for name in ('x', 'y', 'z', 'u', 'v', 'w', 'scalars'):
if name in traits and traits[name] is not None:
traits[name] = np.atleast_1d(traits[name])
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
vectors = self.vectors
scalars = self.scalars
points = self.points
x, y, z = self.x, self.y, self.z
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
if 'points' in traits:
x=points[:,0].ravel()
y=points[:,1].ravel()
z=points[:,2].ravel()
self.set(x=x,y=y,z=z,trait_change_notify=False)
else:
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (points.size/3, 3)
self.set(points=points, trait_change_notify=False)
u, v, w = self.u, self.v, self.w
if u is not None:
u = np.atleast_1d(u)
v = np.atleast_1d(v)
w = np.atleast_1d(w)
if len(u) > 0:
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
vectors.shape = (vectors.size/3, 3)
self.set(vectors=vectors, trait_change_notify=False)
if 'vectors' in traits:
u=vectors[:,0].ravel()
v=vectors[:,1].ravel()
w=vectors[:,2].ravel()
self.set(u=u,v=v,w=w,trait_change_notify=False)
else:
if u is not None and len(u) > 0:
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
vectors.shape = (vectors.size/3, 3)
self.set(vectors=vectors, trait_change_notify=False)
if vectors is not None and len(vectors) > 0:
assert len(points) == len(vectors)
if scalars is not None:
scalars = np.atleast_1d(scalars)
if len(scalars) > 0:
assert len(points) == len(scalars)
# Create the dataset.
polys = np.arange(0, len(points), 1, 'l')
polys = np.reshape(polys, (len(points), 1))
if self.dataset is None:
# Create new dataset if none exists
pd = tvtk.PolyData()
else:
# Modify existing one.
pd = self.dataset
pd.set(points=points, polys=polys)
if self.vectors is not None:
pd.point_data.vectors = self.vectors
pd.point_data.vectors.name = 'vectors'
if self.scalars is not None:
pd.point_data.scalars = self.scalars
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
x = np.atleast_1d(x)
self.points[:,0] = x
self.update()
def _y_changed(self, y):
y = np.atleast_1d(y)
self.points[:,1] = y
self.update()
def _z_changed(self, z):
z = np.atleast_1d(z)
self.points[:,2] = z
self.update()
def _u_changed(self, u):
u = np.atleast_1d(u)
self.vectors[:,0] = u
self.update()
def _v_changed(self, v):
v = np.atleast_1d(v)
self.vectors[:,1] = v
self.update()
def _w_changed(self, w):
w = np.atleast_1d(w)
self.vectors[:,2] = w
self.update()
def _points_changed(self, p):
p = np.atleast_2d(p)
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
if s is None:
self.dataset.point_data.scalars = None
self.dataset.point_data.remove_array('scalars')
else:
s = np.atleast_1d(s)
self.dataset.point_data.scalars = s
self.dataset.point_data.scalars.name = 'scalars'
self.update()
def _vectors_changed(self, v):
self.dataset.point_data.vectors = v
self.dataset.point_data.vectors.name = 'vectors'
self.update()
################################################################################
# `MVerticalGlyphSource` class.
################################################################################
class MVerticalGlyphSource(MGlyphSource):
"""
This class represents a vertical glyph data source for Mlab objects
and allows the user to set the x, y, z, scalar attributes. The
vectors are created from the scalars to represent them in the
vertical direction.
"""
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
if 'scalars' in traits:
s = traits['scalars']
if s is not None:
traits['u'] = traits['v'] = np.ones_like(s),
traits['w'] = s
super(MVerticalGlyphSource, self).reset(**traits)
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s
self.dataset.point_data.scalars.name = 'scalars'
self.set(vectors=np.c_[np.ones_like(s),
np.ones_like(s),
s])
self.update()
################################################################################
# `MArraySource` class.
################################################################################
class MArraySource(MlabSource):
"""
This class represents an array data source for Mlab objects and
allows the user to set the x, y, z, scalar/vector attributes.
"""
# The x, y, z arrays for the volume.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The u, v, w components of the vector and the vectors.
u = ArrayOrNone
v = ArrayOrNone
w = ArrayOrNone
vectors = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
vectors = self.vectors
scalars = self.scalars
x, y, z = [np.atleast_3d(a) for a in self.x, self.y, self.z]
u, v, w = self.u, self.v, self.w
if 'vectors' in traits:
u=vectors[:,0].ravel()
v=vectors[:,1].ravel()
w=vectors[:,2].ravel()
self.set(u=u,v=v,w=w,trait_change_notify=False)
else:
if u is not None and len(u) > 0:
#vectors = np.concatenate([u[..., np.newaxis],
# v[..., np.newaxis],
# w[..., np.newaxis] ],
# axis=3)
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
vectors.shape = (u.shape[0] , u.shape[1], w.shape[2], 3)
self.set(vectors=vectors, trait_change_notify=False)
if vectors is not None and len(vectors) > 0 and scalars is not None:
assert len(scalars) == len(vectors)
if x.shape[0] <= 1:
dx = 1
else:
dx = x[1, 0, 0] - x[0, 0, 0]
if y.shape[1] <= 1:
dy = 1
else:
dy = y[0, 1, 0] - y[0, 0, 0]
if z.shape[2] <= 1:
dz = 1
else:
dz = z[0, 0, 1] - z[0, 0, 0]
if self.m_data is None:
ds = ArraySource(transpose_input_array=True)
else:
ds = self.m_data
old_scalar = ds.scalar_data
ds.set(vector_data=vectors,
origin=[x.min(), y.min(), z.min()],
spacing=[dx, dy, dz],
scalar_data=scalars)
if scalars is old_scalar:
ds._scalar_data_changed(scalars)
self.dataset = ds.image_data
self.m_data = ds
######################################################################
# Non-public interface.
######################################################################
@on_trait_change('[x, y, z]')
def _xyz_changed(self):
x, y, z = self.x, self.y, self.z
dx = x[1, 0, 0] - x[0, 0, 0]
dy = y[0, 1, 0] - y[0, 0, 0]
dz = z[0, 0, 1] - z[0, 0, 0]
ds = self.dataset
ds.origin = [x.min(), y.min(), z.min()]
ds.spacing = [dx, dy, dz]
if self.m_data is not None:
self.m_data.set(origin=ds.origin, spacing=ds.spacing)
self.update()
def _u_changed(self, u):
self.vectors[...,0] = u
self.m_data._vector_data_changed(self.vectors)
def _v_changed(self, v):
self.vectors[...,1] = v
self.m_data._vector_data_changed(self.vectors)
def _w_changed(self, w):
self.vectors[...,2] = w
self.m_data._vector_data_changed(self.vectors)
def _scalars_changed(self, s):
old = self.m_data.scalar_data
self.m_data.scalar_data = s
if old is s:
self.m_data._scalar_data_changed(s)
def _vectors_changed(self, v):
self.m_data.vector_data = v
################################################################################
# `MLineSource` class.
################################################################################
class MLineSource(MlabSource):
"""
This class represents a line data source for Mlab objects and
allows the user to set the x, y, z, scalar attributes.
"""
# The x, y, z and points of the glyphs.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
if 'points' in traits:
x=points[:,0].ravel()
y=points[:,1].ravel()
z=points[:,2].ravel()
self.set(x=x,y=y,z=z,trait_change_notify=False)
else:
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (len(x), 3)
self.set(points=points, trait_change_notify=False)
# Create the dataset.
n_pts = len(points) - 1
lines = np.zeros((n_pts, 2), 'l')
lines[:,0] = np.arange(0, n_pts-0.5, 1, 'l')
lines[:,1] = np.arange(1, n_pts+0.5, 1, 'l')
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
        # Avoid lines referring to non-existing points: first set the
        # lines to None, then set the points, then set the lines
        # referring to the new points.
pd.set(lines=None)
pd.set(points=points)
pd.set(lines=lines)
if scalars is not None and len(scalars) > 0:
assert len(x) == len(scalars)
pd.point_data.scalars = np.ravel(scalars)
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.points[:,0] = x
self.update()
def _y_changed(self, y):
self.points[:,1] = y
self.update()
def _z_changed(self, z):
self.points[:,2] = z
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
################################################################################
# `MArray2DSource` class.
################################################################################
class MArray2DSource(MlabSource):
"""
This class represents a 2D array data source for Mlab objects and
allows the user to set the x, y and scalar attributes.
"""
# The x, y values.
# Values of X and Y as None are accepted, in that case we would build
# values of X and Y automatically from the shape of scalars
x = ArrayOrNone
y = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The masking array.
mask = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
x, y, mask = self.x, self.y, self.mask
scalars = self.scalars
# We may have used this without specifying x and y at all in
# which case we set them from the shape of scalars.
nx, ny = scalars.shape
#Build X and Y from shape of Scalars if they are none
if x is None and y is None:
x, y = np.mgrid[-nx/2.:nx/2, -ny/2.:ny/2]
if mask is not None and len(mask) > 0:
scalars[mask.astype('bool')] = np.nan
# The NaN trick only works with floats.
scalars = scalars.astype('float')
self.set(scalars=scalars, trait_change_notify=False)
z = np.array([0])
self.set(x=x, y=y, z=z, trait_change_notify=False)
# Do some magic to extract the first row/column, independently of
# the shape of x and y
x = np.atleast_2d(x.squeeze().T)[0, :].squeeze()
y = np.atleast_2d(y.squeeze())[0, :].squeeze()
if x.ndim == 0:
dx = 1
else:
dx = x[1] - x[0]
if y.ndim == 0:
dy = 1
else:
dy = y[1] - y[0]
if self.m_data is None:
ds = ArraySource(transpose_input_array=True)
else:
ds = self.m_data
old_scalar = ds.scalar_data
ds.set(origin=[x.min(), y.min(), 0],
spacing=[dx, dy, 1],
scalar_data=scalars)
if old_scalar is scalars:
ds._scalar_data_changed(scalars)
self.dataset = ds.image_data
self.m_data = ds
######################################################################
# Non-public interface.
######################################################################
@on_trait_change('[x, y]')
def _xy_changed(self):
x, y,scalars = self.x, self.y, self.scalars
nx, ny = scalars.shape
if x is None or y is None:
x, y = np.mgrid[-nx/2.:nx/2, -ny/2.:ny/2]
self.trait_setq(x=x,y=y)
x = np.atleast_2d(x.squeeze().T)[0, :].squeeze()
y = np.atleast_2d(y.squeeze())[0, :].squeeze()
dx = x[1] - x[0]
dy = y[1] - y[0]
ds = self.dataset
ds.origin = [x.min(), y.min(), 0]
ds.spacing = [dx, dy, 1]
if self.m_data is not None:
self.m_data.set(origin=ds.origin, spacing=ds.spacing)
self.update()
def _scalars_changed(self, s):
mask = self.mask
if mask is not None and len(mask) > 0:
s[mask.astype('bool')] = np.nan
            # The NaN trick only works with floats.
s = s.astype('float')
self.set(scalars=s, trait_change_notify=False)
old = self.m_data.scalar_data
self.m_data.scalar_data = s
if s is old:
self.m_data._scalar_data_changed(s)
################################################################################
# `MGridSource` class.
################################################################################
class MGridSource(MlabSource):
"""
This class represents a grid source for Mlab objects and
allows the user to set the x, y, scalar attributes.
"""
# The x, y, z and points of the grid.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
assert len(x.shape) == 2, "Array x must be 2 dimensional."
assert len(y.shape) == 2, "Array y must be 2 dimensional."
assert len(z.shape) == 2, "Array z must be 2 dimensional."
assert x.shape == y.shape, "Arrays x and y must have same shape."
assert y.shape == z.shape, "Arrays y and z must have same shape."
#Points in the grid source will always be created using x,y,z
#Changing of points is not allowed because it cannot be used to modify values of x,y,z
nx, ny = x.shape
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (nx*ny, 3)
self.set(points=points, trait_change_notify=False)
i, j = np.mgrid[0:nx-1,0:ny-1]
i, j = np.ravel(i), np.ravel(j)
t1 = i*ny+j, (i+1)*ny+j, (i+1)*ny+(j+1)
t2 = (i+1)*ny+(j+1), i*ny+(j+1), i*ny+j
nt = len(t1[0])
triangles = np.zeros((nt*2, 3), 'l')
triangles[0:nt,0], triangles[0:nt,1], triangles[0:nt,2] = t1
triangles[nt:,0], triangles[nt:,1], triangles[nt:,2] = t2
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
pd.set(points=points, polys=triangles)
if scalars is not None and len(scalars) > 0:
if not scalars.flags.contiguous:
scalars = scalars.copy()
self.set(scalars=scalars, trait_change_notify=False)
assert x.shape == scalars.shape
pd.point_data.scalars = scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.trait_setq(x=x);
self.points[:,0] = x.ravel()
self.update()
def _y_changed(self, y):
self.trait_setq(y=y)
self.points[:,1] = y.ravel()
self.update()
def _z_changed(self, z):
self.trait_setq(z=z)
self.points[:,2] = z.ravel()
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
################################################################################
# `MTriangularMeshSource` class.
################################################################################
class MTriangularMeshSource(MlabSource):
"""
This class represents a triangular mesh source for Mlab objects and
allows the user to set the x, y, scalar attributes.
"""
# The x, y, z and points of the grid.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
triangles = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (points.size/3, 3)
self.set(points=points, trait_change_notify=False)
triangles = self.triangles
assert triangles.shape[1] == 3, \
"The shape of the triangles array must be (X, 3)"
assert triangles.max() < len(points), \
"The triangles indices must be smaller that the number of points"
assert triangles.min() >= 0, \
"The triangles indices must be positive or null"
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
        # Set the points first, and the triangles after, so that the
        # polygons can refer to the right points in the polydata.
pd.set(points=points)
pd.set(polys=triangles)
if (not 'scalars' in traits
and scalars is not None
and scalars.shape != x.shape):
            # The scalars were probably set automatically to z by the
            # factory. We need to reset them, as the size has changed.
scalars = z
if scalars is not None and len(scalars) > 0:
if not scalars.flags.contiguous:
scalars = scalars.copy()
self.set(scalars=scalars, trait_change_notify=False)
assert x.shape == scalars.shape
pd.point_data.scalars = scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.trait_setq(x=x);
self.points[:,0] = x.ravel()
self.update()
def _y_changed(self, y):
self.trait_setq(y=y)
self.points[:,1] = y.ravel()
self.update()
def _z_changed(self, z):
self.trait_setq(z=z)
self.points[:,2] = z.ravel()
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
def _triangles_changed(self, triangles):
if triangles.min() < 0:
raise ValueError, 'The triangles array has negative values'
if triangles.max() > self.x.size:
            raise ValueError, 'The triangles array has values larger than ' \
                              'the number of points'
self.dataset.polys = triangles
self.update()
############################################################################
# Argument processing
############################################################################
def convert_to_arrays(args):
""" Converts a list of iterables to a list of arrays or callables,
if needed.
"""
args = list(args)
for index, arg in enumerate(args):
if not callable(arg):
if not hasattr(arg, 'shape'):
arg = np.atleast_1d(np.array(arg))
if np.any(np.isinf(arg)):
raise ValueError("""Input array contains infinite values
You can remove them using: a[np.isinf(a)] = np.nan
""")
args[index] = arg
return args
def process_regular_vectors(*args):
""" Converts different signatures to (x, y, z, u, v, w). """
args = convert_to_arrays(args)
if len(args)==3:
u, v, w = [np.atleast_3d(a) for a in args]
assert len(u.shape)==3, "3D array required"
x, y, z = np.indices(u.shape)
elif len(args)==6:
x, y, z, u, v, w = args
elif len(args)==4:
x, y, z, f = args
if not callable(f):
raise ValueError, "When 4 arguments are provided, the fourth must be a callable"
u, v, w = f(x, y, z)
else:
raise ValueError, "wrong number of arguments"
assert ( x.shape == y.shape and
y.shape == z.shape and
u.shape == z.shape and
v.shape == u.shape and
             w.shape == v.shape ), "argument shapes are not equal"
return x, y, z, u, v, w
def process_regular_scalars(*args):
""" Converts different signatures to (x, y, z, s). """
args = convert_to_arrays(args)
if len(args)==1:
s = np.atleast_3d(args[0])
assert len(s.shape)==3, "3D array required"
x, y, z = np.indices(s.shape)
elif len(args)==3:
x, y, z = args
s = None
elif len(args)==4:
x, y, z, s = args
if callable(s):
s = s(x, y, z)
else:
raise ValueError, "wrong number of arguments"
assert ( x.shape == y.shape and
y.shape == z.shape and
( s is None
               or s.shape == z.shape ) ), "argument shapes are not equal"
return x, y, z, s
def process_regular_2d_scalars(*args, **kwargs):
""" Converts different signatures to (x, y, s). """
args = convert_to_arrays(args)
for index, arg in enumerate(args):
if not callable(arg):
args[index] = np.atleast_2d(arg)
if len(args)==1:
s = args[0]
assert len(s.shape)==2, "2D array required"
x, y = np.indices(s.shape)
elif len(args)==3:
x, y, s = args
if callable(s):
s = s(x, y)
else:
raise ValueError, "wrong number of arguments"
assert len(s.shape)==2, "2D array required"
if 'mask' in kwargs:
mask = kwargs['mask']
s[mask.astype('bool')] = np.nan
        # The NaN trick only works with floats.
s = s.astype('float')
return x, y, s
############################################################################
# Sources
############################################################################
def vector_scatter(*args, **kwargs):
""" Creates scattered vector data.
**Function signatures**::
vector_scatter(u, v, w, ...)
vector_scatter(x, y, z, u, v, w, ...)
vector_scatter(x, y, z, f, ...)
If only 3 arrays u, v, w are passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If 4 positional arguments are passed the last one must be a callable, f,
that returns vectors.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
x, y, z, u, v, w = process_regular_vectors(*args)
scalars = kwargs.pop('scalars', None)
if scalars is not None:
scalars = np.ravel(scalars)
name = kwargs.pop('name', 'VectorScatter')
data_source = MGlyphSource()
data_source.reset(x=x, y=y, z=z, u=u, v=v, w=w, scalars=scalars)
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def vector_field(*args, **kwargs):
""" Creates vector field data.
**Function signatures**::
vector_field(u, v, w, ...)
vector_field(x, y, z, u, v, w, ...)
vector_field(x, y, z, f, ...)
If only 3 arrays u, v, w are passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If the x, y and z arrays are passed, they should have been generated
by `numpy.mgrid` or `numpy.ogrid`. The function builds a scalar field
    assuming the points are regularly spaced on an orthogonal grid.
If 4 positional arguments are passed the last one must be a callable, f,
that returns vectors.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 3:
x = y = z = np.atleast_3d(1)
u, v, w = [np.atleast_3d(a) for a in args]
else:
x, y, z, u, v, w = [np.atleast_3d(a)
for a in process_regular_vectors(*args)]
scalars = kwargs.pop('scalars', None)
if scalars is not None:
scalars = np.atleast_3d(scalars)
data_source = MArraySource()
data_source.reset(x=x, y=y, z=z, u=u, v=v, w=w, scalars=scalars)
name = kwargs.pop('name', 'VectorField')
return tools.add_dataset(data_source.m_data, name, **kwargs)
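# Illustrative usage (comment added for clarity; values are arbitrary):
#   import numpy as np
#   x, y, z = np.mgrid[-2.:3, -2.:3, -2.:3]
#   field = vector_field(x, y, z, x, y, z)   # a simple radial vector field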
def scalar_scatter(*args, **kwargs):
"""
Creates scattered scalar data.
**Function signatures**::
scalar_scatter(s, ...)
        scalar_scatter(x, y, z, s, ...)
scalar_scatter(x, y, z, f, ...)
If only 1 array s is passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
x, y, z, s = process_regular_scalars(*args)
if s is not None:
s = np.ravel(s)
data_source = MGlyphSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'ScalarScatter')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def scalar_field(*args, **kwargs):
"""
Creates a scalar field data.
**Function signatures**::
scalar_field(s, ...)
scalar_field(x, y, z, s, ...)
scalar_field(x, y, z, f, ...)
If only 1 array s is passed the x, y and z arrays are assumed to be
made from the indices of arrays.
If the x, y and z arrays are passed they are supposed to have been
generated by `numpy.mgrid`. The function builds a scalar field assuming
    the points are regularly spaced.
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 1:
# Be lazy, don't create three big arrays for 1 input array. The
# MArraySource is clever-enough to handle flat arrays
x = y = z = np.atleast_1d(1)
s = args[0]
else:
x, y, z, s = process_regular_scalars(*args)
data_source = MArraySource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'ScalarField')
return tools.add_dataset(data_source.m_data, name, **kwargs)
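# Illustrative usage (comment added for clarity; values are arbitrary):
#   import numpy as np
#   s = np.random.random((20, 20, 20))
#   field = scalar_field(s)   # x, y, z default to the array indices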
def line_source(*args, **kwargs):
"""
Creates line data.
**Function signatures**::
line_source(x, y, z, ...)
line_source(x, y, z, s, ...)
line_source(x, y, z, f, ...)
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args)==1:
raise ValueError, "wrong number of arguments"
x, y, z, s = process_regular_scalars(*args)
data_source = MLineSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'LineSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def array2d_source(*args, **kwargs):
"""
Creates structured 2D data from a 2D array.
**Function signatures**::
array2d_source(s, ...)
array2d_source(x, y, s, ...)
array2d_source(x, y, f, ...)
If 3 positional arguments are passed the last one must be an array s,
or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values.
x and y can be 1D or 2D arrays (such as returned by numpy.ogrid or
numpy.mgrid), but the points should be located on an orthogonal grid
(possibly non-uniform). In other words, all the points sharing a same
index in the s array need to have the same x or y value.
If only 1 array s is passed the x and y arrays are assumed to be
    made from the indices of arrays, and a uniformly-spaced data set is
created.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
:mask: Mask points specified in a boolean masking array.
"""
data_source = MArray2DSource()
mask = kwargs.pop('mask', None)
if len(args) == 1 :
args = convert_to_arrays(args)
s = np.atleast_2d(args[0])
data_source.reset(scalars=s, mask=mask)
else:
x, y, s = process_regular_2d_scalars(*args, **kwargs)
data_source.reset(x=x, y=y, scalars=s, mask=mask)
name = kwargs.pop('name', 'Array2DSource')
return tools.add_dataset(data_source.m_data, name, **kwargs)
def grid_source(x, y, z, **kwargs):
"""
Creates 2D grid data.
x, y, z are 2D arrays giving the positions of the vertices of the surface.
The connectivity between these points is implied by the connectivity on
the arrays.
    For simple structures (such as orthogonal grids) prefer the array2d_source
function, as it will create more efficient data structures.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
scalars = kwargs.pop('scalars', None)
if scalars is None:
scalars = z
x, y, z, scalars = convert_to_arrays((x, y, z, scalars))
data_source = MGridSource()
data_source.reset(x=x, y=y, z=z, scalars=scalars)
name = kwargs.pop('name', 'GridSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def vertical_vectors_source(*args, **kwargs):
"""
Creates a set of vectors pointing upward, useful eg for bar graphs.
**Function signatures**::
vertical_vectors_source(s, ...)
vertical_vectors_source(x, y, s, ...)
vertical_vectors_source(x, y, f, ...)
vertical_vectors_source(x, y, z, s, ...)
vertical_vectors_source(x, y, z, f, ...)
If only one positional argument is passed, it can be a 1D, 2D, or 3D
array giving the length of the vectors. The positions of the data
    points are deduced from the indices of the array, and a
    uniformly-spaced data set is created.
If 3 positional arguments (x, y, s) are passed the last one must be
an array s, or a callable, f, that returns an array. x and y give the
2D coordinates of positions corresponding to the s values. The
vertical position is assumed to be 0.
If 4 positional arguments (x, y, z, s) are passed, the 3 first are
arrays giving the 3D coordinates of the data points, and the last one
is an array s, or a callable, f, that returns an array giving the
data value.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
if len(args) == 3:
x, y, data = args
if np.isscalar(x):
z = 0
else:
z = np.zeros_like(x)
args = (x, y, z, data)
x, y, z, s = process_regular_scalars(*args)
if s is not None:
s = np.ravel(s)
data_source = MVerticalGlyphSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'VerticalVectorsSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def triangular_mesh_source(x, y, z, triangles, **kwargs):
"""
Creates 2D mesh by specifying points and triangle connectivity.
x, y, z are 2D arrays giving the positions of the vertices of the surface.
    The connectivity between these points is given by listing triplets of
    inter-connected vertices. These vertices are designated by their
    position index.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
x, y, z, triangles = convert_to_arrays((x, y, z, triangles))
if triangles.min() < 0:
raise ValueError, 'The triangles array has negative values'
if triangles.max() > x.size:
        raise ValueError, 'The triangles array has values larger than ' \
                          'the number of points'
scalars = kwargs.pop('scalars', None)
if scalars is None:
scalars = z
data_source = MTriangularMeshSource()
data_source.reset(x=x, y=y, z=z, triangles=triangles, scalars=scalars)
name = kwargs.pop('name', 'TriangularMeshSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
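# Illustrative usage (comment added for clarity; a single triangle in z=0):
#   import numpy as np
#   x = np.array([0., 1., 0.])
#   y = np.array([0., 0., 1.])
#   z = np.zeros(3)
#   src = triangular_mesh_source(x, y, z, [[0, 1, 2]])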
def open(filename, figure=None):
"""Open a supported data file given a filename. Returns the source
object if a suitable reader was found for the file.
"""
if figure is None:
engine = tools.get_engine()
else:
engine = engine_manager.find_figure_engine(figure)
engine.current_scene = figure
src = engine.open(filename)
return src
############################################################################
# Automatically generated sources from registry.
############################################################################
def _create_data_source(metadata):
"""Creates a data source and adds it to the mayavi engine given
metadata of the source. Returns the created source.
"""
factory = metadata.get_callable()
src = factory()
engine = tools.get_engine()
engine.add_source(src)
return src
def _make_functions(namespace):
"""Make the automatic functions and add them to the namespace."""
for src in registry.sources:
if len(src.extensions) == 0:
func_name = camel2enthought(src.id)
if func_name.endswith('_source'):
func_name = func_name[:-7]
func = lambda metadata=src: _create_data_source(metadata)
func.__doc__ = src.help
func.__name__ = func_name
# Inject function into the namespace and __all__.
namespace[func_name] = func
__all__.append(func_name)
_make_functions(locals())
| [
"[email protected]"
] | |
ddf31aa0247b5bd2963cdb3c8159a26bb33c77e0 | fe039f62337b210061bfd7291000c5fa406fd0ff | /list/webapp/models.py | 4a9bf3ad037982d3daaeb33bc2a410482cb276bf | [] | no_license | Erlan1998/python_group_7_homework_45_Erlan_Kurbanaliev | a5f5956490d778341e4958fe6740ab6e1a395f45 | 4f860b561f046413bbc9ab8f587b8f7c40b8c23a | refs/heads/main | 2023-05-07T00:16:28.530637 | 2021-03-04T12:32:36 | 2021-03-04T12:32:36 | 342,240,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from django.db import models
status_choices = [('new', 'Новая'), ('in_progress', 'В процессе'), ('done', 'Сделано')]
class List(models.Model):
description = models.TextField(max_length=200, null=False, blank=False)
detailed_description = models.TextField(max_length=3000, null=True, blank=True)
status = models.CharField(max_length=120, null=False, blank=False, choices=status_choices)
updated_at = models.DateField(null=True, blank=True)
class Meta:
db_table = 'Lists'
verbose_name = 'Задача'
verbose_name_plural = 'Задачи'
def __str__(self):
return f'{self.id}. {self.status}: {self.description}' | [
"[email protected]"
] | |
f53ac3f6c538688800be418ff966c4e0919f43ec | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_248/ch81_2020_04_12_22_18_46_334181.py | 8e1cf83d9390b88d1079f2c7a2e6970a6b74812b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def interseccao_valores(dic1,dic2):
    v1 = dic1.values()
    v2 = dic2.values()
    lista = []
    for valor in v1:
        if valor in v2:
            lista.append(valor)
    return lista
"[email protected]"
] | |
505e5e0ce0cb191a5ec404f1e81be10da0578bf5 | 268d9c21243e12609462ebbd6bf6859d981d2356 | /Python/python_stack/Django/Dojo_ninjas/main/apps/dojo_ninjas/migrations/0002_dojo_desc.py | 58a3322cbefd8d01f3ac70e8cbe91f35e5cc03d2 | [] | no_license | dkang417/cdj | f840962c3fa8e14146588eeb49ce7dbd08b8ff4c | 9966b04af1ac8a799421d97a9231bf0a0a0d8745 | refs/heads/master | 2020-03-10T03:29:05.053821 | 2018-05-23T02:02:07 | 2018-05-23T02:02:07 | 129,166,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-08 14:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo_ninjas', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dojo',
name='desc',
field=models.TextField(null=True),
),
]
| [
"[email protected]"
] | |
ef52298f256957366a62065c6bbda48bbbfa0568 | 8efd8bcd3945d88370f6203e92b0376ca6b41c87 | /problems100_200/131_Palindrome_Partitioning.py | 4fd4acc10c6135bdd9be20744a848feda4634b56 | [] | no_license | Provinm/leetcode_archive | 732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5 | 3e72dcaa579f4ae6f587898dd316fce8189b3d6a | refs/heads/master | 2021-09-21T08:03:31.427465 | 2018-08-22T15:58:30 | 2018-08-22T15:58:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | #coding=utf-8
'''
131. Palindrome Partitioning
Given a string s, partition s such that every substring of the partition is a palindrome.
Return all possible palindrome partitioning of s.
For example, given s = "aab",
Return
[
["aa","b"],
["a","a","b"]
]
'''
class Solution:
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
if not s:
return [[]]
res = []
for idx, item in enumerate(s):
cur_s = s[:idx+1]
if self.is_p(cur_s):
r = self.partition(s[idx+1:])
for sub_item in r:
res.append([cur_s] + sub_item)
return res
def is_p(self, s):
return s == s[::-1]
s = Solution()
r = s.partition("aab")
print(r)
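# Expected output of the check above (added for clarity): [['a', 'a', 'b'], ['aa', 'b']]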
## depth-first search algorithm
| [
"[email protected]"
] | |
22082fac0984c7728a7ac71f5666b9a60a1c7171 | 15cace5f904c5c2389ca3cc02b5ff1fc029c7651 | /parsing/management/commands/scraper/test.py | cc3b4a7431673dd5f8c4b261fd80953be86ccffa | [] | no_license | ghostnoop/django-youtube-parser-asyncio | fb7146e788dfe5986ad31a45a5d5b1da918583c6 | 631bc4ddc0eed0407f09a810c334a0e9d8d0ed7a | refs/heads/main | 2023-03-26T12:57:32.248097 | 2021-03-25T11:02:54 | 2021-03-25T11:02:54 | 341,303,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | main_list = [i for i in range(100)]
size = len(main_list) // 4
a = main_list[:size]
b = (main_list[size:size * 2])
c = (main_list[size * 2:size * 3])
d = (main_list[size * 3:])
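# Note (added for clarity): with 100 elements, size == 25, so each of the four
# slices a, b, c and d holds exactly 25 items.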
| [
"[email protected]"
] | |
fbf8ce4a8f1a8fa531b08275055edceb9aa982a6 | bad44a92fb338260f9c077689d7fa5472526c3fe | /src/python/nnfusion/jit.py | 6fd2745e160f063b2ff9cf6c47e345239698423f | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | microsoft/nnfusion | ebc4c06331b8e93dbf5e176e5ecd3382e322ff21 | bd4f6feed217a43c9ee9be16f02fa8529953579a | refs/heads/main | 2023-08-25T17:41:37.517769 | 2022-09-16T05:59:01 | 2022-09-16T05:59:01 | 252,069,995 | 872 | 157 | MIT | 2023-07-19T03:06:21 | 2020-04-01T04:15:38 | C++ | UTF-8 | Python | false | false | 6,923 | py | import copy
import functools
from inspect import isfunction, ismethod, isclass
import torch
from .jit_utils import TorchModule, get_signature
from .runtime import NNFusionRT
from .config import Config
def is_method_of_instance(obj, cls):
return ismethod(obj) and isinstance(obj.__self__, cls)
def is_subclass_of_cls(obj, cls):
return isclass(obj) and issubclass(obj, cls)
def get_nrt_forward(obj, signature, config, outputs, *inputs,
is_method=False):
"""
Return a wrapped forward function that using nnf as runtime
"""
if not isinstance(obj, torch.nn.Module):
raise AssertionError(
"Internal bug, please report to "
"https://github.com/microsoft/nnfusion"
)
output_is_tensor = isinstance(outputs, torch.Tensor)
if output_is_tensor:
outputs = [outputs]
nnf = NNFusionRT(obj, config, signature)
nnf.compile(inputs, outputs)
# TODO free outputs and only save desc?
def forward(*inputs):
results = [
torch.empty_like(output)
for output in outputs
]
if is_method:
obj, *inputs = inputs
nnf.run_method(obj, inputs, results)
else:
inputs = list(inputs)
nnf.run(inputs, results)
if output_is_tensor:
return results[0]
return results
return forward
def nrt_forward(obj, *inputs, config=None, signature=None, is_method=False):
if signature is None:
signature = get_signature(obj)
if hasattr(obj, '_orig_forward'):
# shallow copy is needed to avoid recursion
# call instance forward -> call nnf_forward -> call instance forward
obj_ = copy.copy(obj)
obj_.forward = obj._orig_forward
obj = obj_
outputs = obj(*inputs)
def jit_class_method_using_decorator():
"""
Check if obj is a class method with @nnfusion.jit decorator.
The cases of decorating class method with the @ symbol or applying it
as function are different.
"""
return isinstance(inputs[0], torch.nn.Module)
if jit_class_method_using_decorator():
self, *inputs = inputs
# shallow copy is needed to avoid recursion when using jit as decorator:
# export onnx -> call forward to trace -> call nnf jit func -> export onnx
self_ = copy.copy(self)
def forward(*args):
if forward.first_call:
forward.first_call = False
return obj(self, *args)
# handle the case that jit target function will call `forward`
return self.forward(*args)
forward.first_call = True
self_.forward = forward
return get_nrt_forward(self_, signature, config, outputs,
*inputs, is_method=True)
if isfunction(obj) or is_method_of_instance(obj, torch.nn.Module):
return get_nrt_forward(TorchModule(obj), signature, config, outputs,
*inputs)
return get_nrt_forward(obj, signature, config, outputs, *inputs)
def parse_config(tune, tuning_steps, config):
if config is None:
config = Config()
elif type(config) is dict:
config = Config(config)
if not type(config) is Config:
raise TypeError(
"Expected optional 'config' argument of type dict or "
f"nnfusion.Config but found {config}"
)
if tuning_steps is not None:
if not isinstance(tuning_steps, int):
raise TypeError(
"Expected optional 'tuning_steps' argument of type int "
f"but found {tuning_steps}"
)
if tune is False:
raise ValueError(
f"Conflict is detected: tune={tune} and "
f"tuning_steps={tuning_steps}"
)
tune = True
config['kernel_tuning_steps'] = tuning_steps
if tune is not None:
if not isinstance(tune, bool):
raise TypeError(
"Expected optional 'tune' argument of type bool "
f"but found {tune}"
)
config['antares_mode'] = tune
return config
def check_obj_type(obj):
if not (
isfunction(obj)
or isinstance(obj, torch.nn.Module)
or is_subclass_of_cls(obj, torch.nn.Module)
or is_method_of_instance(obj, torch.nn.Module)
):
raise TypeError(
"Expected function or torch.nn.Module instance/method/class "
f"but found {obj}"
)
def jit_class(obj, config):
"""
Return jitted class using dynamic inheritance to override the forward
function and keep its signature.
"""
class JITModule(obj):
@jit(config=config,
_signature='.'.join([get_signature(obj), 'forward']))
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs)
return JITModule
def jit(obj=None, *, tune=None, tuning_steps=None, config=None, _signature=None):
"""
Parameters:
obj (function, `torch.nn.Module` instance/method/class):
The target object to be traced. When `obj` is an instance or a
class, it is equivalent to trace its `forward` function.
tune (Optional[bool]):
Whether to tune kernel. By default it follows `config`.
If set, it overwrites `config`.
tuning_steps (Optional[int]):
Number of kernel tuning steps. By default it follows `config`.
If set, it overwrites `config` and `tune`.
config (Optional[dict, nnfusion.Config]):
NNFusion compilation config.
By default it will be set to `nnfusion.Config()`.
Pass a `dict` to overwrite default config or directly pass an
instance of `nnfusion.Config`.
For example, `@nnfusion.jit(tune=True,
config={'kernel_tuning_steps': 42})`
For more flags information, please execute the command `nnfusion`
in the terminal.
"""
config = parse_config(tune, tuning_steps, config)
def _jit(_obj):
check_obj_type(_obj)
if is_subclass_of_cls(_obj, torch.nn.Module):
return jit_class(_obj, config)
@functools.wraps(_obj)
def wrapper(*args): # TODO support kwargs?
if wrapper.forward is None:
wrapper.forward = nrt_forward(_obj, *args,
config=config,
signature=_signature)
return wrapper.forward(*args)
wrapper.forward = None
if isinstance(_obj, torch.nn.Module):
_obj._orig_forward = _obj.forward
_obj.forward = wrapper
return _obj
return wrapper
if obj is None:
return _jit
return _jit(obj)
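# Illustrative usage sketch (added for clarity; the model below is hypothetical,
# the decorator arguments mirror the docstring above):
#
#   @nnfusion.jit(tune=True, config={'kernel_tuning_steps': 42})
#   class MyModel(torch.nn.Module):
#       def forward(self, x):
#           return x * 2
#
# Decorating a torch.nn.Module subclass routes its `forward` through `jit_class`;
# decorating an instance, method or plain function wraps it so that the first
# call traces and compiles with NNFusionRT and subsequent calls reuse the
# compiled kernels.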
| [
"[email protected]"
] | |
3d27cf4f50a9cc4bd469bd18977762b572f062a1 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /company/new_problem/look_week_into_different_child/woman.py | adf36b9a017251053bd4003aaba072a84a85d48d | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py |
#! /usr/bin/env python
def right_week_or_little_person(str_arg):
seem_work_for_next_man(str_arg)
print('eye')
def seem_work_for_next_man(str_arg):
print(str_arg)
if __name__ == '__main__':
right_week_or_little_person('place')
| [
"[email protected]"
] | |
419801dc9b41a351205b81a2536848b549bcdca3 | 67a48a7a2db56247fdd84474efa35124565fd8b9 | /Codeforces/1567/1567a.py | d8ac3e266bff074dc1c8d5d2ab0d617f691e4d6f | [] | no_license | qazz625/Competitive-Programming-Codes | e3de31f9276f84e919a6017b2cf781c946809862 | e5df9cdc4714d78b7b6a7535ed7a45e07d3781c3 | refs/heads/master | 2022-08-30T07:57:55.172867 | 2022-08-10T08:02:07 | 2022-08-10T08:02:07 | 242,182,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | t = int(input())
for _ in range(t):
n = int(input())
arr = []
s = input()
for x in s:
if x == 'L' or x == 'R':
arr += [x]
elif x == 'D':
arr += ['U']
else:
arr += ['D']
print(*arr, sep='')
| [
"[email protected]"
] | |
f95eb6c548d33fdfb4e4c5bca3aec825ebb08bec | 8957fd60446378ba77d5920c883935bac9275933 | /btools/building/customobj.py | f74c400d927c3004785f0cf10d68d480f9d7714c | [
"MIT"
] | permissive | ranjian0/building_tools | 37f647608873288db3346bc7d2d9e2c97fbefabe | 4a5950ed712b41fa3b953ea4ac3e1b1db8d5f489 | refs/heads/master | 2023-09-04T01:53:35.926031 | 2023-03-12T09:58:05 | 2023-03-12T09:58:05 | 123,632,239 | 831 | 94 | MIT | 2021-02-08T12:58:09 | 2018-03-02T21:22:22 | Python | UTF-8 | Python | false | false | 10,758 | py | """
Tools to allow users to place custom meshes on a building
"""
import bpy
import bmesh
from mathutils import Matrix, Vector
from bpy.props import PointerProperty
from .facemap import (
FaceMap,
add_faces_to_map,
add_facemap_for_groups
)
from ..utils import (
select,
local_xyz,
bm_to_obj,
crash_safe,
bm_from_obj,
popup_message,
calc_faces_median,
calc_verts_median,
get_bounding_verts,
calc_face_dimensions,
bmesh_from_active_object,
subdivide_face_vertically,
subdivide_face_horizontally,
get_selected_face_dimensions,
)
from ..utils import VEC_UP, VEC_FORWARD
from .array import ArrayProperty, ArrayGetSet
from .sizeoffset import SizeOffsetProperty, SizeOffsetGetSet
class CustomObjectProperty(bpy.types.PropertyGroup, SizeOffsetGetSet, ArrayGetSet):
array: PointerProperty(type=ArrayProperty)
size_offset: PointerProperty(type=SizeOffsetProperty)
def init(self, wall_dimensions):
self["wall_dimensions"] = wall_dimensions
self.size_offset.init(
(self["wall_dimensions"][0] / self.count, self["wall_dimensions"][1]),
default_size=(1.0, 1.0),
default_offset=(0.0, 0.0),
)
def draw(self, context, layout):
box = layout.box()
self.size_offset.draw(context, box)
layout.prop(self.array, "count")
@crash_safe
def add_custom_execute(self, context):
custom_obj = context.scene.btools_custom_object
if not custom_obj:
# Custom object has not been assigned
self.report({'INFO'}, "No Object Selected!")
return {"CANCELLED"}
if custom_obj.users == 0 or custom_obj.name not in context.view_layer.objects:
# Object was already deleted
self.report({'INFO'}, "Object has been deleted!")
return {"CANCELLED"}
self.props.init(get_selected_face_dimensions(context))
apply_transforms(context, custom_obj)
place_custom_object(context, self.props, custom_obj)
transfer_materials(custom_obj, context.object)
return {'FINISHED'}
class BTOOLS_OT_add_custom(bpy.types.Operator):
"""Place custom meshes on the selected faces"""
bl_idname = "btools.add_custom"
bl_label = "Add Custom Geometry"
bl_options = {"REGISTER", "UNDO", "PRESET"}
props: PointerProperty(type=CustomObjectProperty)
@classmethod
def poll(cls, context):
return context.object is not None and context.mode == "EDIT_MESH"
def execute(self, context):
add_facemap_for_groups([FaceMap.CUSTOM])
return add_custom_execute(self, context)
def draw(self, context):
self.props.draw(context, self.layout)
def apply_transforms(context, obj):
# -- store the current active object
mode_previous = context.mode
active_previous = context.active_object
# -- switch to object mode, if we are not already there
if context.mode != "OBJECT":
bpy.ops.object.mode_set(mode='OBJECT')
# -- make obj the active object and select it
bpy.context.view_layer.objects.active = obj
select(bpy.context.view_layer.objects, False)
obj.select_set(True)
# -- apply transform
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
# -- resume the previous state
bpy.context.view_layer.objects.active = active_previous
select(bpy.context.view_layer.objects, False)
active_previous.select_set(True)
bpy.ops.object.mode_set(mode=mode_previous.replace('_MESH', ""))
def place_custom_object(context, prop, custom_obj):
with bmesh_from_active_object(context) as bm:
faces = [face for face in bm.faces if face.select]
for face in faces:
face.select = False
# No support for upward/downward facing
if face.normal.z:
popup_message("Faces with Z+/Z- normals not supported!", title="Invalid Face Selection")
continue
array_faces = subdivide_face_horizontally(bm, face, widths=[prop.size_offset.size.x] * prop.count)
for aface in array_faces:
# -- Create split and place obj
split_face = create_split(bm, aface, prop.size_offset.size, prop.size_offset.offset)
place_object_on_face(bm, split_face, custom_obj, prop)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
def transfer_materials(from_object, to_obj):
"""Transfer materials from 'from_object' to 'to_object'"""
materials = from_object.data.materials
if not materials:
return
# -- copy materials
to_mats = to_obj.data.materials
if not to_mats:
# -- to_obj has no materials
list(map(to_mats.append, materials))
else:
# -- to_obj has some materials, ensure we are not duplicating
for mat in materials:
if mat.name not in to_mats:
to_mats.append(mat)
def mat_name_from_idx(idx):
for i, m in enumerate(materials):
if i == idx:
return m.name.encode()
return "".encode()
# -- store material names on the face layer
bm = bm_from_obj(from_object)
bm.faces.layers.string.verify()
mat_name = bm.faces.layers.string.active
for face in bm.faces:
face[mat_name] = mat_name_from_idx(face.material_index)
bm_to_obj(bm, from_object)
def duplicate_into_bm(bm, obj):
"""Copy all the mesh data in obj to the bm
Return the newly inserted faces
"""
max_index = len(bm.faces)
bm.from_mesh(obj.data.copy())
return [f for f in bm.faces if f.index >= max_index]
# TODO(ranjian0) refactor function (duplicated from create_window_split)
def create_split(bm, face, size, offset):
"""Use properties from SplitOffset to subdivide face into regular quads"""
wall_w, wall_h = calc_face_dimensions(face)
# horizontal split
h_widths = [wall_w / 2 + offset.x - size.x / 2, size.x, wall_w / 2 - offset.x - size.x / 2]
h_faces = subdivide_face_horizontally(bm, face, h_widths)
# vertical split
v_width = [wall_h / 2 + offset.y - size.y / 2, size.y, wall_h / 2 - offset.y - size.y / 2]
v_faces = subdivide_face_vertically(bm, h_faces[1], v_width)
return v_faces[1]
def place_object_on_face(bm, face, custom_obj, prop):
"""Place the custom_object mesh flush on the face"""
# XXX get mesh from custom_obj into bm
face_idx = face.index
custom_faces = duplicate_into_bm(bm, custom_obj)
face = [f for f in bm.faces if f.index == face_idx].pop() # restore reference
add_faces_to_map(bm, custom_faces, FaceMap.CUSTOM)
custom_verts = list({v for f in custom_faces for v in f.verts})
# (preprocess)calculate bounds of the object
# NOTE: bounds are calculated before any transform is made
dims = custom_obj.dimensions
current_size = [max(dims.x, dims.y), dims.z]
# -- move the custom faces into proper position on this face
transform_parallel_to_face(bm, custom_faces, face)
scale_to_size(bm, custom_verts, current_size, prop.size_offset.size, local_xyz(face))
# cleanup
bmesh.ops.delete(bm, geom=[face], context="FACES_ONLY")
def get_coplanar_faces(face_verts):
""" Determine extent faces that should be coplanar to walls"""
bounds = get_bounding_verts(face_verts)
coplanar_faces = (
list(bounds.topleft.link_faces) +
list(bounds.topright.link_faces) +
list(bounds.botleft.link_faces) +
list(bounds.botright.link_faces)
)
return set(coplanar_faces)
def calc_coplanar_median(face_verts):
""" Determine the median point for coplanar faces"""
return calc_faces_median(get_coplanar_faces(face_verts))
def calc_coplanar_normal(faces):
face_verts = list({v for f in faces for v in f.verts})
coplanar_faces = get_coplanar_faces(face_verts)
normals = {f.normal.copy().to_tuple(3) for f in coplanar_faces}
return Vector(normals.pop())
def transform_parallel_to_face(bm, custom_faces, target_face):
"""Move and rotate verts(mesh) so that it lies with it's
forward-extreme faces parallel to `face`
"""
target_normal = target_face.normal.copy()
target_median = target_face.calc_center_median()
verts = list({v for f in custom_faces for v in f.verts})
verts_median = calc_verts_median(verts)
custom_normal = calc_coplanar_normal(custom_faces)
try:
angle = target_normal.xy.angle_signed(custom_normal.xy)
except ValueError:
# TODO(ranjian0) Support all mesh shapes when placing along face
angle = 0
bmesh.ops.rotate(
bm, verts=verts,
cent=verts_median,
matrix=Matrix.Rotation(angle, 4, VEC_UP)
)
# -- determine the median of the faces that should be coplanar to the walls
coplanar_median = calc_coplanar_median(verts)
coplanar_median.z = verts_median.z # Compensate on Z axis for any coplanar faces not considered in calculations
# -- move the custom faces to the target face based on coplanar median
transform_diff = target_median - coplanar_median
bmesh.ops.translate(bm, verts=verts, vec=transform_diff)
def scale_to_size(bm, verts, current_size, target_size, local_dir):
"""Scale verts to target size along local direction (x and y)"""
x_dir, y_dir, z_dir = local_dir
target_width, target_height = target_size
current_width, current_height = current_size
# --scale
scale_x = x_dir * (target_width / current_width)
scale_y = y_dir * (target_height / current_height)
scale_z = Vector(map(abs, z_dir))
bmesh.ops.scale(
bm, verts=verts, vec=scale_x + scale_y + scale_z,
space=Matrix.Translation(-calc_verts_median(verts))
)
def set_face_materials(bm, faces):
mat_name = bm.faces.layers.string.active
if not mat_name:
return
obj_mats = bpy.context.object.data.materials
for f in faces:
mat = obj_mats.get(f[mat_name].decode())
f.material_index = list(obj_mats).index(mat)
classes = (CustomObjectProperty, BTOOLS_OT_add_custom)
def register_custom():
bpy.types.Scene.btools_custom_object = PointerProperty(
type=bpy.types.Object, description="Object to use for custom placement"
)
for cls in classes:
bpy.utils.register_class(cls)
def unregister_custom():
del bpy.types.Scene.btools_custom_object
for cls in classes:
bpy.utils.unregister_class(cls)
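# Rough usage sketch (assumes the add-on is enabled, a wall face is selected in
# Edit Mode, and "MyCustomMesh" is a placeholder object name; the operator path
# `btools.add_custom` follows from the bl_idname defined above):
#
#     import bpy
#     bpy.context.scene.btools_custom_object = bpy.data.objects["MyCustomMesh"]
#     bpy.ops.btools.add_custom()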
| [
"[email protected]"
] | |
d9defe5ad47eb503e1e8834bad3974c9f76ea1ae | 33fc4f5b3b92fc5d84be6c4872094264be5c2192 | /108numpy-copy-deepcopy.py | c41df93204747de028547d6883e0e74eb2590112 | [] | no_license | greenmac/python-morvan-numpy-pandas | 2ee9f572b910f65b44fe76316774fa9f604e9eb2 | 77fe010b15074e7ecabaefc07bc80bf667575d89 | refs/heads/master | 2020-04-12T14:54:47.317643 | 2018-12-22T07:18:19 | 2018-12-22T07:18:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # https://morvanzhou.github.io/tutorials/data-manipulation/np-pd/2-8-np-copy/
import numpy as np
# a = np.arange(4)
# b = a
# c = a
# d = b
# a[0] = 11
# print(a)
# print(b)
# print(c)
# print(d)
# print(b is a)
# print(d is a)
# a = np.arange(4)
# b = a
# c = a
# d = b
# a[0] = 11
# d[1:3] = [22, 33]
# print(a)
# print(b)
# print(c)
# print(d)
a = np.arange(4)
b = a
c = a
d = b
a[0] = 11
d[1:3] = [22, 33]
b = a.copy() # deep copy; after this, b is no longer linked to a
a[3] = 44
print(a)
print(b) # because b is a deep copy, b[3] is not changed; it is no longer linked to a
print(c)
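# Extra check (assumes NumPy >= 1.11, which provides np.shares_memory): plain
# assignment only binds another name to the same buffer, while .copy() allocates new storage.
print(np.shares_memory(a, d)) # True, d still refers to the same data as a
print(np.shares_memory(a, b)) # False, b was deep-copied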
print(d) | [
"[email protected]"
] | |
1344db5d293e0d52eb43ae1b44c466eb59437167 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02380/s484509438.py | 102ce45748de53d8af54b0469dd1cd39937af871 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import math
a,b,C=map(int,input().split())
radC=math.radians(C)
S=a*b*math.sin(radC)*(1/2)          # area: S = (1/2)*a*b*sin(C)
c=a**2+b**2-2*a*b*math.cos(radC)    # law of cosines: squared length of the third side
L=a+b+math.sqrt(c)                  # perimeter
h=2*S/a                             # height on side a, from S = (1/2)*a*h
values=[S,L,h]                      # renamed to avoid shadowing the built-in `list`
for i in values:
print('{:.08f}'.format(i))
| [
"[email protected]"
] | |
fd2f9e40af42009d2df03ad31acbf7115cfbdb22 | ec0e202ba914a1d9318c449130eee74223af6c98 | /rememerme/users/client.py | c79c6d6be62bb75e649fba4b1b42f040d57849c3 | [
"Apache-2.0"
] | permissive | rememerme/users-model | 0f07c76bdbabf803fc6b8f6fe4aabcde42fe0e34 | 6b62af077ae93f073e9bb831a82ca8f011697277 | refs/heads/master | 2020-05-17T00:27:01.990149 | 2014-01-18T05:54:46 | 2014-01-18T05:54:46 | 15,694,812 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | import requests
from rememerme.users.models import User
class UserClientError(Exception):
pass
def strip_trailing_slash(url):
if url[-1] == '/':
return url[:-1]
return url
class UserClient:
DEFAULT_URL = 'http://134.53.148.103'
def __init__(self, session_id, url=DEFAULT_URL):
self.url = strip_trailing_slash(url)
self.session_id = session_id
def create(self, username, password):
        raise NotImplementedError()
payload = { 'username':username, 'password':password }
r = requests.post(self.url + '/rest/v1/sessions',data=payload)
        if r.status_code != 200:
raise UserClientError(r.text)
return User.fromMap(r.json())
def update(self, user_id, username=None, password=None, email=None):
payload = {}
if username: payload['username'] = username
if password: payload['password'] = password
if email: payload['email'] = email
headers = { 'HTTP_AUTHORIZATION' : self.session_id }
r = requests.put(self.url + '/rest/v1/sessions/%s' % str(user_id), data=payload, headers=headers)
        if r.status_code != 200:
raise UserClientError(r.text)
return User.fromMap(r.json())
def get(self, user_id):
headers = { 'HTTP_AUTHORIZATION' : self.session_id }
        r = requests.get(self.url + '/rest/v1/sessions/%s' % str(user_id), headers=headers)
        if r.status_code != 200:
raise UserClientError(r.text)
return User.fromMap(r.json())
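# Minimal usage sketch (the session token, user id and email below are
# placeholders for illustration, not values from the real service):
#
#     client = UserClient(session_id='my-session-token')
#     user = client.get('some-user-id')
#     user = client.update('some-user-id', email='[email protected]')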
| [
"[email protected]"
] | |
5d1e42d4fcbfa344f5d00f5f5bbb49288f53b5ac | 559995c23c13f67ee6f342389d0db81081207d87 | /prjforinfcreditvilfw/vig/estisimurand/sall_aws_sandbox/template_onefile/esr_s1357_submit_job.py | 9640ff2bd0dc0bdddbcce8ae8bbcf6bb9621c9c1 | [] | no_license | MacroFinanceHub/PrjForInfCreditVilFW | 06a6c475d0c846c1578205e062acb0190bcce1c2 | d2a863656962691f8dc13d205a82c81823040c8b | refs/heads/main | 2023-07-19T05:31:15.992847 | 2021-08-30T14:44:14 | 2021-08-30T14:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,237 | py | """
Assume that:
1. Container on ECR has been updated to contain latest pyfan and thaijmp code
2. A task with the task name below has been submitted.
Note that for different invocations, one can adjust the default command and compute
size of registered tasks.
Submit two separate tasks, representing two different regions.
"""
import logging
import pyfan.amto.json.json as support_json
import time
import boto3aws.tools.manage_aws as boto3aws
import parameters.runspecs.compute_specs as computespec
import parameters.runspecs.estimate_specs as estispec
import projectsupport.systemsupport as proj_sys_sup
logger = logging.getLogger(__name__)
FORMAT = '%(filename)s - %(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
"""
OPTIONAL PARAMETER SPECIFICATIONS
"""
esr_run = 7
it_call_options = 2
if it_call_options == 1:
it_esti_top_which_max = 5
# A1. Main folder name
save_directory_main = 'esti_tst_onefile_xN5'
# A2. subfolder name
esrbstfilesuffix = "_esr_tstN5_aws"
# C1. ITG or x, normal, or detailed
esrbxrditg = "x"
# C2. compute spec key
esrscomputespeckey = "ng_s_t"
# C3. test scale (esti spec key)
esrssttestscale = "_tinytst_"
elif it_call_options == 2:
it_esti_top_which_max = 5
save_directory_main = 'esti_tst_onefile_ITGN5'
esrbstfilesuffix = "_esr_tstN5_aws"
esrbxrditg = "_ITG"
esrscomputespeckey = "b_ng_p_d"
esrssttestscale = "_tinytst_"
# Both regions
ar_regions = ['ce', 'ne']
# ar_regions = ['ne']
# Region-specific combo_type information
# st_cta, st_ctb = 'e', '20201025x_esr_medtst'
st_cta, st_ctb = 'e', '20201025' + esrbxrditg + esrbstfilesuffix
dc_combo_type = {'ce': {'cta': st_cta, 'ctb': st_ctb,
'ctc': 'list_tKap_mlt_ce1a2'},
'ne': {'cta': st_cta, 'ctb': st_ctb,
'ctc': 'list_tKap_mlt_ne1a2'}}
# Region specific speckey
dc_moment_key = {'ce': '3', 'ne': '4'}
momset_key = '3'
dc_compute_spec_key = {1: esrscomputespeckey, 3: 'mpoly_1',
5: esrscomputespeckey, 7: esrscomputespeckey}
dc_esti_spec_key = {1: 'esti' + esrssttestscale + 'thin_1', 3: 'esti' + esrssttestscale + 'mpoly_13',
5: 'esti_mplypostsimu_1', 7: 'esti_mplypostesti_12'}
"""
OPTIONAL PARAMETER SPECIFICATIONS
"""
# Start Batch
aws_batch = boto3aws.start_boto3_client('batch')
# This is an already registered task: see esr_s0_register_task.py
jobDefinitionName = 'a-1-thaijmp-runesr-x'
# task info
job_queue = 'Spot'
# common code esr_run specific
# 1. Speckey
compute_spec_key, esti_spec_key = dc_compute_spec_key[esr_run], dc_esti_spec_key[esr_run]
dc_speckey = {'ce': '='.join([compute_spec_key, esti_spec_key, dc_moment_key['ce'], momset_key]),
'ne': '='.join([compute_spec_key, esti_spec_key, dc_moment_key['ne'], momset_key])}
# 1b. speckey ERS3
compute_spec_key_mpoly, esti_spec_key_mpoly = dc_compute_spec_key[3], dc_esti_spec_key[3]
dc_speckey_mpoly = {'ce': '='.join([compute_spec_key_mpoly, esti_spec_key_mpoly, dc_moment_key['ce'], momset_key]),
'ne': '='.join([compute_spec_key_mpoly, esti_spec_key_mpoly, dc_moment_key['ne'], momset_key])}
# 2. Container options
array_size = estispec.estimate_set(esti_spec_key)['esti_param_vec_count']
it_memory = computespec.compute_set(compute_spec_key)['memory']
it_vcpus = computespec.compute_set(compute_spec_key)['vcpus']
# run by region
dc_responses = {}
for st_regions in ar_regions:
if esr_run == 1 or esr_run == 3:
response = aws_batch.submit_job(
jobName=jobDefinitionName + '-' + st_regions + '-' + proj_sys_sup.save_suffix_time(2),
jobQueue=job_queue,
arrayProperties={'size': array_size},
jobDefinition=jobDefinitionName,
containerOverrides={"vcpus": int(it_vcpus),
"memory": int(it_memory),
"command": ["python",
"/ThaiJMP/invoke/run_esr.py",
str(esr_run),
"-s", dc_speckey[st_regions],
"-cta", dc_combo_type[st_regions]["cta"],
"-ctb", dc_combo_type[st_regions]["ctb"],
"-ctc", dc_combo_type[st_regions]["ctc"],
"-f", save_directory_main]})
elif esr_run == 5 or esr_run == 7:
response = aws_batch.submit_job(
jobName=jobDefinitionName + '-' + st_regions + '-' + proj_sys_sup.save_suffix_time(2),
jobQueue=job_queue,
arrayProperties={'size': it_esti_top_which_max},
jobDefinition=jobDefinitionName,
containerOverrides={"vcpus": int(it_vcpus),
"memory": int(it_memory),
"command": ["python",
"/ThaiJMP/invoke/run_esr.py",
str(esr_run),
"-s", dc_speckey[st_regions],
"-cta", dc_combo_type[st_regions]["cta"],
"-ctb", dc_combo_type[st_regions]["ctb"],
"-ctc", dc_combo_type[st_regions]["ctc"],
"-cte1", dc_speckey_mpoly[st_regions],
"-cte2", str(it_esti_top_which_max),
"-f", save_directory_main]})
else:
raise ValueError(f'The specified esr_run, {esr_run=} is not allowed.')
support_json.jdump(response, 'submit_job--response', logger=logger.info)
dc_responses[st_regions] = response
# Display status
fl_start = time.time()
dc_bl_job_in_progress = {'ce': True, 'ne': True}
dc_it_wait_seconds = {'ce': 0, 'ne': 0}
while (dc_bl_job_in_progress['ce'] or dc_bl_job_in_progress['ne']):
for st_regions in ar_regions:
dc_json_batch_response = dc_responses[st_regions]
# Get Job ID
st_batch_jobID = dc_json_batch_response['jobId']
# Print Job ID
# print(f'{st_batch_jobID=}')
# While loop to check status
# describe job
dc_json_batch_describe_job_response = aws_batch.describe_jobs(jobs=[st_batch_jobID])
# pprint.pprint(dc_json_batch_describe_job_response, width=1)
it_array_size = dc_json_batch_describe_job_response['jobs'][0]['arrayProperties']['size']
if it_array_size >= 1000:
it_wait_time = 300
elif it_array_size >= 100:
it_wait_time = 120
elif it_array_size >= 10:
it_wait_time = 60
else:
it_wait_time = 20
dc_status_summary = dc_json_batch_describe_job_response['jobs'][0]['arrayProperties']['statusSummary']
if dc_status_summary:
# check status
it_completed = dc_status_summary['SUCCEEDED'] + dc_status_summary['FAILED']
if it_completed < it_array_size:
dc_bl_job_in_progress[st_regions] = True
                # sleep before polling again (wait time scales with the array size, see it_wait_time above)
time.sleep(it_wait_time)
dc_it_wait_seconds[st_regions] = round(time.time() - fl_start)
else:
dc_bl_job_in_progress[st_regions] = False
print(f'{st_regions.upper()} ({dc_it_wait_seconds[st_regions]} sec): '
f'ArrayN={it_array_size},'
f'SUCCEEDED={dc_status_summary["SUCCEEDED"]}, FAILED={dc_status_summary["FAILED"]}, '
f'RUNNING={dc_status_summary["RUNNING"]}, PENDING={dc_status_summary["PENDING"]}, '
f'RUNNABLE={dc_status_summary["RUNNABLE"]}')
else:
dc_bl_job_in_progress[st_regions] = True
# empty statussummary
time.sleep(it_wait_time)
dc_it_wait_seconds[st_regions] = round(time.time() - fl_start)
print(f'{st_regions.upper()} ({dc_it_wait_seconds[st_regions]} sec): ArrayN={it_array_size}')
| [
"[email protected]"
] | |
93b7f21504d58d63e17f2a7e1435cb78ca6999d6 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part009324.py | cbdf2c82d8e24b917f93048ece6a2aa7d84ec418 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher108113(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i3.1.2.0', 1, 1, None), Mul),
(VariableWithCount('i3.1.2.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher108113._instance is None:
CommutativeMatcher108113._instance = CommutativeMatcher108113()
return CommutativeMatcher108113._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 108112
return
yield
from collections import deque | [
"[email protected]"
] | |
ff3f576564a64698fd39d488aee3b2df3873b01e | 9d8e2dd4441c50b443390f76c899ad1f46c42c0e | /mit_intro_algos/max_heap.py | 13d0a325af33fa82b8c19924971ba9c0b20d5f14 | [] | no_license | vikramjit-sidhu/algorithms | 186ec32de471386ce0fd6b469403199a5e3bbc6d | cace332fc8e952db76c19e200cc91ec8485ef14f | refs/heads/master | 2021-01-01T16:20:52.071495 | 2015-08-03T17:42:29 | 2015-08-03T17:42:29 | 29,119,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,979 | py | """
Creates a max heap; can also build the heap (heapify) from a pre-created array
Uses an array (Python list, with index 0 unused) to implement the heap
My implementation of the python heapq module, but as a max heap
"""
class MaxHeap:
def __init__(self, ar=[None]):
self.A = ar
if len(self.A) > 1:
self.__create_maxheap()
def __max_heapify(self, index):
left, right = 2*index, 2*index+1
if left < len(self.A) and self.A[index] < self.A[left]:
maximum = left
else:
maximum = index
if right < len(self.A) and self.A[maximum] < self.A[right]:
maximum = right
if maximum != index:
self.A[index], self.A[maximum] = self.A[maximum], self.A[index]
self.__max_heapify(maximum)
return True
return False
def __create_maxheap(self):
if self.A[0]:
self.A.append(self.A[0])
self.A[0] = None
start_index = int((len(self.A)-1)/2)
for i in range(start_index, 0, -1):
self.__max_heapify(i)
def find_max(self):
return self.A[1]
def extract_max(self):
last_index = len(self.A) - 1
self.A[1], self.A[last_index] = self.A[last_index], self.A[1]
max_key = self.A.pop()
        self.__max_heapify(1)  # sift the swapped element down to restore the heap property
return max_key
def insert_key(self, key):
self.A.append(key)
check_index = len(self.A) - 1
parent_index = int(check_index/2)
self.__parent_updatify(parent_index, check_index)
def __parent_updatify(self, parent_index, check_index):
while parent_index >=1 and self.A[parent_index] < self.A[check_index]:
self.A[parent_index], self.A[check_index] = self.A[check_index], self.A[parent_index]
check_index, parent_index = parent_index, int(parent_index/2)
def update_key(self, key, new_key):
key_index = self.find_key(key)
self.A[key_index] = new_key
if not self.__max_heapify(key_index):
self.__parent_updatify(int(key_index/2), key_index)
def find_key(self, key):
"""
Returns index of key in array (self.A). Uses BFS.
"""
from queue import Queue
qu = Queue()
qu.put(1)
key_index = None
while not qu.empty():
element = qu.get_nowait()
if self.A[element] == key:
key_index = element
break
left, right = element*2, element*2+1
if left < len(self.A) and self.A[left] >= key:
qu.put_nowait(left)
if right < len(self.A) and self.A[right] >= key:
qu.put_nowait(right)
else:
print("Key {0} not found".format(key))
del(qu)
return key_index
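def main():
    # Minimal demo for the __main__ guard below (main() is not defined anywhere
    # else in this file); the sample values are arbitrary.
    heap = MaxHeap([None, 4, 7, 1, 9, 3])
    print("max:", heap.find_max())
    heap.insert_key(12)
    print("max after insert:", heap.find_max())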
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
f583736aeb98af156de12d7ff928aca9a305b7c8 | 711756b796d68035dc6a39060515200d1d37a274 | /output_exocyst_tags/initial_7607.py | f3e10cc911458956f628b86bc422c72bf2469275 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,587 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Sec3_GFPN" not in marker_sets:
s=new_marker_set('Sec3_GFPN')
marker_sets["Sec3_GFPN"]=s
s= marker_sets["Sec3_GFPN"]
mark=s.place_marker((19, 105, 690), (0.15, 0.78, 0.66), 21.9005)
if "Sec3_GFPC" not in marker_sets:
s=new_marker_set('Sec3_GFPC')
marker_sets["Sec3_GFPC"]=s
s= marker_sets["Sec3_GFPC"]
mark=s.place_marker((215, 753, 192), (0.15, 0.78, 0.66), 31.586)
if "Sec3_Anch" not in marker_sets:
s=new_marker_set('Sec3_Anch')
marker_sets["Sec3_Anch"]=s
s= marker_sets["Sec3_Anch"]
mark=s.place_marker((122, 745, 777), (0.15, 0.58, 0.66), 26.9335)
if "Sec5_GFPN" not in marker_sets:
s=new_marker_set('Sec5_GFPN')
marker_sets["Sec5_GFPN"]=s
s= marker_sets["Sec5_GFPN"]
mark=s.place_marker((285, 668, 783), (0.38, 0.24, 0.37), 21.9005)
if "Sec5_GFPC" not in marker_sets:
s=new_marker_set('Sec5_GFPC')
marker_sets["Sec5_GFPC"]=s
s= marker_sets["Sec5_GFPC"]
mark=s.place_marker((266, 354, 710), (0.38, 0.24, 0.37), 31.586)
if "Sec6_GFPN" not in marker_sets:
s=new_marker_set('Sec6_GFPN')
marker_sets["Sec6_GFPN"]=s
s= marker_sets["Sec6_GFPN"]
mark=s.place_marker((732, 670, 594), (0.84, 0.98, 0.24), 21.9005)
if "Sec6_GFPC" not in marker_sets:
s=new_marker_set('Sec6_GFPC')
marker_sets["Sec6_GFPC"]=s
s= marker_sets["Sec6_GFPC"]
mark=s.place_marker((696, 107, 386), (0.84, 0.98, 0.24), 31.586)
if "Sec6_Anch" not in marker_sets:
s=new_marker_set('Sec6_Anch')
marker_sets["Sec6_Anch"]=s
s= marker_sets["Sec6_Anch"]
mark=s.place_marker((558, 299, 781), (0.84, 0.78, 0.24), 26.9335)
if "Sec8_GFPC" not in marker_sets:
s=new_marker_set('Sec8_GFPC')
marker_sets["Sec8_GFPC"]=s
s= marker_sets["Sec8_GFPC"]
mark=s.place_marker((428, 270, 711), (0.62, 0.67, 0.45), 31.586)
if "Sec8_Anch" not in marker_sets:
s=new_marker_set('Sec8_Anch')
marker_sets["Sec8_Anch"]=s
s= marker_sets["Sec8_Anch"]
mark=s.place_marker((877, 991, 805), (0.62, 0.47, 0.45), 26.9335)
if "Sec10_GFPN" not in marker_sets:
s=new_marker_set('Sec10_GFPN')
marker_sets["Sec10_GFPN"]=s
s= marker_sets["Sec10_GFPN"]
mark=s.place_marker((899, 576, 943), (0, 0.91, 0), 21.9005)
if "Sec10_GFPC" not in marker_sets:
s=new_marker_set('Sec10_GFPC')
marker_sets["Sec10_GFPC"]=s
s= marker_sets["Sec10_GFPC"]
mark=s.place_marker((671, 362, 423), (0, 0.91, 0), 31.586)
if "Sec10_Anch" not in marker_sets:
s=new_marker_set('Sec10_Anch')
marker_sets["Sec10_Anch"]=s
s= marker_sets["Sec10_Anch"]
mark=s.place_marker((699, 105, 883), (0, 0.71, 0), 26.9335)
if "Sec15_GFPN" not in marker_sets:
s=new_marker_set('Sec15_GFPN')
marker_sets["Sec15_GFPN"]=s
s= marker_sets["Sec15_GFPN"]
mark=s.place_marker((340, 501, 893), (0.11, 0.51, 0.86), 21.9005)
if "Sec15_GFPC" not in marker_sets:
s=new_marker_set('Sec15_GFPC')
marker_sets["Sec15_GFPC"]=s
s= marker_sets["Sec15_GFPC"]
mark=s.place_marker((964, 729, 337), (0.11, 0.51, 0.86), 31.586)
if "Sec15_Anch" not in marker_sets:
s=new_marker_set('Sec15_Anch')
marker_sets["Sec15_Anch"]=s
s= marker_sets["Sec15_Anch"]
mark=s.place_marker((486, 503, 223), (0.11, 0.31, 0.86), 26.9335)
if "Exo70_GFPN" not in marker_sets:
s=new_marker_set('Exo70_GFPN')
marker_sets["Exo70_GFPN"]=s
s= marker_sets["Exo70_GFPN"]
mark=s.place_marker((472, 868, 488), (0.89, 0.47, 0.4), 21.9005)
if "Exo70_GFPC" not in marker_sets:
s=new_marker_set('Exo70_GFPC')
marker_sets["Exo70_GFPC"]=s
s= marker_sets["Exo70_GFPC"]
mark=s.place_marker((333, 100, 187), (0.89, 0.47, 0.4), 31.586)
if "Exo70_Anch" not in marker_sets:
s=new_marker_set('Exo70_Anch')
marker_sets["Exo70_Anch"]=s
s= marker_sets["Exo70_Anch"]
mark=s.place_marker((147, 620, 939), (0.89, 0.27, 0.4), 26.9335)
if "Exo84_GFPN" not in marker_sets:
s=new_marker_set('Exo84_GFPN')
marker_sets["Exo84_GFPN"]=s
s= marker_sets["Exo84_GFPN"]
mark=s.place_marker((573, 301, 997), (0.5, 0.7, 0), 31.586)
if "Exo84_GFPC" not in marker_sets:
s=new_marker_set('Exo84_GFPC')
marker_sets["Exo84_GFPC"]=s
s= marker_sets["Exo84_GFPC"]
mark=s.place_marker((585, 771, 647), (0.5, 0.7, 0), 31.586)
if "Exo84_Anch" not in marker_sets:
s=new_marker_set('Exo84_Anch')
marker_sets["Exo84_Anch"]=s
s= marker_sets["Exo84_Anch"]
mark=s.place_marker((183, 347, 23), (0.5, 0.5, 0), 26.9335)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
9fe14f76ed7f167080c56d6ae5377451ea028db9 | 607241e619ca499121106b218a5e00ac5244bda3 | /analysis/zeldovich_enzo_mass.py | 808a1269774d71bef4bd037a05e3c33e5614d2a5 | [] | no_license | bvillasen/cosmo_sims | 37caea950c7be0626a5170333bfe734071c58124 | 8b20dc05842a22ea50ceb3d646037d2e66fc8c9b | refs/heads/master | 2020-04-22T23:22:28.670894 | 2020-01-02T23:32:39 | 2020-01-02T23:32:39 | 114,167,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,713 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import yt
dev_dir = '/home/bruno/Desktop/Dropbox/Developer/'
cosmo_dir = dev_dir + 'cosmo_sims/'
toolsDirectory = cosmo_dir + "tools/"
sys.path.extend([toolsDirectory ] )
from load_data_cholla import load_snapshot_data
from internal_energy import get_internal_energy, get_temp, get_Temperaure_From_Flags_DE
# from load_data_enzo import load_snapshot_enzo
from cosmo_constants import *
from tools import create_directory
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nSnap = rank
# rank = 0
dataDir = '/raid/bruno/data/'
# dataDir = '/home/bruno/Desktop/data/'
data_set = 'enzo_simple_beta_convDE'
startSnap = 27
enzoDir = dataDir + 'cosmo_sims/enzo/ZeldovichPancake_HLLC/'
outDir = dev_dir + 'figures/zeldovich_mass/'
if rank == 0:
create_directory( outDir )
a_list = []
gamma = 5./3
j_indx = 0
i_indx = 0
L = 64.
n = 256
dx = L / ( n )
x = np.arange(0, 256, 1)* dx + 0.5*dx
dv = (dx*1e3)**3
chollaDir_0 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PLMC_HLLC_VL_eta0.001_0.030_z1/'
chollaDir_1 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PLMP_HLLC_VL_eta0.001_0.030_z1/'
chollaDir_2 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMC_HLLC_VL_eta0.001_0.030_z1_ic0/'
chollaDir_3 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic64/'
chollaDir_4 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic32/'
chollaDir_5 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic4/'
chollaDir_6 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMP_HLLC_VL_eta0.001_0.030_z1_ic0/'
# chollaDir_3 = dataDir + 'cosmo_sims/cholla_pm/zeldovich/data_PPMC_HLLC_VL_eta0.001_0.030_z1_signStone/'
dir_list = [ chollaDir_0, chollaDir_1, chollaDir_2, chollaDir_3, chollaDir_4, chollaDir_5, chollaDir_6 ]
labels = ['PLMC', 'PLMP', 'PPMC_ic0', 'PPMP_ic64', 'PPMP_ic32', 'PPMP_ic4', 'PPMP_ic0', ]
out_file_name = 'zeldovich_mass.png'
#Plot UVB uvb_rates
nrows=1
ncols = 1
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10*ncols,8*nrows))
lw = 3
for i,chollaDir in enumerate(dir_list):
print chollaDir
mass = []
z = []
for nSnap in range(50):
data_cholla = load_snapshot_data( nSnap, chollaDir )
current_z = data_cholla['current_z']
dens_ch = data_cholla['gas']['density'][...]
mass_tot = dens_ch.sum() / dv
z.append(current_z)
mass.append( mass_tot )
# print mass
ax.plot( z, mass, label=labels[i] )
ax.legend()
ax.set_xlabel('Redshift')
ax.set_ylabel(r'Mass [$\mathrm{M}_{\odot}/h$ ]')
fig.savefig( outDir+out_file_name, bbox_inches='tight', dpi=100) | [
"[email protected]"
] | |
1f4b6688675f5b730dc3dd73a877fc56530df03b | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/W_w_Mgt_to_C_focus_div/ch032/Tcrop_s255_p20_j15/pyr_6s/L3/step09_6side_L3.py | bb36cd8c779761d81788e6554c71dce596961f80 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,455 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### the path of the currently executed step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path so we can find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_W_w_M_to_Cx_Cy_combine import W_w_M_to_Cx_Cy
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_W_w_M_to_Cx_Cy
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
use_what_gen_op = W_w_M_to_Cx_Cy( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )
use_what_train_step = Train_step_W_w_M_to_Cx_Cy( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15) )
use_hid_ch = 32
import time
start_time = time.time()
###############################################################################################################################################################################################
##################################
### 6side1
##################################
##### 5side1
# side1, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1 = [6, 0, 0, 0, 0, 0, 6]
# side2, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1 = [6, 1, 0, 0, 0, 1, 6]
pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1 = [6, 2, 0, 0, 0, 2, 6]
pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1 = [6, 3, 0, 0, 0, 3, 6]
pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1 = [6, 4, 0, 0, 0, 4, 6]
# side3, 1 3 "6" 10 15 21 28 36 45 55, 10
pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1 = [6, 1, 1, 0, 1, 1, 6]
pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1 = [6, 2, 1, 0, 1, 2, 6]
pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1 = [6, 3, 1, 0, 1, 3, 6]
pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1 = [6, 2, 2, 0, 2, 2, 6]
pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1 = [6, 3, 2, 0, 2, 3, 6]
pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1 = [6, 3, 3, 0, 3, 3, 6]
pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1 = [6, 4, 1, 0, 1, 4, 6]
pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1 = [6, 4, 2, 0, 2, 4, 6]
pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1 = [6, 4, 3, 0, 3, 4, 6]
pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1 = [6, 4, 4, 0, 4, 4, 6]
# side4, 1 3 6 "10" 15 21 28 36 45 55, 20
pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1 = [6, 1, 1, 1, 1, 1, 6]
pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1 = [6, 2, 1, 1, 1, 2, 6]
pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1 = [6, 3, 1, 1, 1, 3, 6]
pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1 = [6, 2, 2, 1, 2, 2, 6]
pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1 = [6, 3, 2, 1, 2, 3, 6]
pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1 = [6, 3, 3, 1, 3, 3, 6]
pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1 = [6, 2, 2, 2, 2, 2, 6]
pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1 = [6, 3, 2, 2, 2, 3, 6]
pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1 = [6, 3, 3, 2, 3, 3, 6]
pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1 = [6, 3, 3, 3, 3, 3, 6]
pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1 = [6, 4, 1, 1, 1, 4, 6]
pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1 = [6, 4, 2, 1, 2, 4, 6]
pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1 = [6, 4, 3, 1, 3, 4, 6]
pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1 = [6, 4, 2, 2, 2, 4, 6]
pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1 = [6, 4, 3, 2, 3, 4, 6]
pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1 = [6, 4, 3, 3, 3, 4, 6]
pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1 = [6, 4, 4, 1, 4, 4, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1 = [6, 4, 4, 2, 4, 4, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1 = [6, 4, 4, 3, 4, 4, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1 = [6, 4, 4, 4, 4, 4, 6]
##### 5side2
# side2, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1 = [6, 5, 0, 0, 0, 5, 6]
# side3, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1 = [6, 5, 1, 0, 1, 5, 6]
pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1 = [6, 5, 2, 0, 2, 5, 6]
pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1 = [6, 5, 3, 0, 3, 5, 6]
pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1 = [6, 5, 4, 0, 4, 5, 6]
# side4, 1 3 "6" 10 15 21 28 36 45 55, 10
pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1 = [6, 5, 1, 1, 1, 5, 6]
pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1 = [6, 5, 2, 1, 2, 5, 6]
pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1 = [6, 5, 3, 1, 3, 5, 6]
pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1 = [6, 5, 2, 2, 2, 5, 6]
pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1 = [6, 5, 3, 2, 3, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1 = [6, 5, 3, 3, 3, 5, 6]
pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1 = [6, 5, 4, 1, 4, 5, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1 = [6, 5, 4, 2, 4, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1 = [6, 5, 4, 3, 4, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1 = [6, 5, 4, 4, 4, 5, 6]
##### 5side3
# side3, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1 = [6, 5, 5, 0, 5, 5, 6]
# side4, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1 = [6, 5, 5, 1, 5, 5, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1 = [6, 5, 5, 2, 5, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1 = [6, 5, 5, 3, 5, 5, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1 = [6, 5, 5, 4, 5, 5, 6]
##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1 = [6, 5, 5, 5, 5, 5, 6]
##################################
### 6side2
##################################
##### 5side2
# side2, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2 = [6, 6, 0, 0, 0, 6, 6]
# side3, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2 = [6, 6, 1, 0, 1, 6, 6]
pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2 = [6, 6, 2, 0, 2, 6, 6]
pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2 = [6, 6, 3, 0, 3, 6, 6]
pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2 = [6, 6, 4, 0, 4, 6, 6]
# side4, 1 3 "6" 10 15 21 28 36 45 55, 10
pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2 = [6, 6, 1, 1, 1, 6, 6]
pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2 = [6, 6, 2, 1, 2, 6, 6]
pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2 = [6, 6, 3, 1, 3, 6, 6]
pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2 = [6, 6, 2, 2, 2, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2 = [6, 6, 3, 2, 3, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2 = [6, 6, 3, 3, 3, 6, 6]
pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2 = [6, 6, 4, 1, 4, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2 = [6, 6, 4, 2, 4, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2 = [6, 6, 4, 3, 4, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2 = [6, 6, 4, 4, 4, 6, 6]
##### 5side3
# side3, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2 = [6, 6, 5, 0, 5, 6, 6]
# side4, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2 = [6, 6, 5, 1, 5, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2 = [6, 6, 5, 2, 5, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2 = [6, 6, 5, 3, 5, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2 = [6, 6, 5, 4, 5, 6, 6]
##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2 = [6, 6, 5, 5, 5, 6, 6]
##################################
### 6side3
##################################
##### 5side3
# side3, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3 = [6, 6, 6, 0, 6, 6, 6]
# side4, 1 "3" 6 10 15 21 28 36 45 55, 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3 = [6, 6, 6, 1, 6, 6, 6]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3 = [6, 6, 6, 2, 6, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3 = [6, 6, 6, 3, 6, 6, 6]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3 = [6, 6, 6, 4, 6, 6, 6]
##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3 = [6, 6, 6, 5, 6, 6, 6]
##################################
### 6side4
##################################
##### 5side4
# side4, "1" 3 6 10 15 21 28 36 45 55, 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4 = [6, 6, 6, 6, 6, 6, 6]
###############################################################################################################################################################################################
###############################################################################################################################################################################################
###############################################################################################################################################################################################
###################
############# 1s1
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###################
############# 1s2
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###################
############# 1s3
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s2
##### 3s1
### 4s1
ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s3
##### 3s1
### 4s1
ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s3
### 4s1
ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s3
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###################
############# 1s4
######### 2s1
##### 3s1
### 4s1
ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s2
##### 3s1
### 4s1
ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s3
##### 3s1
### 4s1
ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s3
### 4s1
ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s3
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
######### 2s4
##### 3s1
### 4s1
ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s2
### 4s1
ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s3
### 4s1
ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s3
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##### 3s4
### 4s1
ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s2
ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s3
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
### 4s4
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###############################################################################################################################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1), dtype=np.float32)
use_model = ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1
use_model = use_model.build()
result = use_model.generator(data)
print(result[0].shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
print(use_model.model_describe)
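    # --- Illustrative note (not part of the original file) ---
    # Any other variant declared above can be exercised the same way, e.g.:
    #   use_model = ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4.build()
    #   result    = use_model.generator(data)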
| [
"[email protected]"
] | |
00c84dfe665dbb738ca50b70040e7c837c595038 | 4f9930e15c02cb9a09af70d66b794480b8c9bd57 | /hail/python/hailtop/pipeline/task.py | a2472c4ff18e3f5de5f9d7a3a7b3edab693c4253 | [
"MIT"
] | permissive | gsarma/hail | d76aa16d718618c1915b629077fd80cbc4d3b526 | 6aa2d945bb7d57c463d5ab9afb686f18c2941b25 | refs/heads/master | 2020-06-20T06:09:43.408615 | 2019-10-29T21:40:23 | 2019-10-29T21:40:23 | 196,250,453 | 0 | 0 | MIT | 2019-07-10T17:44:48 | 2019-07-10T17:44:47 | null | UTF-8 | Python | false | false | 12,654 | py | import re
from .resource import ResourceFile, ResourceGroup
from .utils import PipelineException
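# Helper used by Task below: registers a resource in the given set; for a
# ResourceGroup (or a file that belongs to one) the group's member files are
# registered too (include_rg controls whether the group object itself is added).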
def _add_resource_to_set(resource_set, resource, include_rg=True):
if isinstance(resource, ResourceGroup):
rg = resource
if include_rg:
resource_set.add(resource)
else:
resource_set.add(resource)
if isinstance(resource, ResourceFile) and resource._has_resource_group():
rg = resource._get_resource_group()
else:
rg = None
if rg is not None:
for _, resource_file in rg._resources.items():
resource_set.add(resource_file)
class Task:
"""
Object representing a single job to execute.
Examples
--------
Create a pipeline object:
>>> p = Pipeline()
Create a new pipeline task that prints hello to a temporary file `t.ofile`:
>>> t = p.new_task()
>>> t.command(f'echo "hello" > {t.ofile}')
Write the temporary file `t.ofile` to a permanent location
>>> p.write_output(t.ofile, 'hello.txt')
Execute the DAG:
>>> p.run()
Notes
-----
This class should never be created directly by the user. Use `Pipeline.new_task` instead.
"""
_counter = 0
_uid_prefix = "__TASK__"
_regex_pattern = r"(?P<TASK>{}\d+)".format(_uid_prefix)
@classmethod
def _new_uid(cls):
uid = "{}{}".format(cls._uid_prefix, cls._counter)
cls._counter += 1
return uid
def __init__(self, pipeline, name=None, attributes=None):
self._pipeline = pipeline
self.name = name
self.attributes = attributes
self._cpu = None
self._memory = None
self._storage = None
self._image = None
self._command = []
self._resources = {} # dict of name to resource
self._resources_inverse = {} # dict of resource to name
self._uid = Task._new_uid()
self._inputs = set()
self._internal_outputs = set()
self._external_outputs = set()
self._mentioned = set() # resources used in the command
self._valid = set() # resources declared in the appropriate place
self._dependencies = set()
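    # Resource files are created lazily: the first reference to task.<name> or
    # task['<name>'] allocates a new temporary resource file via the pipeline.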
def _get_resource(self, item):
if item not in self._resources:
r = self._pipeline._new_task_resource_file(self)
self._resources[item] = r
self._resources_inverse[r] = item
return self._resources[item]
def __getitem__(self, item):
return self._get_resource(item)
def __getattr__(self, item):
return self._get_resource(item)
def _add_internal_outputs(self, resource):
_add_resource_to_set(self._internal_outputs, resource, include_rg=False)
def _add_inputs(self, resource):
_add_resource_to_set(self._inputs, resource, include_rg=False)
def declare_resource_group(self, **mappings):
"""
Declare a resource group for a task.
Examples
--------
Declare a resource group:
>>> input = p.read_input_group(bed='data/example.bed',
... bim='data/example.bim',
... fam='data/example.fam')
>>> t = p.new_task()
>>> t.declare_resource_group(tmp1={'bed': '{root}.bed',
... 'bim': '{root}.bim',
... 'fam': '{root}.fam',
... 'log': '{root}.log'})
>>> t.command(f"plink --bfile {input} --make-bed --out {t.tmp1}")
Caution
-------
Be careful when specifying the expressions for each file as this is Python
code that is executed with `eval`!
Parameters
----------
mappings: :obj:`dict` of :obj:`str` to :obj:`dict` of :obj:`str` to :obj:`str`
Keywords are the name(s) of the resource group(s). The value is a dict
mapping the individual file identifier to a string expression representing
how to transform the resource group root name into a file. Use `{root}`
for the file root.
Returns
-------
:class:`.Task`
Same task object with resource groups set.
"""
for name, d in mappings.items():
assert name not in self._resources
if not isinstance(d, dict):
raise PipelineException(f"value for name '{name}' is not a dict. Found '{type(d)}' instead.")
rg = self._pipeline._new_resource_group(self, d)
self._resources[name] = rg
_add_resource_to_set(self._valid, rg)
return self
def depends_on(self, *tasks):
"""
Explicitly set dependencies on other tasks.
Examples
--------
Create the first task:
>>> t1 = p.new_task()
>>> t1.command(f'echo "hello"')
Create the second task that depends on `t1`:
>>> t2 = p.new_task()
>>> t2.depends_on(t1)
>>> t2.command(f'echo "world"')
Notes
-----
Dependencies between tasks are automatically created when resources from
one task are used in a subsequent task. This method is only needed when
no intermediate resource exists and the dependency needs to be explicitly
set.
Parameters
----------
tasks: :class:`.Task`, varargs
Sequence of tasks to depend on.
Returns
-------
:class:`.Task`
Same task object with dependencies set.
"""
        for t in tasks:
            self._dependencies.add(t)
        return self
def command(self, command):
"""
Set the task's command to execute.
Examples
--------
Simple task with no output files:
>>> p = Pipeline()
>>> t1 = p.new_task()
>>> t1.command(f'echo "hello"')
>>> p.run()
Simple task with one temporary file `t2.ofile` that is written to a
permanent location:
>>> p = Pipeline()
>>> t2 = p.new_task()
>>> t2.command(f'echo "hello world" > {t2.ofile}')
>>> p.write_output(t2.ofile, 'output/hello.txt')
>>> p.run()
Two tasks with a file interdependency:
>>> p = Pipeline()
>>> t3 = p.new_task()
>>> t3.command(f'echo "hello" > {t3.ofile}')
>>> t4 = p.new_task()
>>> t4.command(f'cat {t3.ofile} > {t4.ofile}')
>>> p.write_output(t4.ofile, 'output/cat_output.txt')
>>> p.run()
Specify multiple commands in the same task:
>>> p = Pipeline()
>>> t5 = p.new_task()
>>> t5.command(f'echo "hello" > {t5.tmp1}')
>>> t5.command(f'echo "world" > {t5.tmp2}')
>>> t5.command(f'echo "!" > {t5.tmp3}')
>>> t5.command(f'cat {t5.tmp1} {t5.tmp2} {t5.tmp3} > {t5.ofile}')
>>> p.write_output(t5.ofile, 'output/concatenated.txt')
>>> p.run()
Notes
-----
        This method can be called more than once. Its behavior is to
append commands to run to the set of previously defined commands
rather than overriding an existing command.
To declare a resource file of type :class:`.TaskResourceFile`, use either
the get attribute syntax of `task.{identifier}` or the get item syntax of
`task['identifier']`. If an object for that identifier doesn't exist,
then one will be created automatically (only allowed in the :meth:`.command`
method). The identifier name can be any valid Python identifier
such as `ofile5000`.
All :class:`.TaskResourceFile` are temporary files and must be written
to a permanent location using :func:`.Pipeline.write_output` if the output needs
to be saved.
Only Resources can be referred to in commands. Referencing a :class:`.Pipeline`
or :class:`.Task` will result in an error.
Parameters
----------
command: :obj:`str`
Returns
-------
:class:`.Task`
Same task object with command appended.
"""
def handler(match_obj):
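            # Rewrites each referenced resource UID into a shell-style ${uid}
            # placeholder and records the implied input/output dependencies.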
groups = match_obj.groupdict()
if groups['TASK']:
raise PipelineException(f"found a reference to a Task object in command '{command}'.")
if groups['PIPELINE']:
raise PipelineException(f"found a reference to a Pipeline object in command '{command}'.")
assert groups['RESOURCE_FILE'] or groups['RESOURCE_GROUP']
r_uid = match_obj.group()
r = self._pipeline._resource_map.get(r_uid)
if r is None:
raise PipelineException(f"undefined resource '{r_uid}' in command '{command}'.\n"
f"Hint: resources must be from the same pipeline as the current task.")
if r._source != self:
self._add_inputs(r)
if r._source is not None:
if r not in r._source._valid:
name = r._source._resources_inverse[r]
raise PipelineException(f"undefined resource '{name}'\n"
f"Hint: resources must be defined within "
"the task methods 'command' or 'declare_resource_group'")
self._dependencies.add(r._source)
r._source._add_internal_outputs(r)
else:
_add_resource_to_set(self._valid, r)
self._mentioned.add(r)
return f"${{{r_uid}}}"
from .pipeline import Pipeline # pylint: disable=cyclic-import
subst_command = re.sub(f"({ResourceFile._regex_pattern})|({ResourceGroup._regex_pattern})"
f"|({Task._regex_pattern})|({Pipeline._regex_pattern})",
handler,
command)
self._command.append(subst_command)
return self
def storage(self, storage):
"""
Set the task's storage size.
Examples
--------
Set the task's disk requirements to 1 Gi:
>>> t1 = p.new_task()
>>> (t1.storage('1Gi')
... .command(f'echo "hello"'))
Parameters
----------
storage: :obj:`str`
Returns
-------
:class:`.Task`
Same task object with storage set.
"""
self._storage = storage
return self
def memory(self, memory):
"""
Set the task's memory requirements.
Examples
--------
Set the task's memory requirement to 5GB:
>>> t1 = p.new_task()
>>> (t1.memory(5)
... .command(f'echo "hello"'))
Parameters
----------
memory: :obj:`str` or :obj:`float` or :obj:`int`
Value is in GB.
Returns
-------
:class:`.Task`
Same task object with memory requirements set.
"""
self._memory = memory
return self
def cpu(self, cores):
"""
Set the task's CPU requirements.
Examples
--------
Set the task's CPU requirement to 2 cores:
>>> t1 = p.new_task()
>>> (t1.cpu(2)
... .command(f'echo "hello"'))
Parameters
----------
cores: :obj:`str` or :obj:`float` or :obj:`int`
Returns
-------
:class:`.Task`
Same task object with CPU requirements set.
"""
self._cpu = cores
return self
def image(self, image):
"""
Set the task's docker image.
Examples
--------
Set the task's docker image to `alpine`:
>>> t1 = p.new_task()
>>> (t1.image('alpine:latest')
... .command(f'echo "hello"'))
Parameters
----------
image: :obj:`str`
Docker image to use.
Returns
-------
:class:`.Task`
Same task object with docker image set.
"""
self._image = image
return self
def _pretty(self):
s = f"Task '{self._uid}'" \
f"\tName:\t'{self.name}'" \
f"\tAttributes:\t'{self.attributes}'" \
f"\tImage:\t'{self._image}'" \
f"\tCPU:\t'{self._cpu}'" \
f"\tMemory:\t'{self._memory}'" \
f"\tStorage:\t'{self._storage}'" \
f"\tCommand:\t'{self._command}'"
return s
def __str__(self):
return self._uid
| [
"[email protected]"
] | |
921a1439c4b41746c803c1027e09f0d1502c2b93 | 55dc6e337e634acb852c570274a1d0358b7300a5 | /tests/core/intz/intz.py | 32ff67f7a362dffe3e6e8699ccb651f1b494c791 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fifoteam/veriloggen | 97ad45671f053c85f495b08a030f735fd9822146 | 23cb7251c0f126d40d249982cad33ef37902afef | refs/heads/master | 2020-05-27T00:28:37.575411 | 2017-02-20T01:47:00 | 2017-02-20T01:47:00 | 82,518,602 | 2 | 0 | null | 2017-02-20T05:02:37 | 2017-02-20T05:02:37 | null | UTF-8 | Python | false | false | 989 | py | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from veriloggen import *
def mkLed():
m = Module('blinkled')
width = m.Parameter('WIDTH', 8)
clk = m.Input('CLK')
rst = m.Input('RST')
led = m.OutputReg('LED', width)
count = m.Reg('count', 32)
m.Always(Posedge(clk))(
If(rst)(
count(0)
).Else(
If(count == 1023)(
count(0)
).Else(
count(count + 1)
)
))
m.Always(Posedge(clk))(
If(rst)(
led(0)
).Else(
If(count == 1024 - 1)(
led(IntZ())
)
))
return m
if __name__ == '__main__':
led = mkLed()
verilog = led.to_verilog('')
print(verilog)
| [
"[email protected]"
] | |
daf504ddb048bd9ff53c1be218bdef13eb0e3612 | 978d8f24f4985c61c2dce534a279abe6ffeff433 | /custom_components/blueprint/__init__.py | 7f90a41bded995d9a9e736289b3e45a104db0064 | [
"MIT"
] | permissive | JiriKursky/blueprint | 3c1ad02c4726539ab07fc407b6c53ef4c903448b | 92ae97dc5fec3a9a6e6e14031c32bbf2f1953ff6 | refs/heads/master | 2022-01-27T16:24:27.521422 | 2019-07-20T10:52:46 | 2019-07-20T10:52:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,577 | py | """
Component to integrate with blueprint.
For more details about this component, please refer to
https://github.com/custom-components/blueprint
"""
import os
from datetime import timedelta
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.util import Throttle
from .const import (
CONF_BINARY_SENSOR,
CONF_ENABLED,
CONF_NAME,
CONF_PASSWORD,
CONF_SENSOR,
CONF_SWITCH,
CONF_USERNAME,
DEFAULT_NAME,
DOMAIN_DATA,
DOMAIN,
ISSUE_URL,
PLATFORMS,
REQUIRED_FILES,
STARTUP,
VERSION,
)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ENABLED, default=True): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ENABLED, default=True): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ENABLED, default=True): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_BINARY_SENSOR): vol.All(
cv.ensure_list, [BINARY_SENSOR_SCHEMA]
),
vol.Optional(CONF_SENSOR): vol.All(cv.ensure_list, [SENSOR_SCHEMA]),
vol.Optional(CONF_SWITCH): vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
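# --- Illustrative configuration sketch (not part of the original file) ---
# Assuming the CONF_* constants map to the usual Home Assistant keys
# ("username", "password", "binary_sensor", "sensor", "switch", "name", "enabled"),
# a configuration.yaml entry accepted by CONFIG_SCHEMA could look like:
#
#   blueprint:
#     username: my_user          # hypothetical value
#     password: my_password      # hypothetical value
#     binary_sensor:
#       - name: blueprint_binary
#     sensor:
#       - name: blueprint_sensor
#     switch:
#       - enabled: false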
async def async_setup(hass, config):
"""Set up this component."""
    # Import the client from an external Python package hosted on PyPI
from sampleclient.client import Client
# Print startup message
startup = STARTUP.format(name=DOMAIN, version=VERSION, issueurl=ISSUE_URL)
_LOGGER.info(startup)
# Check that all required files are present
file_check = await check_files(hass)
if not file_check:
return False
# Create DATA dict
hass.data[DOMAIN_DATA] = {}
# Get "global" configuration.
username = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
# Configure the client.
client = Client(username, password)
hass.data[DOMAIN_DATA]["client"] = BlueprintData(hass, client)
# Load platforms
for platform in PLATFORMS:
# Get platform specific configuration
platform_config = config[DOMAIN].get(platform, {})
# If platform is not enabled, skip.
if not platform_config:
continue
for entry in platform_config:
entry_config = entry
# If entry is not enabled, skip.
if not entry_config[CONF_ENABLED]:
continue
hass.async_create_task(
discovery.async_load_platform(
hass, platform, DOMAIN, entry_config, config
)
)
return True
class BlueprintData:
    """This class handles communication and stores the data."""
def __init__(self, hass, client):
"""Initialize the class."""
self.hass = hass
self.client = client
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def update_data(self):
"""Update data."""
# This is where the main logic to update platform data goes.
try:
data = self.client.get_data()
self.hass.data[DOMAIN_DATA]["data"] = data
except Exception as error: # pylint: disable=broad-except
_LOGGER.error("Could not update data - %s", error)
async def check_files(hass):
"""Return bool that indicates if all files are present."""
# Verify that the user downloaded all files.
base = "{}/custom_components/{}/".format(hass.config.path(), DOMAIN)
missing = []
for file in REQUIRED_FILES:
fullpath = "{}{}".format(base, file)
if not os.path.exists(fullpath):
missing.append(file)
if missing:
_LOGGER.critical("The following files are missing: %s", str(missing))
returnvalue = False
else:
returnvalue = True
return returnvalue
| [
"[email protected]"
] | |
e7b1a107e606889f4d2ea63f1cc95c913cd2cef3 | 13800b7827598e76428a335559b7bf11867ec2f0 | /python/ccxt/async_support/binancecoinm.py | 62ca72174bcc92699c5987d6f42bca5163a236e1 | [
"MIT"
] | permissive | ccxt/ccxt | b40a0466f5c430a3c0c6026552ae697aa80ba6c6 | e4065f6a490e6fc4dd7a72b375428b2faa570668 | refs/heads/master | 2023-09-04T03:41:29.787733 | 2023-09-03T19:25:57 | 2023-09-03T19:25:57 | 91,253,698 | 30,798 | 8,190 | MIT | 2023-09-14T21:59:09 | 2017-05-14T15:41:56 | Python | UTF-8 | Python | false | false | 1,683 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.binance import binance
from ccxt.abstract.binancecoinm import ImplicitAPI
class binancecoinm(binance, ImplicitAPI):
def describe(self):
return self.deep_extend(super(binancecoinm, self).describe(), {
'id': 'binancecoinm',
'name': 'Binance COIN-M',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/117738721-668c8d80-b205-11eb-8c49-3fad84c4a07f.jpg',
'doc': [
'https://binance-docs.github.io/apidocs/delivery/en/',
'https://binance-docs.github.io/apidocs/spot/en',
],
},
'has': {
'CORS': None,
'spot': False,
'margin': False,
'swap': True,
'future': True,
'option': None,
'createStopMarketOrder': True,
},
'options': {
'fetchMarkets': ['inverse'],
'defaultSubType': 'inverse',
'leverageBrackets': None,
},
})
async def transfer_in(self, code: str, amount, params={}):
# transfer from spot wallet to coinm futures wallet
return await self.futuresTransfer(code, amount, 3, params)
async def transfer_out(self, code: str, amount, params={}):
# transfer from coinm futures wallet to spot wallet
return await self.futuresTransfer(code, amount, 4, params)
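# --- Illustrative usage sketch (not part of the original file) ---
# Assuming the standard async ccxt entry point; transfer_in/transfer_out are the
# methods defined above (spot -> COIN-M futures wallet and back):
#
#   import ccxt.async_support as ccxt
#   exchange = ccxt.binancecoinm({'apiKey': '...', 'secret': '...'})
#   await exchange.transfer_in('BTC', 0.01)
#   await exchange.transfer_out('BTC', 0.01)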
| [
"[email protected]"
] | |
7163f816dfd5db84ab30220ee8fb101ce0b68c6c | 66e6360325b781ed0791868765f1fd8a6303726f | /TB2009/WorkDirectory/5173 Pulse Timing/Pion_108538.py | e53460495e0c5366f4f533ec68de84b6c0a8447d | [] | no_license | alintulu/FHead2011PhysicsProject | c969639b212d569198d8fce2f424ce866dcfa881 | 2568633d349810574354ad61b0abab24a40e510e | refs/heads/master | 2022-04-28T14:19:30.534282 | 2020-04-23T17:17:32 | 2020-04-23T17:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,320 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("EventDisplay")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108538.root"),
streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(2),
mip = cms.untracked.string("MIP_EarlyRejection.txt"),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(False),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(False),
usePedestalMean = cms.untracked.bool(False)
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
process.hitcut = cms.EDFilter("HitXFilter",
maximum = cms.untracked.double(-5)
)
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100)
)
)
process.alignpion2 = cms.EDAnalyzer("AlignPulseAnalyzer",
rejectionSample = cms.untracked.int32(2),
rejectionHeight = cms.untracked.double(0.1),
output = cms.untracked.string("Time_108538_2.root"),
maxsample = cms.untracked.double(1000),
minsample = cms.untracked.double(15)
)
process.alignpion1 = cms.EDAnalyzer("AlignPulseAnalyzer",
rejectionSample = cms.untracked.int32(2),
rejectionHeight = cms.untracked.double(0.1),
output = cms.untracked.string("Time_108538_1.root"),
maxsample = cms.untracked.double(40),
minsample = cms.untracked.double(0)
)
process.p = cms.Path(
process.tbunpack *
process.ABCcut *
process.vlsbinfo *
process.hitcut *
process.alignpion1 *
process.alignpion2
)
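# --- Illustrative note (not part of the original file) ---
# A configuration fragment like this is normally executed with the CMSSW driver:
#   cmsRun Pion_108538.py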
| [
"[email protected]"
] | |
ddb0511c7da10557a74469f32fdf621eef3c6942 | 3a093f6a40e8fb24957d277ad8f4b097d08c6d04 | /result_scons/tools/cards.py | 289cbf4c6c50e49e16d0902fa369f486a474e093 | [] | no_license | dlont/FourTops2016 | ab9e953760e93b0e777b23478938efd30d640286 | 88c929bf98625735a92a31210f7233f799c5a10c | refs/heads/master | 2021-01-18T22:23:52.796080 | 2019-07-31T12:34:03 | 2019-07-31T12:34:03 | 72,439,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,985 | py | #!/usr/bin/env python
"""
Script for Higgs Combine cards creation
"""
import os
import sys
import time
import argparse
import logging
import json
from datetime import datetime
import pandas as pd
import numpy as np
import ROOT
from cards_proc_list import proc_id
from cards_syst_list import systtypelist
from cards_syst_list import syst_norm_size_list, syst_shape_size_list
from cards_bin_list import binlist
#Global definitions
def getObservation(ch,file,observable):
'''
    Fill the per-bin data counts for the given channel
'''
logging.debug("----getObservation:-----")
obs = {ch:{}}
for ibin in binlist[ch]:
histname = ibin.replace(ch,'') #Remove channel prefix e.g. mu6J2M->6J2M
histname = histname + '/' + observable
logging.debug("Observations filename: "+file.GetName())
logging.debug("Observations histname: "+histname)
integral = file.Get(histname).Integral()
logging.debug("Integral: "+str(integral))
obs[ch][ibin]=integral
return obs
def mcRate(ch,files,observable):
'''
Get MC predictions for each process
'''
logging.debug("----mcRate:-----")
rate = {}
logging.debug(files)
for proc in proc_id.keys():
rate[proc]=getObservation(ch,files[proc],observable)
return rate
def printCardHeader(arguments):
print >> arguments.outfile, '#',str(datetime.now()), arguments
print >> arguments.outfile, '-'*100
print >> arguments.outfile, 'imax', len(binlist[arguments.channel])
print >> arguments.outfile, 'jmax', len(proc_id)-1
print >> arguments.outfile, 'kmax', '*'
print >> arguments.outfile, '-'*100
def printShapeFilesBlock(arguments):
print >> arguments.outfile, '-'*100
for ibin in binlist[arguments.channel]:
histname = ibin.replace(arguments.channel,'')
histname = histname + '/' + arguments.observable
logging.debug(histname)
print >> arguments.outfile, 'shapes', 'data_obs', ibin, arguments.data, histname
for proc in proc_id.keys():
filename = arguments.sources[proc]
logging.debug(filename)
systname = ibin.replace(arguments.channel,'')+'_$SYSTEMATIC/'+arguments.observable
print >> arguments.outfile, 'shapes', proc, ibin, \
filename, histname, systname
print >> arguments.outfile, '-'*100
return
def main(arguments):
#pandas printing setting
pd.set_option('expand_frame_repr', False)
pd.set_option('max_columns', 999)
#Read-in input ROOT files
files = {}
for proc in arguments.sources.keys():
files[proc] = ROOT.TFile.Open(arguments.sources[proc],"READ")
printCardHeader(arguments)
printShapeFilesBlock(arguments)
#Get observations
datafile = ROOT.TFile.Open(arguments.data,"READ")
obs = getObservation(arguments.channel, datafile,arguments.observable)
logging.debug( obs )
#Printout observation block to file
obsline = pd.DataFrame(obs[arguments.channel], columns=binlist[arguments.channel], index=['observation'])
print >> arguments.outfile, '-'*100
print >> arguments.outfile, 'bin', obsline
print >> arguments.outfile, '-'*100
#Get MC rate predictions
rate = mcRate(arguments.channel,files,arguments.observable)
logging.debug( rate )
ch_dfs = []
for proc in proc_id.keys():
#Create new table for given process
s = pd.DataFrame('NA',
columns=binlist[arguments.channel],
index=systtypelist[arguments.channel].keys()
)
        #Fill systematics description for this process
#Normalization
df_update = pd.DataFrame.from_dict(syst_norm_size_list[arguments.channel][proc], orient='index')
df_update.columns = binlist[arguments.channel]
s.update(df_update)
#Shape
df_update = pd.DataFrame.from_dict(syst_shape_size_list[arguments.channel][proc], orient='index')
df_update.columns = binlist[arguments.channel]
s.update(df_update)
#Add process labels and id (first and second line, respectively)
processline = pd.DataFrame(proc, columns=binlist[arguments.channel], index=['process'])
s = pd.concat([s.ix[:0], processline, s.ix[0:]])
processline = pd.DataFrame(proc_id[proc], columns=binlist[arguments.channel], index=['process '])
s = pd.concat([s.ix[:1], processline, s.ix[1:]])
rateline = pd.DataFrame(rate[proc][arguments.channel], columns=binlist[arguments.channel], index=['rate'])
s = pd.concat([s.ix[:2], rateline, s.ix[2:]])
print arguments.channel, proc
logging.debug(s)
ch_dfs.append(s)
result = pd.concat(ch_dfs,axis=1)
#Add column with systematic type (normalization or shape)
lam = lambda x: systtypelist[arguments.channel][x] if x in systtypelist[arguments.channel] else ''
result.insert(0,' ',result.index.map(lam))
#Printout MC (rate and systematics) block to file
print >> arguments.outfile, 'bin', result
return 0
if __name__ == '__main__':
start_time = time.time()
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--data', help="Data rootfile", required=True)
parser.add_argument("--source", type=json.loads, dest='sources',
help='json dictionary with input definition', required=True)
parser.add_argument('--channel', help="channel",default='mu')
parser.add_argument('--observable', help="observable",default='allSF/bdt')
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
parser.add_argument(
'-d', '--debug',
help="Print lots of debugging statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-v', '--verbose',
help="Be verbose",
action="store_const", dest="loglevel", const=logging.INFO,
)
args = parser.parse_args(sys.argv[1:])
print(args)
logging.basicConfig(level=args.loglevel)
logging.info( time.asctime() )
exitcode = main(args)
logging.info( time.asctime() )
logging.info( 'TOTAL TIME IN MINUTES:' + str((time.time() - start_time) / 60.0))
sys.exit(exitcode)
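# --- Illustrative invocation (not part of the original file; file and process
# names below are hypothetical) ---
#   python cards.py --data data_mu.root \
#       --source '{"ttbar": "ttbar_mu.root", "tttt": "tttt_mu.root"}' \
#       --channel mu --observable allSF/bdt -o datacard_mu.txt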
| [
"[email protected]"
] | |
1f1529473302b02d543365662b5ea486c153d200 | 0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af | /ToLeftandRight.py | b17d2c4f0c6445bb843c71a099e74b7f273f481e | [] | no_license | EngrDevDom/Everyday-Coding-in-Python | 61b0e4fcbc6c7f399587deab2fa55763c9d519b5 | 93329ad485a25e7c6afa81d7229147044344736c | refs/heads/master | 2023-02-25T05:04:50.051111 | 2021-01-30T02:43:40 | 2021-01-30T02:43:40 | 274,971,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # ToLeftandRight.py
nums = []
num_of_space = 0
current_num = int(input("Enter a number: "))
nums.append(current_num)
while True:
num = int(input("Enter a number: "))
if num > current_num: num_of_space += 1
elif num == current_num: continue
else: num_of_space -= 1
current_num = num
nums.append(" " * num_of_space + str(num))
if num_of_space == 0: break
for num in nums: print(num)
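# Illustrative run (not part of the original file): each new number is printed one
# space further right when it increases and one space back when it decreases;
# repeated numbers are ignored and input stops once the indentation returns to
# column 0. Entering 3, 5, 7, 6, 4 would print:
#   3
#    5
#     7
#    6
#   4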
| [
"[email protected]"
] | |
3cc3eff0e75bc844fb12fcaa253b0afbd4c3e476 | 1a6d5f58a5aaf478e3af1a880f155a2bcbd06aff | /PX4/MAVSDK-Python/offboard_velocity_body.py | d14407e6d1504bb49547099d1e336c087e9f2eaa | [
"MIT"
] | permissive | yingshaoxo/suicide-squad | 5b8858376bffe9d80e66debbd75e83b6fb6f5b6e | cadbd0d48e860a8747b59190fc67a5a114c3462b | refs/heads/master | 2020-11-24T02:46:38.604339 | 2019-10-29T05:47:44 | 2019-10-29T05:47:44 | 227,932,669 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,055 | py | #!/usr/bin/env python3
"""
Some caveats when attempting to run the examples in non-gps environments:
- `drone.action.arm()` will return a `COMMAND_DENIED` result because the action requires switching
to LOITER mode first, something that is currently not supported in a non-gps environment. You will
need to temporarily disable this part here:
`https://github.com/mavlink/MAVSDK/blob/develop/plugins/action/action_impl.cpp#L61-L65`
- `drone.offboard.stop()` will also return a `COMMAND_DENIED` result because it requires a mode
switch to HOLD, something that is currently not supported in a non-gps environment.
"""
import asyncio
from mavsdk import System
from mavsdk import (OffboardError, VelocityBodyYawspeed)
async def run():
""" Does Offboard control using velocity body coordinates. """
drone = System()
await drone.connect(system_address="udp://:14540")
# Set parameters
await drone.param.set_float_param("MIS_TAKEOFF_ALT", 1.0) # set takeoff height to 1 meter
await drone.param.set_int_param("COM_TAKEOFF_ACT", 0) # hold after takeoff
await drone.param.set_int_param("COM_OBL_ACT", 0) # 0: land if lost offboard signal; 1: hold if lost offboard signal
# Start parallel tasks
asyncio.ensure_future(print_altitude(drone))
print("-- Arming")
await drone.action.arm()
print("-- Setting initial setpoint")
await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.0, 0.0, 0.0))
print("-- Starting offboard")
try:
await drone.offboard.start()
except OffboardError as error:
print(f"Starting offboard mode failed with error code: {error._result.result}")
print("-- Disarming")
await drone.action.disarm()
return
    print("-- Climb at 1 m/s")
await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.0, -1, 0.0))
await asyncio.sleep(5)
    print("-- Slide right at 0.1 m/s")
await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.1, 0.0, 0.0))
await asyncio.sleep(5)
    print("-- Slide left at 0.1 m/s")
await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, -0.1, 0.0, 0.0))
await asyncio.sleep(5)
    print("-- Turn clock-wise at 2 deg/s")
await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0.0, 0.0, 0.0, 2.0))
await asyncio.sleep(20)
print("-- Stopping offboard")
try:
await drone.offboard.stop()
except OffboardError as error:
print(f"Stopping offboard mode failed with error code: {error._result.result}")
print("-- Landing")
await drone.action.land()
async def print_altitude(drone):
""" Prints the altitude when it changes """
previous_altitude = None
async for position in drone.telemetry.position():
altitude = round(position.relative_altitude_m)
if altitude != previous_altitude:
previous_altitude = altitude
print(f"Altitude: {altitude}")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
| [
"[email protected]"
] | |
37448d7967ed493b56ddd9b94af1582157f26f15 | 3d016301728a4428ec466653587f0f80c4f7eb11 | /plugin/lighthouse/metadata.py | ea0afcf3dc444279943f34cbf0dc8925b321eb9b | [
"MIT"
] | permissive | MosheWagner/lighthouse | bcb16e24612645cdcf441011b430d6b8408b0687 | ca1454b2680b31d882339ff56efd34b546ba908d | refs/heads/master | 2020-04-26T23:50:43.001011 | 2019-03-05T09:32:30 | 2019-03-05T09:32:30 | 173,915,816 | 2 | 0 | MIT | 2019-03-05T09:26:59 | 2019-03-05T09:26:55 | Python | UTF-8 | Python | false | false | 35,794 | py | import time
import Queue
import bisect
import logging
import weakref
import threading
import collections
from lighthouse.util.misc import *
from lighthouse.util.disassembler import disassembler
logger = logging.getLogger("Lighthouse.Metadata")
#------------------------------------------------------------------------------
# Metadata
#------------------------------------------------------------------------------
#
# To aid in performance, Lighthouse lifts and indexes an in-memory limited
# representation of the disassembler's open database. This is commonly
# referred to as 'metadata' throughout this codebase.
#
# Once built, the lifted metadata cache stands completely independent of
# the disassembler. This effectively eliminates the need for Lighthouse to
# communicate with the underlying disassembler / API (which is slow) when
# mapping coverage, or doing coverage composition logic.
#
# With this model, we have been able to move the heavy director based
# coverage composition logic to python-only threads without disrupting the
# user, or IDA. (added in v0.4.0)
#
# However, there are two main caveats of this model -
#
# 1. The cached 'metadata' representation may not always be true to state
# of the database. For example, if the user defines/undefines functions,
# the metadata cache will not be aware of such changes.
#
# Lighthouse will try to update the director's metadata cache when
# applicable, but there are instances when it will be in the best
# interest of the user to manually trigger a refresh of the metadata.
#
# 2. Building the metadata comes with an upfront cost, but this cost has
# been reduced as much as possible. For example, generating metadata for
# a database with ~17k functions, ~95k nodes (basic blocks), and ~563k
# instructions takes only ~6 seconds.
#
# This will be negligible for small-medium sized databases, but may still
# be jarring for larger databases.
#
# Ultimately, this model provides us a more responsive user experience at
# the expense of the occasional inaccuracies that can be corrected by
# reasonably low cost refresh.
#
#------------------------------------------------------------------------------
# Database Metadata
#------------------------------------------------------------------------------
class DatabaseMetadata(object):
"""
Database level metadata cache.
"""
def __init__(self):
# name & imagebase of the executable this metadata is based on
self.filename = ""
self.imagebase = -1
# database metadata cache status
self.cached = False
# the cache of key database structures
self.nodes = {}
self.functions = {}
self.instructions = []
# internal members to help index & navigate the cached metadata
self._stale_lookup = False
self._name2func = {}
self._last_node = [] # HACK: blank iterable for now
self._node_addresses = []
self._function_addresses = []
# placeholder attribute for disassembler event hooks
self._rename_hooks = None
# metadata callbacks (see director for more info)
self._function_renamed_callbacks = []
# asynchronous metadata collection thread
self._refresh_worker = None
self._stop_threads = False
def terminate(self):
"""
Cleanup & terminate the metadata object.
"""
self.abort_refresh(join=True)
if self._rename_hooks:
self._rename_hooks.unhook()
#--------------------------------------------------------------------------
# Providers
#--------------------------------------------------------------------------
def get_instructions_slice(self, start_address, end_address):
"""
Get the instructions addresses that fall within a given range.
"""
index_start = bisect.bisect_left(self.instructions, start_address)
index_end = bisect.bisect_left(self.instructions, end_address)
return self.instructions[index_start:index_end]
def get_node(self, address):
"""
Get the node (basic block) metadata for a given address.
"""
assert not self._stale_lookup, "Stale metadata is unsafe to use..."
# fast path, effectively a LRU cache of 1 ;P
if address in self._last_node:
return self._last_node
#
# use the lookup lists to do a 'fuzzy' lookup of the given address,
# locating the index of the closest known node address (rounding down)
#
index = bisect.bisect_right(self._node_addresses, address) - 1
node_metadata = self.nodes.get(self._node_addresses[index], None)
#
# if the given address does not fall within the selected node (or the
# node simply does not exist), then we have no match/metadata to return
#
if not (node_metadata and address in node_metadata):
return None
#
# if the selected node metadata contains the given target address, it
# is a positive hit and we should cache this node (in last_node) for
# faster consecutive lookups
#
self._last_node = node_metadata
# return the located node_metadata
return node_metadata
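#
# Illustrative sketch (hypothetical addresses, not taken from a real database):
# the fuzzy lookup above leans on bisect over the sorted node start addresses,
# so an address inside the second node resolves like this:
#
#   import bisect
#   node_addresses = [0x1000, 0x1020, 0x1080]
#   index = bisect.bisect_right(node_addresses, 0x1024) - 1
#   node_addresses[index]   # 0x1020
#
# whether 0x1024 actually falls *inside* that node still depends on the node's
# size, which is why the membership check above runs before returning.
#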
def get_function(self, address):
"""
Get the function metadata for a given address.
"""
node_metadata = self.get_node(address)
if not node_metadata:
return None
return node_metadata.function
def get_function_by_name(self, function_name):
"""
Get the function metadata for a given function name.
"""
try:
return self.functions[self._name2func[function_name]]
except (IndexError, KeyError):
return None
def get_function_by_index(self, index):
"""
Get the function metadata for a given function index.
"""
try:
return self.functions[self._function_addresses[index]]
except (IndexError, KeyError):
return None
def get_function_index(self, address):
"""
Get the function index for a given address.
"""
return self._function_addresses.index(address)
def get_closest_function(self, address):
"""
Get the function metadata for the function closest to the give address.
"""
# sanity check
if not self._function_addresses:
return None
# get the closest insertion point of the given address
index = bisect.bisect_left(self._function_addresses, address)
# the given address is a min, return the first known function
if index == 0:
return self.functions[self._function_addresses[0]]
# given address is a max, return the last known function
if index == len(self._function_addresses):
return self.functions[self._function_addresses[-1]]
# select the two candidate addresses
before = self._function_addresses[index - 1]
after = self._function_addresses[index]
# return the function closest to the given address
if after - address < address - before:
return self.functions[after]
else:
return self.functions[before]
def flatten_blocks(self, basic_blocks):
"""
Flatten a list of basic blocks (address, size) to instruction addresses.
This function provides a way to convert a list of (address, size) basic
block entries into a list of individual instruction (or byte) addresses
based on the current metadata.
"""
output = []
for address, size in basic_blocks:
instructions = self.get_instructions_slice(address, address+size)
output.extend(instructions)
return output
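#
# Worked example (hypothetical values): if self.instructions were
# [0x1000, 0x1004, 0x1008, 0x2000], then
#
#   flatten_blocks([(0x1000, 8), (0x2000, 4)])  ->  [0x1000, 0x1004, 0x2000]
#
# because each (address, size) pair is expanded to the cached instruction
# addresses that fall inside [address, address + size).
#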
def is_big(self):
"""
Return a bool indicating whether we think the database is 'big'.
"""
return len(self.functions) > 50000
#--------------------------------------------------------------------------
# Refresh
#--------------------------------------------------------------------------
def refresh(self, function_addresses=None, progress_callback=None):
"""
Request an asynchronous refresh of the database metadata.
TODO/FUTURE: we should make a synchronous refresh available
"""
assert self._refresh_worker == None, 'Refresh already running'
result_queue = Queue.Queue()
#
# reset the async abort/stop flag that can be used to cancel the
# ongoing refresh task
#
self._stop_threads = False
#
# kick off an asynchronous metadata collection task
#
self._refresh_worker = threading.Thread(
target=self._async_refresh,
args=(result_queue, function_addresses, progress_callback,)
)
self._refresh_worker.start()
#
# immediately return a queue to the caller which it can use to listen
# on and wait for a refresh completion message
#
return result_queue
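#
# Minimal usage sketch (assumes the caller simply blocks on the queue as a
# completion signal; the director drives this asynchronously in practice):
#
#   result_queue = metadata.refresh(progress_callback=metadata_progress)
#   completed = result_queue.get()   # True on success, False if aborted
#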
def abort_refresh(self, join=False):
"""
Abort an asynchronous refresh.
To guarantee an asynchronous refresh has been canceled, the caller can
optionally wait for the result_queue from refresh() to yield its result
(False is placed on the queue when a refresh is aborted).
Alternatively, the `join` parameter can be set to `True`, making this
function block until the refresh is canceled.
"""
#
# the refresh worker (if it exists) can be ripped away at any time.
# take a local reference to avoid double-fetch problems
#
worker = self._refresh_worker
#
# if there is no worker present or running (cleaning up?) there is
# nothing for us to abort. Simply reset the abort flag (just in case)
# and return immediately
#
if not (worker and worker.is_alive()):
self._stop_threads = False
self._refresh_worker = None
return
# signal the worker thread to stop
self._stop_threads = True
# if requested, don't return until the worker thread has stopped...
if join:
worker.join()
def _refresh_instructions(self):
"""
Refresh the list of database instructions (from function metadata).
"""
instructions = []
for function_metadata in self.functions.itervalues():
instructions.extend(function_metadata.instructions)
instructions = list(set(instructions))
instructions.sort()
# commit the updated instruction list
self.instructions = instructions
def _refresh_lookup(self):
"""
Refresh the internal fast lookup address lists.
Fast lookup lists are simply sorted address lists of function metadata,
node metadata, or possibly other forms of metadata (in the future). We
create sorted lists of metadata object addresses so that we can use them
for fast, fuzzy address lookup (eg, bisect).
c.f:
- get_node(ea)
- get_function(ea)
"""
self._last_node = []
self._name2func = { f.name: f.address for f in self.functions.itervalues() }
self._node_addresses = sorted(self.nodes.keys())
self._function_addresses = sorted(self.functions.keys())
self._stale_lookup = False
#--------------------------------------------------------------------------
# Metadata Collection
#--------------------------------------------------------------------------
@not_mainthread
def _async_refresh(self, result_queue, function_addresses, progress_callback):
"""
The main routine for the asynchronous metadata refresh worker.
TODO/FUTURE: this should be cleaned up / refactored
"""
# pause our rename listening hooks (more performant collection)
if self._rename_hooks:
self._rename_hooks.unhook()
#
# if the caller provided no function addresses to target for refresh,
# we will perform a complete metadata refresh of all database defined
# functions. let's retrieve that list from the disassembler now...
#
if not function_addresses:
function_addresses = disassembler.execute_read(
disassembler.get_function_addresses
)()
# refresh database properties that we wish to cache
self._async_refresh_properties()
# refresh the core database metadata asynchronously
completed = self._async_collect_metadata(
function_addresses,
progress_callback
)
# regenerate the instruction list from collected metadata
self._refresh_instructions()
# refresh the internal function/node fast lookup lists
self._refresh_lookup()
#
# NOTE:
#
# creating the hooks inline like this is less than ideal, but they
# have been moved here (from the metadata constructor) to
# accommodate shortcomings of the Binary Ninja API.
#
# TODO/FUTURE/V35:
#
# it would be nice to move these back to the constructor once the
# Binary Ninja API allows us to detect BV / sessions as they are
# created, and able to load plugins on such events.
#
#----------------------------------------------------------------------
# create the disassembler hooks to listen for rename events
if not self._rename_hooks:
self._rename_hooks = disassembler.create_rename_hooks()
self._rename_hooks.renamed = self._name_changed
self._rename_hooks.metadata = weakref.proxy(self)
#----------------------------------------------------------------------
# reinstall the rename listener hooks now that the refresh is done
self._rename_hooks.hook()
# send the refresh result (good/bad) incase anyone is still listening
if completed:
self.cached = True
result_queue.put(True)
else:
result_queue.put(False)
# clean up our thread's reference as it is basically done/dead
self._refresh_worker = None
# thread exit...
return
@disassembler.execute_read
def _async_refresh_properties(self):
"""
Refresh a selection of interesting database properties.
"""
self.filename = disassembler.get_root_filename()
self.imagebase = disassembler.get_imagebase()
@not_mainthread
def _async_collect_metadata(self, function_addresses, progress_callback):
"""
Collect metadata from the underlying database (interruptable).
"""
CHUNK_SIZE = 150
completed = 0
start = time.time()
#----------------------------------------------------------------------
for addresses_chunk in chunks(function_addresses, CHUNK_SIZE):
#
# collect function metadata from the open database in groups of
# CHUNK_SIZE. collect_function_metadata() takes a list of function
# addresses and collects their metadata in a thread-safe manner
#
fresh_metadata = collect_function_metadata(addresses_chunk)
# update our database metadata cache with the new function metadata
self._update_functions(fresh_metadata)
# report incremental progress to an optional progress_callback
if progress_callback:
completed += len(addresses_chunk)
progress_callback(completed, len(function_addresses))
# if the refresh was canceled, stop collecting metadata and bail
if self._stop_threads:
return False
# sleep some so we don't choke the mainthread
time.sleep(.0015)
#----------------------------------------------------------------------
end = time.time()
logger.debug("Metadata collection took %s seconds" % (end - start))
# refresh completed normally / was not interrupted
return True
def _update_functions(self, fresh_metadata):
"""
Update stored function metadata with the given fresh metadata.
Returns a map of {address: function metadata} that has been updated.
"""
blank_function = FunctionMetadata(-1)
#
# the first step is to loop through the 'fresh' function metadata that
# has been given to us, and identify what is truly new or different
# from any existing metadata we hold.
#
for function_address, new_metadata in fresh_metadata.iteritems():
# extract the 'old' metadata from the database metadata cache
old_metadata = self.functions.get(function_address, blank_function)
#
# if the fresh metadata for this function is identical to the
# existing metadata we have collected for it, there's nothing
# else for us to do -- just ignore it.
#
if old_metadata == new_metadata:
continue
# delete nodes that explicitly no longer exist
old = old_metadata.nodes.viewkeys() - new_metadata.nodes.viewkeys()
for node_address in old:
del self.nodes[node_address]
#
# if the newly collected metadata for a given function is empty, it
# indicates that the function has been deleted. we go ahead and
# remove its old function metadata from the db metadata entirely
#
if new_metadata.empty:
del self.functions[function_address]
continue
# add or overwrite the new/updated basic blocks
self.nodes.update(new_metadata.nodes)
# save the new/updated function
self.functions[function_address] = new_metadata
#
# since the node / function metadata cache has probably changed, we
# will need to refresh the internal fast lookup lists. this flag is
# only really used for debugging, and will probably be removed
# in the TODO/FUTURE collection refactor (v0.9?)
#
self._stale_lookup = True
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
@mainthread
def _name_changed(self, address, new_name, local_name=None):
"""
Handler for rename event in IDA.
TODO/FUTURE: refactor this to not be so IDA-specific
"""
# we should never care about local renames (eg, loc_40804b), ignore
if local_name or new_name.startswith("loc_"):
return 0
# get the function that this address falls within
function = self.get_function(address)
# if the address does not fall within a function (might happen?), ignore
if not function:
return 0
#
# ensure the renamed address matches the function start before
# renaming the function in our metadata cache.
#
# I am not sure when this would not be the case (globals? maybe)
# but I'd rather not find out.
#
if address != function.address:
return
# if the name isn't actually changing (misfire?) nothing to do
if new_name == function.name:
return
logger.debug("Name changing @ 0x%X" % address)
logger.debug(" Old name: %s" % function.name)
logger.debug(" New name: %s" % new_name)
# rename the function, and notify metadata listeners
#function.name = new_name
function.refresh_name()
self._notify_function_renamed()
# necessary for IDP/IDB_Hooks
return 0
#--------------------------------------------------------------------------
# Callbacks
#--------------------------------------------------------------------------
def function_renamed(self, callback):
"""
Subscribe a callback for function rename events.
"""
register_callback(self._function_renamed_callbacks, callback)
def _notify_function_renamed(self):
"""
Notify listeners of a function rename event.
"""
notify_callback(self._function_renamed_callbacks)
#------------------------------------------------------------------------------
# Function Metadata
#------------------------------------------------------------------------------
class FunctionMetadata(object):
"""
Function level metadata cache.
"""
def __init__(self, address):
# function metadata
self.address = address
self.name = None
# node metadata
self.nodes = {}
self.edges = collections.defaultdict(list)
# fixed/baked/computed metrics
self.size = 0
self.node_count = 0
self.edge_count = 0
self.instruction_count = 0
self.cyclomatic_complexity = 0
# collect metadata from the underlying database
if address != -1:
self._build_metadata()
#--------------------------------------------------------------------------
# Properties
#--------------------------------------------------------------------------
@property
def instructions(self):
"""
Return the instruction addresses in this function.
"""
return set([ea for node in self.nodes.itervalues() for ea in node.instructions])
@property
def empty(self):
"""
Return a bool indicating whether the object is populated.
"""
return len(self.nodes) == 0
#--------------------------------------------------------------------------
# Public
#--------------------------------------------------------------------------
@disassembler.execute_read
def refresh_name(self):
"""
Refresh the function name against the open database.
"""
self.name = disassembler.get_function_name_at(self.address)
#--------------------------------------------------------------------------
# Metadata Population
#--------------------------------------------------------------------------
def _build_metadata(self):
"""
Collect function metadata from the underlying database.
"""
self.name = disassembler.get_function_name_at(self.address)
self._refresh_nodes()
self._finalize()
def _refresh_nodes(self):
"""
This will be replaced with a disassembler-specific function at runtime.
NOTE: Read the 'MONKEY PATCHING' section at the end of this file.
"""
raise RuntimeError("This function should have been monkey patched...")
def _ida_refresh_nodes(self):
"""
Refresh function node metadata against an open IDA database.
"""
function_metadata = self
function_metadata.nodes = {}
# get function & flowchart object from IDA database
function = idaapi.get_func(self.address)
flowchart = idaapi.qflow_chart_t("", function, idaapi.BADADDR, idaapi.BADADDR, 0)
#
# now we will walk the flowchart for this function, collecting
# information on each of its nodes (basic blocks) and populating
# the function & node metadata objects.
#
for node_id in xrange(flowchart.size()):
node = flowchart[node_id]
# NOTE/COMPAT
if disassembler.USING_IDA7API:
node_start = node.start_ea
node_end = node.end_ea
else:
node_start = node.startEA
node_end = node.endEA
#
# the current node appears to have a size of zero. This means
# that another flowchart / function owns this node so we can just
# ignore it...
#
if node_start == node_end:
continue
# create a new metadata object for this node
node_metadata = NodeMetadata(node_start, node_end, node_id)
#
# establish a relationship between this node (basic block) and
# this function metadata (its parent)
#
node_metadata.function = function_metadata
function_metadata.nodes[node_start] = node_metadata
# compute all of the edges between nodes in the current function
for node_metadata in function_metadata.nodes.itervalues():
edge_src = node_metadata.instructions[-1]
for edge_dst in idautils.CodeRefsFrom(edge_src, True):
if edge_dst in function_metadata.nodes:
function_metadata.edges[edge_src].append(edge_dst)
def _binja_refresh_nodes(self):
"""
Refresh function node metadata against an open Binary Ninja database.
"""
function_metadata = self
function_metadata.nodes = {}
# get the function from the Binja database
function = disassembler.bv.get_function_at(self.address)
#
# now we will walk the flowchart for this function, collecting
# information on each of its nodes (basic blocks) and populating
# the function & node metadata objects.
#
for node in function.basic_blocks:
# create a new metadata object for this node
node_metadata = NodeMetadata(node.start, node.end, node.index)
#
# establish a relationship between this node (basic block) and
# this function metadata (its parent)
#
node_metadata.function = function_metadata
function_metadata.nodes[node.start] = node_metadata
#
# enumerate the edges produced by this node (basic block) with a
# destination that falls within this function.
#
edge_src = node_metadata.instructions[-1]
for edge in node.outgoing_edges:
function_metadata.edges[edge_src].append(edge.target.start)
def _compute_complexity(self):
"""
Walk the function CFG to determine approximate cyclomatic complexity.
The purpose of this function is mostly to account for IDA's inclusion
of additional floating nodes in function flowcharts. These blocks tend
to be for exception handlers, but can manifest in various other cases.
By walking the function CFG, we can identify these 'disembodied'
blocks that have no incoming edge and ignore them in our cyclomatic
complexity calculation. Not doing so will radically throw off the
cyclomatic complexity score.
"""
confirmed_nodes = set()
confirmed_edges = {}
#
# to_walk contains a list of node addresses. we draw from this list
# one at a time, walking across all of the outgoing edges from the
# current node (node_address) to walk the function graph
#
to_walk = set([self.address])
while to_walk:
# this is the address of the node we will 'walk' from
node_address = to_walk.pop()
confirmed_nodes.add(node_address)
# now we loop through all edges that originate from this block
current_src = self.nodes[node_address].instructions[-1]
for current_dest in self.edges[current_src]:
# ignore nodes we have already visited
if current_dest in confirmed_nodes:
continue
#
# it appears that this node has not been visited yet, so we
# will want to walk its edges sometime soon to continue the
# graph exploration
#
to_walk.add(current_dest)
# update the map of confirmed (walked) edges
confirmed_edges[current_src] = self.edges.pop(current_src)
# compute the final cyclomatic complexity for the function
num_edges = sum(len(x) for x in confirmed_edges.itervalues())
num_nodes = len(confirmed_nodes)
return num_edges - num_nodes + 2
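#
# Worked example (an illustration, not from a real binary): for a diamond-shaped
# function with nodes A -> {B, C} and B, C -> D, the walk confirms 4 nodes and
# 4 edges, giving 4 - 4 + 2 = 2, i.e. the two independent paths through an
# if/else. Disembodied nodes that are never reached are excluded from both
# counts, which is exactly what the walk above is for.
#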
def _finalize(self):
"""
Finalize function metadata for use.
"""
self.size = sum(node.size for node in self.nodes.itervalues())
self.node_count = len(self.nodes)
self.edge_count = len(self.edges)
self.instruction_count = sum(node.instruction_count for node in self.nodes.itervalues())
self.cyclomatic_complexity = self._compute_complexity()
#--------------------------------------------------------------------------
# Operator Overloads
#--------------------------------------------------------------------------
def __eq__(self, other):
"""
Compute function metadata equality (==)
"""
result = True
result &= self.name == other.name
result &= self.size == other.size
result &= self.address == other.address
result &= self.node_count == other.node_count
result &= self.instruction_count == other.instruction_count
result &= self.nodes.viewkeys() == other.nodes.viewkeys()
return result
#------------------------------------------------------------------------------
# Node Metadata
#------------------------------------------------------------------------------
class NodeMetadata(object):
"""
Node (basic block) level metadata cache.
"""
def __init__(self, start_ea, end_ea, node_id=None):
# node metadata
self.size = end_ea - start_ea
self.address = start_ea
self.instruction_count = 0
# flowchart node_id
self.id = node_id
# parent function_metadata
self.function = None
# instruction addresses
self.instructions = []
#----------------------------------------------------------------------
# collect metadata from the underlying database
self._build_metadata()
#--------------------------------------------------------------------------
# Metadata Population
#--------------------------------------------------------------------------
def _build_metadata(self):
"""
This will be replaced with a disassembler-specific function at runtime.
NOTE: Read the 'MONKEY PATCHING' section at the end of this file.
"""
raise RuntimeError("This function should have been monkey patched...")
def _ida_build_metadata(self):
"""
Collect node metadata from the underlying database.
"""
current_address = self.address
node_end = self.address + self.size
#
# loop through the node's entire address range and count its
# instructions. Note that we are assuming that every defined
# 'head' (in IDA) is an instruction
#
while current_address < node_end:
instruction_size = idaapi.get_item_end(current_address) - current_address
self.instructions.append(current_address)
current_address += instruction_size
# save the number of instructions in this block
self.instruction_count = len(self.instructions)
def _binja_build_metadata(self):
"""
Collect node metadata from the underlying database.
"""
bv = disassembler.bv
current_address = self.address
node_end = self.address + self.size
#
# Note that we 'iterate over' the instructions using their byte length
# because it is far more performant than Binary Ninja's instruction
# generators which also produce instruction text, tokens etc...
#
while current_address < node_end:
self.instructions.append(current_address)
current_address += bv.get_instruction_length(current_address)
# save the number of instructions in this block
self.instruction_count = len(self.instructions)
#--------------------------------------------------------------------------
# Operator Overloads
#--------------------------------------------------------------------------
def __str__(self):
"""
Printable NodeMetadata.
"""
output = ""
output += "Node 0x%08X Info:\n" % self.address
output += " Address: 0x%08X\n" % self.address
output += " Size: %u\n" % self.size
output += " Instruction Count: %u\n" % self.instruction_count
output += " Id: %u\n" % self.id
output += " Function: %s\n" % self.function
output += " Instructions: %s" % self.instructions
return output
def __contains__(self, address):
"""
Overload python's 'in' keyword for this object.
This allows us to use `in` to check if an address falls within a node.
"""
if self.address <= address < self.address + self.size:
return True
return False
def __eq__(self, other):
"""
Compute node equality (==)
"""
result = True
result &= self.size == other.size
result &= self.address == other.address
result &= self.instruction_count == other.instruction_count
result &= self.function == other.function
result &= self.id == other.id
return result
#------------------------------------------------------------------------------
# Async Metadata Helpers
#------------------------------------------------------------------------------
@disassembler.execute_read
def collect_function_metadata(function_addresses):
"""
Collect function metadata for a list of addresses.
"""
return { ea: FunctionMetadata(ea) for ea in function_addresses }
@disassembler.execute_ui
def metadata_progress(completed, total):
"""
Handler for metadata collection callback, updates progress dialog.
"""
disassembler.replace_wait_box(
"Collected metadata for %u/%u Functions" % (completed, total)
)
#------------------------------------------------------------------------------
# MONKEY PATCHING
#------------------------------------------------------------------------------
#
# We use 'monkey patching' to modify the Metadata class definitions at
# runtime. Specifically, we use it to swap in metadata collection routines
# that have been carefully tailored for a given disassembler.
#
# The reason for this is that the metadata collection code is very
# disassembler-specific, and that it needs to be as performant as possible.
# Shimming metadata collection code to be disassembler agnostic is going
# to be messy and slow.
#
if disassembler.NAME == "IDA":
import idaapi
import idautils
FunctionMetadata._refresh_nodes = FunctionMetadata._ida_refresh_nodes
NodeMetadata._build_metadata = NodeMetadata._ida_build_metadata
elif disassembler.NAME == "BINJA":
import binaryninja
FunctionMetadata._refresh_nodes = FunctionMetadata._binja_refresh_nodes
NodeMetadata._build_metadata = NodeMetadata._binja_build_metadata
else:
raise NotImplementedError("DISASSEMBLER-SPECIFIC SHIM MISSING")
| [
"[email protected]"
] | |
6c78fccd11b2ca769683b6527aa888e158fea647 | d9e26e516ab3863b6e7d00c4e3cdecf1af7028eb | /src/oaklib/io/streaming_nl_writer.py | ecde169932c3e55baa59bfdfd1aef1e274f6109a | [
"Apache-2.0"
] | permissive | INCATools/ontology-access-kit | 2f08a64b7308e8307d1aaac2a81764e7d98b5928 | 8d2a124f7af66fe2e796f9e0ece55585438796a5 | refs/heads/main | 2023-08-30T14:28:57.201198 | 2023-08-29T17:40:19 | 2023-08-29T17:40:19 | 475,072,415 | 67 | 15 | Apache-2.0 | 2023-09-07T01:06:04 | 2022-03-28T15:50:45 | Jupyter Notebook | UTF-8 | Python | false | false | 1,152 | py | from dataclasses import dataclass
from linkml_runtime.utils.yamlutils import YAMLRoot
from oaklib.datamodels import obograph
from oaklib.io.streaming_writer import StreamingWriter
from oaklib.utilities.nlp.natual_language_generation import NaturalLanguageGenerator
@dataclass
class StreamingNaturalLanguageWriter(StreamingWriter):
"""
A writer that streams basic line by line reporting info
"""
natural_language_generator: NaturalLanguageGenerator = None
def emit_curie(self, curie, label=None, **kwargs):
self._ensure_init()
self.file.write(self.natural_language_generator.render_entity(curie))
self.file.write("\n")
def emit_obj(self, obj: YAMLRoot):
self._ensure_init()
if isinstance(obj, obograph.LogicalDefinitionAxiom):
self.file.write(self.natural_language_generator.render_logical_definition(obj))
self.file.write("\n")
else:
raise NotImplementedError
def _ensure_init(self):
if self.natural_language_generator is None:
self.natural_language_generator = NaturalLanguageGenerator(self.ontology_interface)
| [
"[email protected]"
] | |
3cc7c17ee582aaba4ab4d5771286ac2e1ae8b9e8 | 1b45d1162bd60a356844fc4dced068da2e6cc438 | /Arrays/Merge.py | 8ee66ae39f1687b433e476fa1b9e3be1d2e31015 | [
"MIT"
] | permissive | AnkitAvi11/Data-Structures-And-Algorithms | de9584e439861254cdce265af789c8b484c01c69 | 703f78819a41d4dd88caf71156a4a515651edc1b | refs/heads/master | 2023-02-19T21:53:39.405934 | 2021-01-24T17:27:21 | 2021-01-24T17:27:21 | 297,752,655 | 6 | 3 | MIT | 2021-01-24T17:27:22 | 2020-09-22T19:33:55 | Python | UTF-8 | Python | false | false | 669 | py | """
QUESTION STATEMENT : MERGE TWO SORTED ARRAYS WITHOUT USING ANY EXTRA SPACE
example :
arr1 = {1,3,5,7,9} size = n
arr2 = {2,4,6,8,10} size = m
arr1 after merging = {1,2,3,4,5,6,7,8,9,10}
"""
def mergeArrays(arr : list, arr2 : list) :
i = 0;j = 0;
while i < len(arr) : # O(n)
if arr[i] > arr2[j] :
arr[i], arr2[j] = arr2[j], arr[i] # swapping the elements
arr2.sort() # O(mlog2m)
i+=1
# total complexity = (n*m)log2m
for el in arr2 :
arr.append(el)
if __name__ == '__main__' :
arr = [1,3,5,7,9]
arr2 = [2,4,6,8,10]
mergeArrays(arr, arr2)
print(arr)
| [
"[email protected]"
] | |
aa27042ddeb0ddff82f1c8f4312778d7feb8da3e | cee65c4806593554662330368c799c14ec943454 | /src/sqlvm-preview/azext_sqlvm_preview/vendored_sdks/sqlvirtualmachine/models/wsfc_domain_profile_py3.py | 0d7864768ad80fab17f0ea7f8ca57ea27cec3b41 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | azclibot/azure-cli-extensions | d5d1a4ecdfc87fd79f5ad042fb85cdbf881897d2 | c230646258d4b56efb7d44eb7a0230f2943da6f6 | refs/heads/master | 2023-08-28T03:55:02.311902 | 2019-04-04T16:05:45 | 2019-04-04T16:05:45 | 179,548,695 | 1 | 1 | MIT | 2021-07-28T15:26:17 | 2019-04-04T17:54:39 | Python | UTF-8 | Python | false | false | 3,274 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WsfcDomainProfile(Model):
"""Active Directory account details to operate Windows Server Failover
Cluster.
:param domain_fqdn: Fully qualified name of the domain.
:type domain_fqdn: str
:param ou_path: Organizational Unit path in which the nodes and cluster
will be present.
:type ou_path: str
:param cluster_bootstrap_account: Account name used for creating cluster
(at minimum needs permissions to 'Create Computer Objects' in domain).
:type cluster_bootstrap_account: str
:param cluster_operator_account: Account name used for operating cluster
i.e. will be part of administrators group on all the participating virtual
machines in the cluster.
:type cluster_operator_account: str
:param sql_service_account: Account name under which SQL service will run
on all participating SQL virtual machines in the cluster.
:type sql_service_account: str
:param file_share_witness_path: Optional path for fileshare witness.
:type file_share_witness_path: str
:param storage_account_url: Fully qualified ARM resource id of the witness
storage account.
:type storage_account_url: str
:param storage_account_primary_key: Primary key of the witness storage
account.
:type storage_account_primary_key: str
"""
_attribute_map = {
'domain_fqdn': {'key': 'domainFqdn', 'type': 'str'},
'ou_path': {'key': 'ouPath', 'type': 'str'},
'cluster_bootstrap_account': {'key': 'clusterBootstrapAccount', 'type': 'str'},
'cluster_operator_account': {'key': 'clusterOperatorAccount', 'type': 'str'},
'sql_service_account': {'key': 'sqlServiceAccount', 'type': 'str'},
'file_share_witness_path': {'key': 'fileShareWitnessPath', 'type': 'str'},
'storage_account_url': {'key': 'storageAccountUrl', 'type': 'str'},
'storage_account_primary_key': {'key': 'storageAccountPrimaryKey', 'type': 'str'},
}
def __init__(self, *, domain_fqdn: str=None, ou_path: str=None, cluster_bootstrap_account: str=None, cluster_operator_account: str=None, sql_service_account: str=None, file_share_witness_path: str=None, storage_account_url: str=None, storage_account_primary_key: str=None, **kwargs) -> None:
super(WsfcDomainProfile, self).__init__(**kwargs)
self.domain_fqdn = domain_fqdn
self.ou_path = ou_path
self.cluster_bootstrap_account = cluster_bootstrap_account
self.cluster_operator_account = cluster_operator_account
self.sql_service_account = sql_service_account
self.file_share_witness_path = file_share_witness_path
self.storage_account_url = storage_account_url
self.storage_account_primary_key = storage_account_primary_key
| [
"[email protected]"
] | |
d905ee37aa6ecea6a752fbc54249897a44a54d0e | 66e6360325b781ed0791868765f1fd8a6303726f | /TB2009/WorkDirectory/5223 All Charges/ExportCharge.py | 0256e8dcc77eb233c47742a482097e9b389b68a6 | [] | no_license | alintulu/FHead2011PhysicsProject | c969639b212d569198d8fce2f424ce866dcfa881 | 2568633d349810574354ad61b0abab24a40e510e | refs/heads/master | 2022-04-28T14:19:30.534282 | 2020-04-23T17:17:32 | 2020-04-23T17:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,613 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PrintCharges")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(50000))
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring('file:/tmp/chenyi/HTB_.root'),
streams = cms.untracked.vstring('HCAL_Trigger','HCAL_SlowData','HCAL_QADCTDC','HCAL_DCC021','Chunk699')
)
process.hcal_db_producer = cms.ESProducer("HcalDbProducer",
dump = cms.untracked.vstring(''),
file = cms.untracked.string('')
)
process.es_hardcode = cms.ESSource("HcalHardcodeCalibrations",
toGet = cms.untracked.vstring('GainWidths','PedestalWidths','QIEData','ChannelQuality','ZSThresholds','RespCorrs')
)
process.es_ascii = cms.ESSource("HcalTextCalibrations",
input = cms.VPSet(
cms.PSet(
object = cms.string('ElectronicsMap'),
file = cms.FileInPath('emap_TB2009_A.txt')
),
cms.PSet(
object = cms.string('Pedestals'),
file = cms.FileInPath('pedestals_TB2009_.txt')
),
cms.PSet(
object = cms.string('Gains'),
file = cms.FileInPath('gains_TB2009_LMIP_newpedestal.txt')
)
)
)
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.tbUnpacker = cms.EDFilter("HcalTBObjectUnpacker",
IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.hcalDigis = cms.EDFilter("HcalRawToDigi",
UnpackZDC = cms.untracked.bool(True),
FilterDataQuality = cms.bool(True),
ExceptionEmptyData = cms.untracked.bool(True),
InputLabel = cms.InputTag("source"),
ComplainEmptyData = cms.untracked.bool(False),
UnpackCalib = cms.untracked.bool(False),
firstSample = cms.int32(0),
lastSample = cms.int32(9),
FEDs = cms.untracked.vint32(21),
HcalFirstFED = cms.untracked.int32(21)
)
process.load("RecoLocalCalo.HcalRecProducers.HcalSimpleReconstructor_hbhe_cfi")
process.hbhereco.firstSample = 5
process.hbhereco.samplesToAdd = 4
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound',
'TooManyProducts',
'TooFewProducts')
)
process.triggerfilter = cms.EDFilter("TriggerFilter",
allowBeamTrigger = cms.untracked.bool(True),
allowOutOfSpillPedestalTrigger = cms.untracked.bool(False),
allowOthers = cms.untracked.bool(False)
)
process.oneparticle = cms.EDFilter("SingleTowerParticleFilter",
particleNumber = cms.untracked.int32(1)
)
process.muonveto = cms.EDFilter("MuonVetoFilter")
process.export = cms.EDAnalyzer("ExportChargeAnalyzer",
normalModule = cms.untracked.string('hbhereco')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(2),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(True),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(True),
usePedestalMean = cms.untracked.bool(False),
mip = cms.untracked.string('MIP_EarlyRejection_Median.txt'),
adcMap = cms.untracked.string('FinalAdcMapping_All.txt'),
beamEnergy = cms.untracked.double()
)
process.vlsbreco = cms.EDProducer("HcalTBVLSBReconstructor",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
mipFileName = cms.untracked.string("MIP_EarlyRejection_Median.txt"),
adcMapFileName = cms.untracked.string("FinalAdcMapping_All.txt")
)
process.energydistribution = cms.EDAnalyzer("FillRHEnergyDistributionAnalyzer",
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco"),
output = cms.untracked.string("EnergyDistribution_ABC_.root")
)
process.timecut = cms.EDFilter("HighestSampleTimeFilter",
minimum = cms.untracked.double(7.5),
threshold = cms.untracked.double(100)
)
process.hitcut = cms.EDFilter("HitXFilter",
maximum = cms.untracked.double(-5)
)
process.mincut = cms.EDFilter("RHTotalEnergyCut",
minimum = cms.untracked.double(),
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco")
)
process.maxcut = cms.EDFilter("RHTotalEnergyCut",
minimum = cms.untracked.double(),
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco")
)
process.merge = cms.EDProducer("CombineCollectionProducer",
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco")
# interCalibration = cms.untracked.string("InterCalibration_Secondary.txt")
)
process.export = cms.EDAnalyzer("CExportChargeAnalyzer",
moduleName = cms.untracked.string('merge'),
simplified = cms.untracked.bool(True),
exportVlsb = cms.untracked.bool(True)
)
process.runinfo = cms.EDProducer("RunInformationProducer",
beamEnergy = cms.untracked.double()
)
process.p = cms.Path(
process.tbUnpacker *
process.vlsbinfo *
process.runinfo *
process.vlsbreco *
process.hcalDigis *
process.hbhereco *
process.triggerfilter *
process.oneparticle *
process.muonveto *
process.timecut *
process.hitcut *
process.mincut *
~process.maxcut *
process.merge *
process.export
)
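# Reading of the path above (an interpretation, not part of the original
# config): modules in a cms.Path run in sequence and act as filters, and the
# ~process.maxcut term inverts that filter's decision, so only events whose
# total reconstructed energy passes mincut but fails maxcut reach the final
# merge/export steps.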
| [
"[email protected]"
] | |
c477af6c57995ecddcbfdc254fe373d15f3999c8 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /252/252.meeting-rooms.234346443.Runtime-Error.leetcode.py | 92868ca8e540837d3283eb90122ea37aa2b82d4d | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Solution:
def canAttendMeetings(self, intervals):
overlap = []
for interval in sorted(intervals, key=lambda x: x.start):
if overlap and overlap[-1].end > interval.start:
return False
else:
overlap.append(interval)
return True
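# Example usage (a sketch; LeetCode's harness supplies an Interval class with
# `start` and `end` attributes, so the namedtuple below is a hypothetical
# stand-in for local testing):
#
#   from collections import namedtuple
#   Interval = namedtuple("Interval", ["start", "end"])
#   Solution().canAttendMeetings([Interval(0, 30), Interval(5, 10)])   # False
#   Solution().canAttendMeetings([Interval(5, 10), Interval(12, 15)])  # True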
| [
"[email protected]"
] | |
fc02fda54534594dd3a8358ecf562fc2cbd36a7e | 0a1716384ac3425b0f457e210e43c0a499bd66d2 | /process_files/_old/fix_processed_names.py | 27e83d345283a04bd753cafb4edbf2a7f9b3850a | [] | no_license | ilbarlow/process-rig-data | d54d0489ad42ef92e422915d01ac43feeb62bed3 | 89fc296628eb7f9260b099ee3cb2f25680905686 | refs/heads/master | 2020-03-18T21:50:05.775230 | 2018-03-28T20:13:41 | 2018-03-28T20:13:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 27 16:15:39 2016
@author: worm_rig
"""
import os
import shutil
import glob
import numpy as np
import pandas as pd
import warnings
from functools import partial
if __name__ == '__main__':
output_root = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/short_movies_new/'
#'/Volumes/behavgenom_archive$/Avelino/PeterAskjaer/'
exp_name = 'Double_pick_090217'#'Mutant_worm_screening_Y32H12A.7(ok3452)_220217'
tsv_file = os.path.join(output_root, 'ExtraFiles', exp_name + '_renamed.tsv')
tab = pd.read_table(tsv_file, names=['old', 'new'])
for _, row in tab.iterrows():
parts = row['old'].split(os.sep)
delP = [int(x[2:]) for x in parts if x.startswith('PC')][0]
old_base_name = os.path.splitext(os.path.basename(row['old']))[0]
old_ch = [int(x[2:]) for x in old_base_name.split('_') if x.startswith('Ch')][0]
base_name = os.path.splitext(os.path.basename(row['new']))[0]
real_ch = 'Ch{}'.format(2*(delP-1)+old_ch)
fparts = base_name.split('_')
ff = [x.strip() if not x.startswith('Ch') else real_ch for x in fparts ]
new_base_name = '_'.join(ff)
search_str = os.path.join(output_root,'**', exp_name, base_name + '*')
fnames = glob.glob(search_str)
for bad_name in fnames:
good_name = bad_name.replace(base_name, new_base_name)
print(bad_name, good_name)
#shutil.move(bad_name, good_name)
| [
"[email protected]"
] | |
148ea8e659b1f395932dd56bb4319bd9d6022474 | 9ec58308459dc95405d1a32fcf8fae7f687a207b | /test/test_k_bank.py | 71dc290f6f4630d2eaa7649866a90201a40f7e18 | [
"MIT"
] | permissive | ivanlyon/exercises | 067aed812486dbd7a3d7de6e47a692c8b9383163 | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | refs/heads/master | 2021-05-24T04:17:29.012329 | 2021-05-11T17:26:50 | 2021-05-11T17:26:50 | 65,584,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | import io
import unittest
from unittest.mock import patch
from kattis import k_bank
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input_1(self):
'''Run and assert problem statement sample 1 input and output.'''
inputs = []
inputs.append('4 4')
inputs.append('1000 1')
inputs.append('2000 2')
inputs.append('500 2')
inputs.append('1200 0')
inputs = '\n'.join(inputs) + '\n'
outputs = '4200\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_bank.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_2(self):
'''Run and assert problem statement sample 2 input and output.'''
inputs = []
inputs.append('3 4')
inputs.append('1000 0')
inputs.append('2000 1')
inputs.append('500 1')
inputs = '\n'.join(inputs) + '\n'
outputs = '3000\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_bank.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
dcdfd17496925a85400ab2e195a3c8e50d5401e6 | d7f486eebaa164bf3274c843e1932c7eef596e5e | /importer/facebook.py | 352a80e06ffca4048160d7b028cf173373aa9667 | [
"MIT"
] | permissive | Galaxyvintage/journal-1 | aafe107645a6dde038b0010496c041ac635e966d | f666a3b38f0eeb2cc1f5576e0668f174bf1cbd8d | refs/heads/master | 2020-03-20T09:15:09.269993 | 2018-07-05T16:31:17 | 2018-07-05T16:31:17 | 137,332,462 | 0 | 0 | null | 2018-06-14T08:54:38 | 2018-06-14T08:54:37 | null | UTF-8 | Python | false | false | 7,091 | py | import events
from database import db
import json
import datetime
import os
def load_to_json(filename):
json_data = open(filename).read()
return json.loads(json_data)
def read_app_posts(directory):
data = load_to_json(directory + "apps/posts_from_apps.json")
for post in data["app_posts"]:
attachment_data = post["attachments"][0]["data"][0]["external_context"]
time = datetime.datetime.fromtimestamp(post["timestamp"])
message = attachment_data["name"]
title = post["title"]
app_name = "unknown app"
if "via" in title:
app_name = title[title.index("via") + 4 : -1]
kvps = {"message": message, "title": title, "app": app_name}
if attachment_data.has_key("url"):
kvps["url"] = attachment_data["url"]
events.add("Facebook post via " + app_name + ": " + message, time, ["facebook", "post", "app"], kvps)
def read_app_installs(directory):
data = load_to_json(directory + "apps/installed_apps.json")
for item in data["installed_apps"]:
events.add("Added Facebook app " + item["name"] + ".", datetime.datetime.fromtimestamp(item["time_added"]), ["facebook", "app"], {"app": item["name"]})
def read_comments(directory):
data = load_to_json(directory + "comments/comments.json")
for comment in data["comments"]:
time = datetime.datetime.fromtimestamp(comment["timestamp"])
message = comment["data"][0]["comment"]["comment"]
events.add("Facebook: " + comment["title"], time, ["facebook", "comment"], {"message": message})
def read_events(directory):
data = load_to_json(directory + "events/event_responses.json")
for event in data["event_responses"]["events_joined"]:
time = datetime.datetime.fromtimestamp(event["start_timestamp"])
name = event["name"]
events.add("Participated in Facebook event: " + name, time, ["facebook", "event"], {"name": name})
data = load_to_json(directory + "events/your_events.json")
for event in data["your_events"]:
time = datetime.datetime.fromtimestamp(event["start_timestamp"])
name = event["name"]
location = event["place"]["name"]
events.add("Hosted Facebook event: " + name, time, ["facebook", "event"], {"name": name, "location": location, "message": event["description"]})
def read_friends(directory):
data = load_to_json(directory + "friends/friends_added.json")
for friend in data["friends"]:
time = datetime.datetime.fromtimestamp(friend["timestamp"])
name = friend["name"]
events.add("Added Facebook friend " + name + ".", time, ["facebook", "friend"], {"name": name})
def create_conversation_event(title, message_count, time, participants, history, first):
kvps = {"participants": participants, "message": history}
if first:
events.add(
"Started a Facebook conversation with " + title + " (" + str(message_count) + " message" + (
"s" if message_count > 1 else "") + ").",
time, ["facebook", "message"], kvps)
else:
events.add(
"Exchanged " + str(message_count) + " Facebook message" + (
"s" if message_count > 1 else "") + " with " + title + ".",
time, ["facebook", "message"], kvps)
def read_messages(directory):
message_directory = directory + "messages/"
for conversation in [os.path.join(message_directory, name) for name in os.listdir(message_directory) if os.path.isdir(os.path.join(message_directory, name)) and name != "stickers_used"]:
data = load_to_json(conversation + "/message.json")
if not data.has_key("title"):
continue
title = data["title"]
participants = [title]
if data.has_key("participants"):
participants = data["participants"]
messages = data["messages"]
session_start_time = None
last_message_time = None
history = ""
message_count = 0
session_count = 0
for message in reversed(messages):
if message.has_key("content"):
message_time = datetime.datetime.fromtimestamp(message["timestamp"])
if session_start_time is None:
session_start_time = message_time
elif (message_time - last_message_time).total_seconds() > 4 * 60 * 60:
create_conversation_event(title, message_count, session_start_time, ", ".join(participants), history, session_count == 0)
session_start_time = message_time
message_count = 0
session_count += 1
history = ""
last_message_time = message_time
message_count += 1
history += message["sender_name"] + ": " + message["content"] + "\n"
if message.has_key("photos") and not message["sender_name"] in participants:
events.add("Sent " + (str(len(message["photos"])) + " images" if len(message["photos"]) > 1 else "an image") + " to " + title + ".",
datetime.datetime.fromtimestamp(message["timestamp"]),
["facebook", "message", "image"], kvps={"participants": ", ".join(participants)}, images=[directory + photo["uri"] for photo in message["photos"]])
if message.has_key("photos") and message["sender_name"] in participants:
events.add("Received " + (str(len(message["photos"])) + " images" if len(
message["photos"]) > 1 else "an image") + " from " + message["sender_name"] + ".",
datetime.datetime.fromtimestamp(message["timestamp"]),
["facebook", "message", "image"], kvps={"participants": ", ".join(participants)},
images=[directory + photo["uri"] for photo in message["photos"]])
create_conversation_event(title, message_count, session_start_time, ", ".join(participants), history, session_count == 0)
def read_photos(directory):
photo_directory = directory + "photos/album/"
for album_file in [os.path.join(photo_directory, name) for name in os.listdir(photo_directory)]:
data = load_to_json(album_file)
album_name = data["name"]
for photo in data["photos"]:
file = directory + photo["uri"]
metadata = photo["media_metadata"]["photo_metadata"]
time = datetime.datetime.fromtimestamp(metadata["taken_timestamp"]) if metadata.has_key("taken_timestamp") else datetime.datetime.fromtimestamp(metadata["modified_timestamp"])
tags = ["facebook", "photo"]
kvps = {}
if metadata.has_key("camera_make") and metadata.has_key("camera_model"):
camera = metadata["camera_make"] + " " + metadata["camera_model"]
tags.append(camera)
kvps["camera"] = camera
events.add("Added photo to Facebook album " + album_name + ".",
time,
tags,
kvps,
hash=file,
latitude=(metadata["latitude"] if metadata.has_key("latitude") else None),
longitude=(metadata["longitude"] if metadata.has_key("longitude") else None),
images=[file])
def import_facebook_data(directory = "data/facebook/"):
with db.atomic():
print "Reading Facebook app posts..."
read_app_posts(directory)
read_app_installs(directory)
print "Reading Facebook comments..."
read_comments(directory)
print "Reading Facebook events..."
read_events(directory)
print "Reading Facebook friends..."
read_friends(directory)
print "Reading Facebook messages..."
read_messages(directory)
print "Reading Facebook photos..."
read_photos(directory)
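# Expected input layout (inferred from the readers above): `directory` should
# point at an unzipped Facebook "Download Your Information" JSON export, e.g.
#
#   data/facebook/
#     apps/posts_from_apps.json, apps/installed_apps.json
#     comments/comments.json
#     events/event_responses.json, events/your_events.json
#     friends/friends_added.json
#     messages/<conversation>/message.json
#     photos/album/*.json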
if __name__ == "__main__":
import_facebook_data() | [
"[email protected]"
] | |
9088845ee4cd9fc4f784727bc6f020bc4213b6a6 | 786de89be635eb21295070a6a3452f3a7fe6712c | /Detector/tags/V00-00-05/SConscript | d6fb3976c08526bf2e9adb925905a3b3a1b85635 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package Detector
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
#
# For the standard SIT packages which build libraries, applications,
# and Python modules it is usually sufficient to call
# standardSConscript() function which defines rules for all
# above targets. Many standard packages do not need any special options,
# but those which need can modify standardSConscript() behavior using
# a number of arguments, here is a complete list:
#
# LIBS - list of additional libraries needed by this package
# LIBPATH - list of directories for additional libraries
# BINS - dictionary of executables and their corresponding source files
# TESTS - dictionary of test applications and their corresponding source files
# SCRIPTS - list of scripts in app/ directory
# UTESTS - names of the unit tests to run, if not given then all tests are unit tests
# PYEXTMOD - name of the Python extension module, package name used by default
# CCFLAGS - additional flags passed to C/C++ compilers
# NEED_QT - set to True to enable Qt support
#
#
#standardSConscript()
standardSConscript(PYEXTMOD="detector_ext")
#, DOCGEN="doxy-all psana-modules-doxy")
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] | [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
|
3bb4b250c9e481e8342d3d85a655fadd62014d8a | 82c7adb0bfaa667c50ac7b336bb815863b378fa9 | /finace/items.py | 60984524386545327a13568ee270fe67c087fc4d | [
"Apache-2.0"
] | permissive | pythonyhd/finace | c8a7dca65dfe33cabcb90630d8791d3a5b942bc9 | 614d98ad92e1bbaa6cf7dc1d6dfaba4f24431688 | refs/heads/master | 2022-11-30T17:53:40.947747 | 2020-08-14T03:47:26 | 2020-08-14T03:47:26 | 287,253,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class FinaceItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"[email protected]"
] | |
5da193ab8f0e2efa5b0645b1029e0314fd56b029 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_011/ch92_2019_10_02_17_54_14_425785.py | 043154a806fa8650cc4d1a71882bef7df3c5440f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | def simplifica_dict(dicionario):
lista = []
for chave in dicionario:
if chave not in lista:
lista.append(chave)
for valor in dicionario[chave]:
            if valor not in lista:
                lista.append(valor)
return lista | [
"[email protected]"
] | |
ba8c4775490031f4b1abd9541e76e7d99773e96c | 44845df9198ae8c80fabecb6ed3ae6a44e43f38c | /modo/admin.py | 4aa582f42f92bbc0b441d3019c6b6fb02550a96f | [] | no_license | CarlosSanz81/cima | 570da404bddd0a813a025163a9e94676b9d0b4a9 | 3ad9b37af4a2d8a5789915208afffec7b6af3c0e | refs/heads/master | 2021-01-23T08:00:04.964713 | 2017-03-28T14:33:09 | 2017-03-28T14:33:09 | 72,184,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.contrib import admin
from .models import Modo
@admin.register(Modo)
class AdminModo(admin.ModelAdmin):
list_display = ('nombre',) | [
"[email protected]"
] | |
fcb878a2819bc83a0ed79bdb5b844916fa3fbdbe | 794e14945c0521b4eab03e8b9a3f93b8fa14e021 | /src/compas_rhino/utilities/constructors.py | e71275fa0d0e525a4bf92e58e2154310209ae1c9 | [
"MIT"
] | permissive | KEERTHANAUDAY/compas | 5e8ada865bc87ee48ba77b3f6fd03661a9b9c17d | 4d1101cf302f95a4472a01a1265cc64eaec6aa4a | refs/heads/master | 2021-07-11T16:26:19.452926 | 2020-09-10T14:27:11 | 2020-09-10T14:27:11 | 294,453,684 | 0 | 0 | MIT | 2020-09-10T15:47:31 | 2020-09-10T15:47:30 | null | UTF-8 | Python | false | false | 2,494 | py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.utilities import geometric_key
import Rhino
import scriptcontext as sc
__all__ = ['volmesh_from_polysurfaces']
def volmesh_from_polysurfaces(cls, guids):
"""Construct a volumetric mesh from given polysurfaces.
Essentially, this function does the following:
* find each of the polysurfaces and check if they have a boundary representation (b-rep)
* convert to b-rep and extract the edge loops
* make a face of each loop by referring to vertices using their geometric keys
* add a cell per brep
* and add the faces of a brep to the cell
* create a volmesh from the found vertices and cells
Parameters
----------
cls : :class:`compas.datastructures.VolMesh`
The class of volmesh.
guids : sequence of str or System.Guid
The *globally unique identifiers* of the polysurfaces.
Returns
-------
:class:`compas.datastructures.Volmesh`
The volumetric mesh object.
"""
gkey_xyz = {}
cells = []
for guid in guids:
cell = []
obj = sc.doc.Objects.Find(guid)
if not obj.Geometry.HasBrepForm:
continue
brep = Rhino.Geometry.Brep.TryConvertBrep(obj.Geometry)
for loop in brep.Loops:
curve = loop.To3dCurve()
segments = curve.Explode()
face = []
sp = segments[0].PointAtStart
ep = segments[0].PointAtEnd
sp_gkey = geometric_key(sp)
ep_gkey = geometric_key(ep)
gkey_xyz[sp_gkey] = sp
gkey_xyz[ep_gkey] = ep
face.append(sp_gkey)
face.append(ep_gkey)
for segment in segments[1:-1]:
ep = segment.PointAtEnd
ep_gkey = geometric_key(ep)
face.append(ep_gkey)
gkey_xyz[ep_gkey] = ep
cell.append(face)
cells.append(cell)
gkey_index = dict((gkey, index) for index, gkey in enumerate(gkey_xyz))
vertices = [list(xyz) for gkey, xyz in gkey_xyz.items()]
cells = [[[gkey_index[gkey] for gkey in face] for face in cell] for cell in cells]
return cls.from_vertices_and_cells(vertices, cells)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
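    # Illustrative usage sketch, not part of the original module: it assumes a
    # Rhino session with rhinoscriptsyntax available and compas' VolMesh class;
    # none of these names are imported above.
    #
    #   import rhinoscriptsyntax as rs
    #   from compas.datastructures import VolMesh
    #
    #   guids = rs.GetObjects("Select polysurfaces")
    #   volmesh = volmesh_from_polysurfaces(VolMesh, guids)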
| [
"[email protected]"
] | |
ff9d5d5e5194ae62f6f8a2888b5e8c36abe265af | 8cb6d50076c527b4c81d21b992fc93f77263adc5 | /orden/models.py | f0b736f5697c7219518a2d729725177f23df2fa5 | [] | no_license | alrvivas/CrevenApp | 6b9fefc4661a32cdf00ebb4a3eb869bf778f67e7 | 190291cfc798cbc52ba4cdbfa258ef0b983f7249 | refs/heads/master | 2020-06-04T20:24:03.685451 | 2015-02-12T17:36:01 | 2015-02-12T17:36:01 | 30,713,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,473 | py | from django.db import models
from distutils.version import LooseVersion
from django.contrib.auth.models import User
from cliente.models import Cliente
from producto.models import Product
from orden.managers import OrderManager
from util.fields import CurrencyField
from jsonfield.fields import JSONField
from django.db.models.aggregates import Sum
from django.core.urlresolvers import reverse
import django
# Create your models here.
class Order(models.Model):
objects = OrderManager()
"""
A model representing an Order.
An order is the "in process" counterpart of the shopping cart, which holds
stuff like the shipping and billing addresses (copied from the User
profile) when the Order is first created), list of items, and holds stuff
like the status, shipping costs, taxes, etc...
"""
PROCESSING = 10 # New order, addresses and shipping/payment methods chosen (user is in the shipping backend)
CONFIRMING = 20 # The order is pending confirmation (user is on the confirm view)
CONFIRMED = 30 # The order was confirmed (user is in the payment backend)
COMPLETED = 40 # Payment backend successfully completed
SHIPPED = 50 # The order was shipped to client
CANCELED = 60 # The order was canceled
CANCELLED = CANCELED # DEPRECATED SPELLING
PAYMENT = 30 # DEPRECATED!
STATUS_CODES = (
(PROCESSING, ('Procesando')),
(CONFIRMING, ('Confirmando')),
(CONFIRMED, ('Confirmada')),
(COMPLETED, ('Completada')),
(SHIPPED, ('Enviada')),
(CANCELED, ('Cancelada')),
)
# If the user is null, the order was created with a session
user = models.ForeignKey(User, blank=True, null=True, verbose_name=('User'))
cliente = models.ForeignKey(Cliente,null=True, blank=True)
status = models.IntegerField(choices=STATUS_CODES, default=PROCESSING,verbose_name=('Status'))
order_subtotal = CurrencyField(verbose_name=('Orden subtotal'))
order_total = CurrencyField(verbose_name=('Orden Total'))
order_totalpeso = models.DecimalField(max_digits=10,decimal_places=3,null=True)
shipping_address_text = models.TextField(('Direccion de Envio'), blank=True, null=True)
billing_address_text = models.TextField(('Direccion de Facturacion'), blank=True, null=True)
created = models.DateTimeField(auto_now_add=True,verbose_name=('Creado'))
modified = models.DateTimeField(auto_now=True, verbose_name=('Updated'))
cart_pk = models.PositiveIntegerField(('Cart primary key'), blank=True, null=True)
class Meta(object):
verbose_name = ('Orden')
verbose_name_plural = ('Ordenes')
def __unicode__(self):
return ('Orden ID: %(id)s') % {'id': self.pk}
def get_absolute_url(self):
return reverse('order_detail', kwargs={'pk': self.pk})
def is_paid(self):
"""Has this order been integrally paid for?"""
return self.amount_paid >= self.order_total
    is_payed = is_paid  # Backward compatibility, deprecated spelling
def is_completed(self):
return self.status == self.COMPLETED
def get_status_name(self):
return dict(self.STATUS_CODES)[self.status]
@property
def amount_paid(self):
"""
The amount paid is the sum of related orderpayments
"""
from .models import OrderPayment
sum_ = OrderPayment.objects.filter(order=self).aggregate(sum=Sum('amount'))
result = sum_.get('sum')
if result is None:
result = Decimal(0)
return result
    amount_payed = amount_paid  # Backward compatibility, deprecated spelling
@property
def shipping_costs(self):
from .models import ExtraOrderPriceField
sum_ = Decimal('0.00')
cost_list = ExtraOrderPriceField.objects.filter(order=self).filter(
is_shipping=True)
for cost in cost_list:
sum_ += cost.value
return sum_
@property
def short_name(self):
"""
A short name for the order, to be displayed on the payment processor's
website. Should be human-readable, as much as possible
"""
return "%s-%s" % (self.pk, self.order_total)
def set_billing_address(self, billing_address):
"""
Process billing_address trying to get as_text method from address
and copying.
You can override this method to process address more granulary
e.g. you can copy address instance and save FK to it in your order
class.
"""
if hasattr(billing_address, 'as_text') and callable(billing_address.as_text):
self.billing_address_text = billing_address.as_text()
self.save()
def set_shipping_address(self, shipping_address):
"""
Process shipping_address trying to get as_text method from address
and copying.
You can override this method to process address more granulary
e.g. you can copy address instance and save FK to it in your order
class.
"""
if hasattr(shipping_address, 'as_text') and callable(shipping_address.as_text):
self.shipping_address_text = shipping_address.as_text()
self.save()
# We need some magic to support Django < 1.3, which has no support for the
# models.on_delete option
f_kwargs = {}
if LooseVersion(django.get_version()) >= LooseVersion('1.3'):
f_kwargs['on_delete'] = models.SET_NULL
class OrderItem(models.Model):
"""
A line Item for an order. """
order = models.ForeignKey(Order, related_name='items', verbose_name=('Orden'))
product_reference = models.CharField(max_length=255, verbose_name=('Product reference'))
product_name = models.CharField(max_length=255, null=True, blank=True, verbose_name=('Product name'))
product = models.ForeignKey(Product, verbose_name=('Producto'), null=True, blank=True, **f_kwargs)
unit_price = CurrencyField(verbose_name=('Unit price'))
quantity = models.IntegerField(verbose_name=('Cantidad'))
line_subtotal = CurrencyField(verbose_name=('Line subtotal'))
line_total = CurrencyField(verbose_name=('Line total'))
line_subtotalpeso = models.DecimalField(max_digits = 30,decimal_places = 3,null=True)
line_totalpeso = models.DecimalField(max_digits = 30,decimal_places = 3,null=True)
class Meta(object):
verbose_name = ('Orden item')
verbose_name_plural = ('Orden items')
def save(self, *args, **kwargs):
if not self.product_name and self.product:
self.product_name = self.product.get_name()
super(OrderItem, self).save(*args, **kwargs)
def clear_products(sender, instance, using, **kwargs):
for oi in OrderItem.objects.filter(product=instance):
oi.product = None
oi.save()
if LooseVersion(django.get_version()) < LooseVersion('1.3'):
pre_delete.connect(clear_products, sender=Product)
class OrderExtraInfo(models.Model):
order = models.ForeignKey(Order, related_name="extra_info",verbose_name=('Order'))
text = models.TextField(verbose_name=('Extra info'), blank=True)
class Meta(object):
verbose_name = ('Orden informacion extra')
verbose_name_plural = ('Orden informacion extra')
class ExtraOrderPriceField(models.Model):
"""
This will make Cart-provided extra price fields persistent since we want
to "snapshot" their statuses at the time when the order was made
"""
order = models.ForeignKey(Order, verbose_name=('Order'))
label = models.CharField(max_length=255, verbose_name=('Label'))
value = CurrencyField(verbose_name=('Amount'))
data = JSONField(null=True, blank=True, verbose_name=('Serialized extra data'))
# Does this represent shipping costs?
is_shipping = models.BooleanField(default=False, editable=False, verbose_name=('Is shipping'))
class Meta(object):
verbose_name = ('Extra order price field')
verbose_name_plural = ('Extra order price fields')
class ExtraOrderItemPriceField(models.Model):
"""
This will make Cart-provided extra price fields persistent since we want
to "snapshot" their statuses at the time when the order was made
"""
order_item = models.ForeignKey(OrderItem, verbose_name=('Order item'))
label = models.CharField(max_length=255, verbose_name=('Label'))
value = CurrencyField(verbose_name=('Amount'))
data = JSONField(null=True, blank=True, verbose_name=('Serialized extra data'))
class Meta(object):
verbose_name = ('Extra order item price field')
verbose_name_plural = ('Extra order item price fields')
class OrderPayment(models.Model):
"""
A class to hold basic payment information. Backends should define their own
more complex payment types should they need to store more informtion
"""
order = models.ForeignKey(Order, verbose_name=('Order'))
# How much was paid with this particular transfer
amount = CurrencyField(verbose_name=('Amount'))
transaction_id = models.CharField(max_length=255, verbose_name=('Transaction ID'), help_text=("The transaction processor's reference"))
payment_method = models.CharField(max_length=255, verbose_name=('Payment method'), help_text=("The payment backend used to process the purchase"))
class Meta(object):
verbose_name = ('Order payment')
verbose_name_plural = ('Order payments')
| [
"[email protected]"
] | |
339f9df0dd568b0dac0574b4653c263cc9d9af76 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /come_old_problem_to_point/ask_thing/see_day/seem_problem/time/find_few_week_over_point.py | b79ee7a454f58433209a2c9c27edba7f4f38079b | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py |
#! /usr/bin/env python
def government(str_arg):
use_public_day(str_arg)
print('small_man_and_long_world')
def use_public_day(str_arg):
print(str_arg)
if __name__ == '__main__':
government('ask_day_from_year')
| [
"[email protected]"
] | |
a30686b6eabb2cac56f288acadb5c196580ebf70 | e6947a8ecc14ddb3c078321958856f888953f4fa | /my_project.py | d96f10703e7f2af3f045b4ee516f87f077c77cb7 | [] | no_license | raja073/SimpleMovieDB | a5dd4b924f1ecb8d04a61c9884e25e6a51af5c3c | 4d28dba684ea0ebf6ad4b78af4c2bdd13b072406 | refs/heads/master | 2021-09-05T13:59:35.372062 | 2018-01-28T14:06:57 | 2018-01-28T14:06:57 | 118,252,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py |
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__)  # Flask instance named after the running application module
#################################################################################################
# Adding database to Flask application
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Movie, Actor
engine = create_engine('sqlite:///movieactors.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
#################################################################################################
@app.route('/')
@app.route('/movies')
def movieList():
movies = session.query(Movie).all()
return render_template('full_movie_list.html', movies = movies)
@app.route('/movie/<int:movie_id>/')
def movieActors(movie_id):
movie = session.query(Movie).filter_by(id = movie_id).one()
actors = session.query(Actor).filter_by(movie_id = movie.id)
return render_template('menu.html', movie = movie, actors = actors)
@app.route('/movie/new/', methods=['GET','POST'])
def newMovie():
if request.method == 'POST':
newMov = Movie(name=request.form['name'])
session.add(newMov)
session.commit()
return redirect(url_for('movieList'))
else:
return render_template('new_movie.html')
# Task 1: Create route for newActor function here
@app.route('/movie/<int:movie_id>/new/', methods=['GET','POST'])
def newActor(movie_id):
if request.method == 'POST':
newAct = Actor(name=request.form['name'], gender=request.form['gender'], \
age=request.form['age'], biography=request.form['bio'], movie_id=movie_id)
session.add(newAct)
session.commit()
return redirect(url_for('movieActors', movie_id=movie_id))
else:
return render_template('new_actor.html', movie_id=movie_id)
# Task 2: Create route for editActor function here
@app.route('/movie/<int:movie_id>/<int:actor_id>/edit/', methods=['GET','POST'])
def editActor(movie_id, actor_id):
editedActor = session.query(Actor).filter_by(id=actor_id).one()
if request.method == 'POST':
if request.form['name']:
editedActor.name = request.form['name']
session.add(editedActor)
session.commit()
return redirect(url_for('movieActors', movie_id=movie_id))
else:
return render_template('edit_actors.html', movie_id=movie_id, actor_id=actor_id, i=editedActor)
# Task 3: Create route for deleteActor function here
@app.route('/movie/<int:movie_id>/<int:actor_id>/delete/', methods=['GET','POST'])
def deleteActor(movie_id, actor_id):
actorToDelete = session.query(Actor).filter_by(id=actor_id).one()
if request.method == 'POST':
session.delete(actorToDelete)
session.commit()
return redirect(url_for('movieActors', movie_id=movie_id))
else:
return render_template('delete_actor.html', i=actorToDelete)
if __name__ == '__main__':
app.debug = True
app.run(host = '0.0.0.0', port = 5000) | [
"[email protected]"
] | |
9fd2adff33eb37163fba31027204557321194233 | 6320fef2ea7376c2b35f97f1a5af004e90f09098 | /1-2주차 실습(복습)/venv/Lib/site-packages/pygments/formatters/irc.py | 49f8b3d13114e627e86ef8bdd693496bd155fd7f | [] | no_license | Dplo1514/ploaistudy | 7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9 | e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c | refs/heads/master | 2023-09-03T00:45:55.601651 | 2021-10-24T12:19:38 | 2021-10-24T12:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py | """
pygments.formatters.irc
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for IRC output
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.util import get_choice_opt
__all__ = ['IRCFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
IRC_COLORS = {
Token: ('', ''),
Whitespace: ('gray', 'brightblack'),
Comment: ('gray', 'brightblack'),
Comment.Preproc: ('cyan', 'brightcyan'),
Keyword: ('blue', 'brightblue'),
Keyword.Type: ('cyan', 'brightcyan'),
Operator.Word: ('magenta', 'brightcyan'),
Name.Builtin: ('cyan', 'brightcyan'),
Name.Function: ('green', 'brightgreen'),
Name.Namespace: ('_cyan_', '_brightcyan_'),
Name.Class: ('_green_', '_brightgreen_'),
Name.Exception: ('cyan', 'brightcyan'),
Name.Decorator: ('brightblack', 'gray'),
Name.Variable: ('red', 'brightred'),
Name.Constant: ('red', 'brightred'),
Name.Attribute: ('cyan', 'brightcyan'),
Name.Tag: ('brightblue', 'brightblue'),
String: ('yellow', 'yellow'),
Number: ('blue', 'brightblue'),
Generic.Deleted: ('brightred', 'brightred'),
Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Error: ('brightred', 'brightred'),
Error: ('_brightred_', '_brightred_'),
}
IRC_COLOR_MAP = {
'white': 0,
'black': 1,
'blue': 2,
'brightgreen': 3,
'brightred': 4,
'yellow': 5,
'magenta': 6,
'orange': 7,
'green': 7, #compat w/ ansi
'brightyellow': 8,
'lightgreen': 9,
'brightcyan': 9, # compat w/ ansi
'cyan': 10,
'lightblue': 11,
'red': 11, # compat w/ ansi
'brightblue': 12,
'brightmagenta': 13,
'brightblack': 14,
'gray': 15,
}
def ircformat(color, text):
if len(color) < 1:
return text
add = sub = ''
if '_' in color: # italic
add += '\x1D'
sub = '\x1D' + sub
color = color.strip('_')
if '*' in color: # bold
add += '\x02'
sub = '\x02' + sub
color = color.strip('*')
# underline (\x1F) not supported
# backgrounds (\x03FF,BB) not supported
if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
sub = '\x03' + sub
    return add + text + sub
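# Illustrative example (not part of the original module): ircformat('*brightred*', 'err')
# returns '\x02\x0304err\x03\x02', i.e. the text wrapped in the bold toggle and
# IRC colour code 04.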
class IRCFormatter(Formatter):
r"""
Format tokens with IRC color sequences
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers in the output as well
(default: ``False`` = no line numbers).
"""
name = 'IRC'
aliases = ['irc', 'IRC']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("\n%04d: " % self._lineno)
def _format_unencoded_with_lineno(self, tokensource, outfile):
self._write_lineno(outfile)
for ttype, value in tokensource:
if value.endswith("\n"):
self._write_lineno(outfile)
value = value[:-1]
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
self._write_lineno(outfile)
if line:
outfile.write(ircformat(color, line[:-1]))
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
outfile.write("\n")
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._format_unencoded_with_lineno(tokensource, outfile)
return
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ircformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
| [
"[email protected]"
] | |
874faf954ae174bedcfe8ce4f42f219ac04bd355 | 14449108de18a8e956830cd7d5107bb38de41c5d | /workshopvenues/venues/migrations/0009_auto__del_field_venue_address.py | 45329577f0f1c85666401d3a4ba848f7477f2436 | [
"BSD-3-Clause"
] | permissive | andreagrandi/workshopvenues | 736e53ccb6ff0b15503e92a5246b945f615d2ff8 | 21978de36f443296788727d709f7f42676b24484 | refs/heads/master | 2021-05-16T03:00:23.879925 | 2014-03-18T15:10:00 | 2014-03-18T15:10:00 | 8,843,235 | 1 | 3 | null | 2015-10-26T11:11:20 | 2013-03-17T23:19:33 | Python | UTF-8 | Python | false | false | 3,698 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Venue.address'
db.delete_column(u'venues_venue', 'address_id')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Venue.address'
raise RuntimeError("Cannot reverse this migration. 'Venue.address' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Venue.address'
db.add_column(u'venues_venue', 'address',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['venues.Address']),
keep_default=False)
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Country']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.country': {
'Meta': {'object_name': 'Country'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.image': {
'Meta': {'object_name': 'Image'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Venue']"})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['venues'] | [
"[email protected]"
] | |
d3813671c7b96dd94e66342390d4574c412700a3 | ef32b87973a8dc08ba46bf03c5601548675de649 | /pytglib/api/functions/search_user_by_phone_number.py | 218f9710f017f0467ab39dc7429e7841c3300db5 | [
"MIT"
] | permissive | iTeam-co/pytglib | 1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721 | d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5 | refs/heads/master | 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null | UTF-8 | Python | false | false | 751 | py |
from ..utils import Object
class SearchUserByPhoneNumber(Object):
"""
Searches a user by their phone number. Returns a 404 error if the user can't be found
Attributes:
ID (:obj:`str`): ``SearchUserByPhoneNumber``
Args:
phone_number (:obj:`str`):
Phone number to search for
Returns:
User
Raises:
:class:`telegram.Error`
"""
ID = "searchUserByPhoneNumber"
def __init__(self, phone_number, extra=None, **kwargs):
self.extra = extra
self.phone_number = phone_number # str
@staticmethod
def read(q: dict, *args) -> "SearchUserByPhoneNumber":
phone_number = q.get('phone_number')
return SearchUserByPhoneNumber(phone_number)
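# Illustrative construction only (how the request object is actually sent depends
# on the surrounding client wrapper and is not shown here):
#   query = SearchUserByPhoneNumber("+15551234567")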
| [
"[email protected]"
] | |
1c23cd9bec50756f3a2bea2745a173ac45cdc882 | c278b06f77cac0a2942fa2ca0636f2dc72b52505 | /4.blog_project/mydjangoproject/blog/migrations/0004_auto_20190320_0504.py | f8a4d492ef89b65190cfc991db5c4e1a9cab6c16 | [] | no_license | hooong/Django_study | 2d27bc7d5b2ad53fa4c9e1bcd808437af98cbe09 | b760ace8f562d538ad18d552388e48ed52cc78d1 | refs/heads/master | 2022-12-02T15:51:24.510596 | 2019-11-02T07:38:37 | 2019-11-02T07:38:37 | 165,012,883 | 5 | 1 | null | 2022-11-22T03:26:18 | 2019-01-10T07:35:07 | Python | UTF-8 | Python | false | false | 318 | py | # Generated by Django 2.1.5 on 2019-03-20 05:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blog_blog_hit'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={},
),
]
| [
"[email protected]"
] | |
81d70837b62ed7c9dbad2ad8927c5d723e1d4953 | 63e2bed7329c79bf67279f9071194c9cba88a82c | /SevOneApi/python-client/test/test_performance_metrics_settings.py | 5471c91fa1f10ac623252fd1733b391f5e288962 | [] | no_license | jsthomason/LearningPython | 12422b969dbef89578ed326852dd65f65ab77496 | 2f71223250b6a198f2736bcb1b8681c51aa12c03 | refs/heads/master | 2021-01-21T01:05:46.208994 | 2019-06-27T13:40:37 | 2019-06-27T13:40:37 | 63,447,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.performance_metrics_settings import PerformanceMetricsSettings # noqa: E501
from swagger_client.rest import ApiException
class TestPerformanceMetricsSettings(unittest.TestCase):
"""PerformanceMetricsSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPerformanceMetricsSettings(self):
"""Test PerformanceMetricsSettings"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.performance_metrics_settings.PerformanceMetricsSettings() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2e56f5cdcb6487d4631e61f2dd8ee8baa69b504c | 0fb3b73f8e6bb9e931afe4dcfd5cdf4ba888d664 | /Web-UI/scrapyproject/migrations/0010_auto_20170406_1835.py | 28afbaa4d39615071f49bc6050e0d270de3e4686 | [
"MIT"
] | permissive | mrpal39/ev_code | 6c56b1a4412503604260b3346a04ef53a2ba8bf2 | ffa0cf482fa8604b2121957b7b1d68ba63b89522 | refs/heads/master | 2023-03-24T03:43:56.778039 | 2021-03-08T17:48:39 | 2021-03-08T17:48:39 | 345,743,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapyproject', '0009_auto_20170215_0657'),
]
operations = [
migrations.RemoveField(
model_name='mongopass',
name='user',
),
migrations.DeleteModel(
name='MongoPass',
),
]
| [
"[email protected]"
] | |
716b77deb7f8f935eada888a20f2b54d08a47dd3 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/services/services/hotel_group_view_service/client.py | b9c83f2d85aa2f2fdc21c12add578c9da910ff31 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 18,873 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import hotel_group_view
from google.ads.googleads.v9.services.types import hotel_group_view_service
from .transports.base import HotelGroupViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import HotelGroupViewServiceGrpcTransport
class HotelGroupViewServiceClientMeta(type):
"""Metaclass for the HotelGroupViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[HotelGroupViewServiceTransport]]
_transport_registry["grpc"] = HotelGroupViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[HotelGroupViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class HotelGroupViewServiceClient(metaclass=HotelGroupViewServiceClientMeta):
"""Service to manage Hotel Group Views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
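    # Examples of the conversion performed above (illustrative):
    #   "googleads.googleapis.com"   -> "googleads.mtls.googleapis.com"
    #   "foo.sandbox.googleapis.com" -> "foo.mtls.sandbox.googleapis.com"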
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HotelGroupViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HotelGroupViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> HotelGroupViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
HotelGroupViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def hotel_group_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified hotel_group_view string."""
return "customers/{customer_id}/hotelGroupViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
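    # e.g. hotel_group_view_path("123", "456", "789")
    #      -> "customers/123/hotelGroupViews/456~789"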
@staticmethod
def parse_hotel_group_view_path(path: str) -> Dict[str, str]:
"""Parse a hotel_group_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/hotelGroupViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, HotelGroupViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the hotel group view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.HotelGroupViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, HotelGroupViewServiceTransport):
# transport is a HotelGroupViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = HotelGroupViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_hotel_group_view(
self,
request: Union[
hotel_group_view_service.GetHotelGroupViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hotel_group_view.HotelGroupView:
r"""Returns the requested Hotel Group View in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetHotelGroupViewRequest, dict]):
The request object. Request message for
[HotelGroupViewService.GetHotelGroupView][google.ads.googleads.v9.services.HotelGroupViewService.GetHotelGroupView].
resource_name (:class:`str`):
Required. Resource name of the Hotel
Group View to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.HotelGroupView:
A hotel group view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hotel_group_view_service.GetHotelGroupViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, hotel_group_view_service.GetHotelGroupViewRequest
):
request = hotel_group_view_service.GetHotelGroupViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_hotel_group_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("HotelGroupViewServiceClient",)
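# Illustrative usage sketch (assumes valid OAuth2 credentials in `credentials`;
# the customer and criterion IDs below are made up):
#
#   client = HotelGroupViewServiceClient(credentials=credentials)
#   view = client.get_hotel_group_view(
#       resource_name="customers/1234567890/hotelGroupViews/111~222")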
| [
"[email protected]"
] | |
6fcc525132976c116ea70511282befacca492375 | 573a516233447c8384f26ed56ae4e356e3995153 | /ques6.py | c06b87f3ab0dae128a898dd372ba780d807a5d97 | [] | no_license | BhagyashreeKarale/if-else | 437b0867247f827c44f469a90efeecbf9444803d | 1224fca2bdda389b22897f17b22f21320260e75f | refs/heads/main | 2023-07-19T15:03:03.351825 | 2021-09-11T19:16:07 | 2021-09-11T19:16:07 | 397,150,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # Question 6
# Draw a flowchart for this question and write the program.
# Take two numbers as input from the user in variables varx and vary.
# Check whether varx is divisible by vary.
# If yes, print Divisible else print Not Divisible.
varx=int(input("Enter dividend:\n"))
vary=int(input("Enter divisor:\n"))
if varx % vary == 0:
print(varx,"is completely divisible by",vary)
else:
print(varx,"isn't completely divisible by",vary)
| [
"[email protected]"
] | |
3ddc20aebdc144d9693019af06524d5ea4513712 | 78a28bd6b95041bfe67d8aa6a3a3c111911afaab | /18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper/news_scraper/settings.py | dec217105fcd124cbb665b4076642b6d93bf5eb9 | [
"MIT"
] | permissive | jailukanna/Python-Projects-Dojo | 8200a60ab925bf796bd39cb1977e6f0e0a575c23 | 98c7234b84f0afea99a091c7198342d66bbdff5b | refs/heads/master | 2023-03-15T06:54:38.141189 | 2021-03-11T08:17:02 | 2021-03-11T08:17:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | # Scrapy settings for news_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'news_scraper'
SPIDER_MODULES = ['news_scraper.spiders']
NEWSPIDER_MODULE = 'news_scraper.spiders'
CLOSESPIDER_PAGECOUNT = 10
FEED_URI = 'news_articles.json'
FEED_FORMAT = 'json'
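# The three settings above: CLOSESPIDER_PAGECOUNT makes the CloseSpider extension
# stop the crawl after 10 downloaded responses, while FEED_URI / FEED_FORMAT export
# the scraped items to news_articles.json as JSON (the classic feed-export settings,
# superseded by FEEDS in newer Scrapy releases).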
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'news_scraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'news_scraper.middlewares.NewsScraperSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'news_scraper.middlewares.NewsScraperDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'news_scraper.pipelines.NewsScraperPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
d5f34735f201edeb1130c4cb2a9efc396cbf184e | 1ec8734beba25739979cbd4a9414a95273cce6aa | /10.9/移除元素.py | f3a3c26997d12fbc85a770412e56ce40c9f3a40b | [] | no_license | MATATAxD/untitled1 | 4431e4bc504e74d9a96f54fd6065ce46d5d9de40 | 18463f88ce60036959aabedabf721e9d938bacfb | refs/heads/master | 2023-01-01T23:16:30.140947 | 2020-10-23T04:32:38 | 2020-10-23T04:32:38 | 306,529,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | from typing import List
def removeElement(nums:List[int],val:int)->int:
fast = 0
slow = 0
while fast < len(nums):
        if nums[fast] == val:
            fast += 1
        else:
            nums[slow] = nums[fast]
            slow += 1
            fast += 1
return slow
a = [1,2,3,4,5,6]
print(removeElement(a,1)) | [
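# With the input above this prints 5: the first five slots of `a` end up as
# [2, 3, 4, 5, 6] and the return value is the new logical length.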
"[email protected]"
] | |
a7438ca02484cd42c1d46f32f2b6415efa83040e | cadb25b610777d1a91404c7dcfe3d29ca1ddd542 | /apps/localidades/migrations/0010_alter_localidade_nomelocalidade.py | cb9f7aeb7196267ac6b6462739e16d51937b8d84 | [] | no_license | luanaAlm/sistema_ebd | 851b8d98979e33187ec68b301910fe0c309a1ce2 | ec6a97ddf413e5b10ddff20a781e37ddce77794d | refs/heads/main | 2023-08-28T01:10:27.381064 | 2021-10-18T23:11:25 | 2021-10-18T23:11:25 | 415,992,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # Generated by Django 3.2.7 on 2021-10-06 18:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('localidades', '0009_alter_localidade_nomelocalidade'),
]
operations = [
migrations.AlterField(
model_name='localidade',
name='nomeLocalidade',
field=models.CharField(max_length=100, verbose_name='Igreja'),
),
]
| [
"[email protected]"
] | |
c45e8e0400ff9d0a80d3861ee9d4f16481928447 | 845e3c428e18232777f17b701212dcbb1b72acc1 | /lib/fast_rcnn/test_upper_body.py | a0d72a59ea4af92083ed2328b831359d2c136799 | [
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | chuanxinlan/ohem-1 | dd10b2f5ff15e81ab9e42e936bb44d98e01c6795 | b7552ceb8ed1e9768e0d522258caa64b79834b54 | refs/heads/master | 2021-09-16T18:31:25.651432 | 2018-06-23T10:09:24 | 2018-06-23T10:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,043 | py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from fast_rcnn.nms_wrapper import nms
import cPickle
from utils.blob import im_list_to_blob
import os
from utils.cython_bbox import bbox_vote
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
# Make width and height be multiples of a specified number
im_scale_x = np.floor(im.shape[1] * im_scale / cfg.TEST.SCALE_MULTIPLE_OF) * cfg.TEST.SCALE_MULTIPLE_OF / im.shape[1]
im_scale_y = np.floor(im.shape[0] * im_scale / cfg.TEST.SCALE_MULTIPLE_OF) * cfg.TEST.SCALE_MULTIPLE_OF / im.shape[0]
im = cv2.resize(im_orig, None, None, fx=im_scale_x, fy=im_scale_y,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(np.array([im_scale_x, im_scale_y, im_scale_x, im_scale_y]))
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
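# Worked example (assuming cfg.TEST.SCALES = (600,), cfg.TEST.MAX_SIZE = 1000 and
# cfg.TEST.SCALE_MULTIPLE_OF = 1): a 375x500 image gives im_scale = 600/375 = 1.6,
# so the blob holds one 600x800 (H x W) image and the per-scale factors are
# [1.6, 1.6, 1.6, 1.6].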
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
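# Note: with cfg.TEST.HAS_RPN enabled, blobs['rois'] stays None - region proposals
# are produced by the RPN inside the network instead of being passed in.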
def im_detect(net, im, _t, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
_t['im_preproc'].tic()
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[np.hstack((im_blob.shape[2], im_blob.shape[3], im_scales[0]))],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
net.blobs['data'].data[...] = blobs['data']
#forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].data[...] = blobs['im_info']
#forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
net.blobs['rois'].data[...] = blobs['rois']
#forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
_t['im_preproc'].toc()
_t['im_net'].tic()
blobs_out = net.forward()
_t['im_net'].toc()
#blobs_out = net.forward(**forward_kwargs)
_t['im_postproc'].tic()
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#---------------_cg_ added upper body --------------------
scores_upper_body = blobs_out['cls_prob_upper_body']
rois_upper_body = rois.copy()
rois_upper_body[:, 4] = \
(rois_upper_body[:, 2] + rois_upper_body[:, 4]) / 2
boxes_upper_body = rois_upper_body[:, 1:5] / im_scales[0]
upper_body_deltas = blobs_out['upper_body_pred']
pred_upper_body = bbox_transform_inv(boxes_upper_body, \
upper_body_deltas)
pred_upper_body = clip_boxes(pred_upper_body, im.shape)
#---------------end _cg_ added upper body --------------------
_t['im_postproc'].toc()
return scores, pred_boxes, scores_upper_body, pred_upper_body
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
# CPU NMS is much faster than GPU NMS when the number of boxes
            # is relatively small (e.g., < 10k)
# TODO(rbg): autotune NMS dispatch
keep = nms(dets, thresh, force_cpu=True)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes + 1)]
output_dir = get_output_dir(imdb, net)
# timers
_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
for i in xrange(num_images):
# filter out any ground truth boxes
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only those rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
scores, boxes, scores_upper_body, boxes_upper_body = \
im_detect(net, im, _t, box_proposals)
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in xrange(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
dets_NMSed = cls_dets[keep, :]
'''
if cfg.TEST.BBOX_VOTE:
cls_dets = bbox_vote(dets_NMSed, cls_dets)
else:
cls_dets = dets_NMSed
'''
cls_dets = dets_NMSed
#--------------- _cg_ added upper body --------------------
inds = np.where(scores_upper_body[:, j] > thresh)[0]
cls_scores_upper_body = scores_upper_body[inds, j]
cls_boxes_upper_body = boxes_upper_body[inds, j*4:(j+1)*4]
cls_dets_upper_body = np.hstack((cls_boxes_upper_body,
cls_scores_upper_body[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets_upper_body, cfg.TEST.NMS)
dets_NMSed = cls_dets_upper_body[keep, :]
cls_dets_upper_body = dets_NMSed
#--------------- end _cg_ added upper body --------------------
if vis:
vis_detections(im, imdb.classes[j], cls_dets)
all_boxes[j][i] = cls_dets
all_boxes[j + 1][i] = cls_dets_upper_body
'''
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
'''
_t['misc'].toc()
print 'im_detect: {:d}/{:d} net {:.3f}s preproc {:.3f}s postproc {:.3f}s misc {:.3f}s' \
.format(i + 1, num_images, _t['im_net'].average_time,
_t['im_preproc'].average_time, _t['im_postproc'].average_time,
_t['misc'].average_time)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
# print 'Evaluating detections'
# imdb.evaluate_detections(all_boxes, output_dir)
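# Hedged usage sketch (added; not part of the original file). How a caller might
# drive test_net -- the prototxt/caffemodel paths and the imdb factory call are
# placeholders and are not defined in this module:
#
#   net = caffe.Net(prototxt, caffemodel, caffe.TEST)
#   imdb = get_imdb('voc_2007_test')
#   test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False)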
| [
"[email protected]"
] | |
13279672b8c47331a37e9052b40787fc939702ac | 5b85703aa0dd5a6944d99370a5dde2b6844517ec | /03.Python/15.ZerosandOnes.py | 4d5e2053608bce9ef159ceccd2e274087611e083 | [] | no_license | alda07/hackerrank | 255329196e6a4b9d598c3f51790caf4a99a755bc | a09091f859e87462c95ee856cbbd0ad9b5992159 | refs/heads/master | 2021-10-24T07:38:34.795632 | 2019-03-23T17:29:32 | 2019-03-23T17:29:32 | 90,329,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # zeros
# import numpy
# print (numpy.zeros((1, 2)))
# print (numpy.zeros((1, 2), dtype = numpy.int))
# ones
# import numpy
# print (numpy.ones((1, 2)))
# print (numpy.ones((1, 2), dtype = numpy.int))
import numpy
list_i = list(map(int,input().split()))
print(numpy.zeros(list_i, dtype = numpy.int))
print(numpy.ones(list_i, dtype = numpy.int))
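# Illustrative example (added, not in the original submission): for the input
# line "3 3" the script prints a 3x3 integer array of zeros followed by a 3x3
# array of ones:
#   [[0 0 0]
#    [0 0 0]
#    [0 0 0]]
#   [[1 1 1]
#    [1 1 1]
#    [1 1 1]]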
| [
"[email protected]"
] | |
dd7c42bf3677ff4d5c0535593c8a3d205b5bbb4f | 9404a8593ff2d82133897c9e187523d301df7888 | /0x09-Unittests_and_integration_tests/client.py | 09fe617f4bf9b728195056ec7874888a22e52d18 | [] | no_license | emna7/holbertonschool-web_back_end | ac2bc16e47f464530c4dee23497488c77377977e | 744e6cb3bb67b2caa30f967708243b5474046961 | refs/heads/main | 2023-03-06T17:56:10.699982 | 2021-02-12T21:24:04 | 2021-02-12T21:24:04 | 305,394,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py |
#!/usr/bin/env python3
"""A github org client
"""
from typing import (
List,
Dict,
)
from utils import (
get_json,
access_nested_map,
memoize,
)
class GithubOrgClient:
"""A Githib org client
"""
ORG_URL = "https://api.github.com/orgs/{org}"
def __init__(self, org_name: str) -> None:
"""Init method of GithubOrgClient"""
self._org_name = org_name
@memoize
def org(self) -> Dict:
"""Memoize org"""
return get_json(self.ORG_URL.format(org=self._org_name))
@property
def _public_repos_url(self) -> str:
"""Public repos URL"""
return self.org["repos_url"]
@memoize
def repos_payload(self) -> Dict:
"""Memoize repos payload"""
return get_json(self._public_repos_url)
def public_repos(self, license: str = None) -> List[str]:
"""Public repos"""
json_payload = self.repos_payload
public_repos = [
repo["name"] for repo in json_payload
if license is None or self.has_license(repo, license)
]
return public_repos
@staticmethod
def has_license(repo: Dict[str, Dict], license_key: str) -> bool:
"""Static: has_license"""
assert license_key is not None, "license_key cannot be None"
try:
has_license = access_nested_map(repo, ("license", "key")) == license_key
except KeyError:
return False
return has_license
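# Hedged usage sketch (added for illustration; the org name and license key below
# are placeholder values, not taken from this module):
#
#   client = GithubOrgClient("some-org")
#   all_repos = client.public_repos()                          # every public repo name
#   apache_repos = client.public_repos(license="apache-2.0")   # filtered by license key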
| [
"[email protected]"
] | |
ac60830bcb8ab8d05d3b4a995a1b9e7f2e93a2fa | 2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b | /665.非递减数列.py | 69ccfdcba89fb3679104b70233a147b4b5ee3c0d | [] | no_license | mqinbin/python_leetcode | 77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3 | 73e0c81867f38fdf4051d8f58d0d3dc245be081e | refs/heads/main | 2023-03-10T18:27:36.421262 | 2021-02-25T07:24:10 | 2021-02-25T07:24:10 | 314,410,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | #
# @lc app=leetcode.cn id=665 lang=python3
#
# [665] Non-decreasing Array (非递减数列)
#
# @lc code=start
class Solution:
def checkPossibility(self, nums: List[int]) -> bool:
c = 0
for i in range(len(nums) -1):
if nums[i] > nums[i+1]:
c +=1
if i > 0 :
if nums[i-1] <= nums[i+1]:
nums[i] = nums[i-1]
else :
nums[i+1] = nums[i]
if c > 1:
return False
return True
# @lc code=end
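# Worked example (added for illustration): for nums = [4, 2, 3] the only conflict
# is at i = 0 (4 > 2); it is simply counted (conceptually nums[0] is lowered to 2),
# so c stays at 1 and the function returns True. For nums = [3, 4, 2, 3] the
# conflict at i = 1 rewrites nums[2] to 4, a second conflict then appears at i = 2,
# c becomes 2 and the function returns False.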
| [
"[email protected]"
] | |
27d214b5b033cb21e812b5568854396b459d8ab9 | bdd40ea113fdf2f04ef7d61a096a575322928d1d | /Rupesh/DjangoTutorial/TOdo/TOdo/Task/migrations/0002_auto_20200219_0600.py | 56b743b8b63b2342fd7f88303c0256f187fcae5f | [] | no_license | rupesh7399/rupesh | 3eebf924d33790c29636ad59433e10444b74bc2f | 9b746acf37ab357c147cdada1de5458c5fc64f53 | refs/heads/master | 2020-12-22T05:01:29.176696 | 2020-03-03T10:32:36 | 2020-03-03T10:32:36 | 202,111,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | # Generated by Django 2.2 on 2020-02-19 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Task', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='task',
name='lastDate',
field=models.DateField(),
),
]
| [
"[email protected]"
] | |
93218602b7a30997b8ff0defd0e336e8bd93427c | 07acf11fadb7fbbf342e1f5005c9197d36b79f56 | /aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DescribeUserAvgTimeByDayRequest.py | 9a798b69525f04e4ccee97970a5bb855ed2ec093 | [
"Apache-2.0"
] | permissive | ccflying/aliyun-openapi-python-sdk | 9ce8d43a39b8fa10a78fdf4f4831befbfc48ad4e | 2ddb938c366c06526deeec0ec46f8266235e52f6 | refs/heads/master | 2020-04-19T09:18:02.597601 | 2019-01-29T05:02:01 | 2019-01-29T05:02:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeUserAvgTimeByDayRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vod', '2017-03-21', 'DescribeUserAvgTimeByDay','vod')
def get_VideoType(self):
return self.get_query_params().get('VideoType')
def set_VideoType(self,VideoType):
self.add_query_param('VideoType',VideoType)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"[email protected]"
] | |
adbedc8206330810c70cdc570b3140a5f632e51e | f7e5a206c5ca75043b662c8f9905a070b7e37060 | /cart/views.py | 54f1f59dc1f21f1a4b7c6b04e842911c7f358e15 | [] | no_license | sweetmentor/Str4-eCommerce-App | 4d22945f7b5dc0a40b577b8ed45caf22c9e644d4 | e50edff9183a207c8e7daff16136059fcb5f9002 | refs/heads/master | 2020-03-22T00:26:36.973580 | 2019-01-29T01:13:56 | 2019-01-29T01:13:56 | 139,244,613 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from product.models import Product
from .utils import get_cart_items_and_total
# Create your views here.
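# Illustrative note (added): the cart lives in the session as a plain dict mapping
# a product id (as a string) to a quantity, e.g. {'3': 2, '7': 1} means two units
# of product 3 and one unit of product 7. add_to_cart below increments the count,
# remove_from_cart decrements it and deletes the key once it reaches zero.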
def view_cart(request):
cart = request.session.get('cart', {})
context = get_cart_items_and_total(cart)
return render(request, "cart/cart.html", context)
def remove_from_cart(request):
id = request.POST['product_id']
product = get_object_or_404(Product, pk=id)
cart = request.session.get('cart', {})
if id in cart:
# Subtract 1 from the quantity
cart[id] -= 1
# If the quantity is now 0, then delete the item
if cart[id] == 0:
del cart[id]
request.session['cart'] = cart
return redirect('view_cart')
def add_to_cart(request):
# Get the product we're adding
id = request.POST['product_id']
product = get_object_or_404(Product, pk=id)
# Get the current Cart
cart = request.session.get('cart', {})
# Update the Cart
cart[id] = cart.get(id, 0) + 1
# Save the Cart back to the session
request.session['cart'] = cart
# Redirect somewhere
return redirect("/") | [
"[email protected]"
] | |
42d2ccd0a08c1520cae02783637eee771aedda4f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_196/ch31_2020_03_14_15_42_06_957078.py | 7229a92343174b1d0b472e5e5af883e664d7d8d9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | def eh_primo(a):
    if a < 2:
        return False
    if a == 2:
        return True
    if a % 2 == 0:
        return False
    x = 3
    while x * x <= a:
        if a % x == 0:
            return False
        x += 2
    return True | [
"[email protected]"
] | |
91ad79fe802f8441997c7574f787866330f8fdaf | 7a0334693cd31fe4fdef06324ede0d72c6530c40 | /event_crud/migrations/0001_initial.py | 414c9f942def602edac8017b35088131a4404944 | [] | no_license | lilitotaryan/eventnet-back-end | 7949668a4108b36a6e1a2f6439d6e966991d64ba | 5828b1520b8feeb363fdac0b85b08e001572991e | refs/heads/main | 2023-02-18T02:24:45.475978 | 2021-01-22T18:15:42 | 2021-01-22T18:15:42 | 332,027,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # Generated by Django 2.2.5 on 2020-02-26 19:01
import authentication.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('user_crud', '0005_remove_customuser_phone_number'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default=None, max_length=100)),
('description', models.CharField(default=None, max_length=500)),
('date', models.DateTimeField(default=authentication.utils.get_current_time)),
('is_responsible', models.BooleanField(default=False)),
('contact_phone_number', models.CharField(default=None, max_length=100, unique=True)),
('address', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_crud.Address')),
('categories', models.ManyToManyField(blank=True, to='user_crud.Category')),
('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
122a05dc3115f6ed66c2747d3dc1e78c44cd4955 | 52e0e1ef7675d8bac51899f23b2722e7e7f58992 | /core/data/base_collector.py | 972479bf987185887d7e79d61ee4b166286f1b46 | [
"Apache-2.0"
] | permissive | knowmefly/DI-drive | 2c8963a04d00aa8b3c3354630b6df9e3e6a6770e | ade3c9dadca29530f20ab49b526ba32818ea804b | refs/heads/main | 2023-07-08T14:40:39.625522 | 2021-07-21T15:54:48 | 2021-07-21T15:54:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | import copy
from abc import abstractmethod
from typing import Any, Dict
from easydict import EasyDict
from ding.utils import EasyTimer
class BaseCollector(object):
config = dict()
def __init__(
self,
cfg: Dict,
env: Any = None,
policy: Any = None,
) -> None:
if 'cfg_type' not in cfg:
self._cfg = self.__class__.default_config()
self._cfg.update(cfg)
else:
self._cfg = cfg
self._end_flag = False
self._timer = EasyTimer()
if env is not None:
self.env = env
if policy is not None:
self.policy = policy
@property
def env(self) -> Any:
return self._env
@env.setter
def env(self, _env: Any) -> None:
self._env = _env
@property
def policy(self) -> Any:
return self._policy
@policy.setter
def policy(self, _policy: Any) -> None:
self._policy = _policy
@abstractmethod
def reset(self) -> Any:
raise NotImplementedError
@abstractmethod
def close(self) -> Any:
raise NotImplementedError
@abstractmethod
def collect(self) -> Any:
raise NotImplementedError
@classmethod
def default_config(cls: type) -> EasyDict:
cfg = EasyDict(cls.config)
cfg.cfg_type = cls.__name__ + 'Config'
return copy.deepcopy(cfg)
| [
"[email protected]"
] | |
7b2f3ffb266a6b73b251aa0bed91d044d1201bd4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03162/s990109089.py | 40f2258c0867493398fd6c13585706e99574813b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | n=int(input())
happines=[list(map(int,input().split())) for _ in range(n)]
solution=[[0,0,0] for _ in range(n)]
solution[0][0]=happines[0][0]
solution[0][1]=happines[0][1]
solution[0][2]=happines[0][2]
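# Added note on the recurrence (illustration only): solution[i][j] is the best
# total happiness up to day i when activity j is done on day i, i.e.
#   solution[i][j] = happines[i][j] + max(solution[i-1][k] for k != j)
# Tiny example with n = 2 and happiness rows [10, 40, 70] and [20, 50, 80]:
# day 0 -> [10, 40, 70]; day 1 -> [20+70, 50+70, 80+40] = [90, 120, 120]; answer 120.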
for i in range(1,n):
for j in range(3):
solution[i][j]=happines[i][j]+max(solution[i-1][(j+1)%3],solution[i-1][(j+2)%3])
print(max(solution[-1])) | [
"[email protected]"
] | |
e3b3126e6676609e20aa10a8b485b3a059b0fd77 | 8787b2fbb5017b61dcf6075a5261071b403847bf | /Programmers/N으로 표현.py | 21d160641aee1be033211795680b2a0e5c76564b | [] | no_license | khw5123/Algorithm | a6fe0009e33289813959553c2366d77c93d7b4b9 | 323a829f17a10276ab6f1aec719c496a3e76b974 | refs/heads/master | 2023-01-02T00:12:21.848924 | 2020-10-23T06:37:41 | 2020-10-23T06:37:41 | 282,162,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | def calc(s, N, number):
result = 0
start = 0
tmp = ''
for i in range(len(s)):
if s[i] != str(N):
start = i
result = int(tmp)
break
else:
tmp += s[i]
tmp = ''
operator = []
for i in range(start, len(s)):
if s[i] == str(N):
tmp += s[i]
if i == len(s) - 1 and len(operator) != 0:
if operator[0] == '+':
result += int(tmp)
elif operator[0] == '-':
result -= int(tmp)
elif operator[0] == '*':
result *= int(tmp)
elif operator[0] == '/':
result //= int(tmp)
else:
if len(operator) == 1:
if operator[0] == '+':
result += int(tmp)
elif operator[0] == '-':
result -= int(tmp)
elif operator[0] == '*':
result *= int(tmp)
elif operator[0] == '/':
result //= int(tmp)
tmp = ''
operator.pop()
operator.append(s[i])
return result
def solve(s, N, number):
answer = 9
if s.count(str(N)) < 9:
if s[-1] == str(N):
if eval(''.join(s)) == number or calc(s, N, number) == number:
answer = min(answer, s.count(str(N)))
s.append(str(N))
answer = min(answer, solve(s, N, number))
s.pop()
if s[-1] != '+' and s[-1] != '-' and s[-1] != '*' and s[-1] != '/':
s.append('+')
answer = min(answer, solve(s, N, number))
s.pop()
s.append('-')
answer = min(answer, solve(s, N, number))
s.pop()
s.append('*')
answer = min(answer, solve(s, N, number))
s.pop()
s.append('/')
answer = min(answer, solve(s, N, number))
s.pop()
return answer
return answer
def solution(N, number):
answer = solve([str(N)], N, number)
return -1 if answer == 9 else answer | [
"[email protected]"
] | |
2edda813a68b94ffdf1c3d6201c1cff73d0ddad3 | aaad70e69d37f92c160c07e4ca03de80becf2c51 | /filesystem/usr/lib/python3.6/asyncio/base_events.py | 32b4f0adcd1093409fe44dc22121f8affc046568 | [] | no_license | OSWatcher/ubuntu-server | 9b4dcad9ced1bff52ec9cdb4f96d4bdba0ad3bb9 | 17cb333124c8d48cf47bb9cec1b4e1305626b17a | refs/heads/master | 2023-02-10T18:39:43.682708 | 2020-12-26T01:02:54 | 2020-12-26T01:02:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | {
"MIME": "text/plain",
"inode_type": "REG",
"magic_type": "Python script, ASCII text executable",
"mode": "-rw-r--r--",
"sha1": "23f1c44c157099ef5d66c87ba91eb7128afa4867"
} | [
"[email protected]"
] | |
2ad7f8907bd282c066e9db3e2553e053f204e9a8 | a70778e730f6d3e3be04ba449e6ed0a9ff7d7e6d | /classifier_5b_rough_fine_tune_from3z.py | 4e132cf8513dd1cd901bd4a0c5a2f1a6c88b44fc | [] | no_license | previtus/two_classes_ml | 0351e62544cc46f9c09847de641fd84aac94d38b | 0f780e2e3736e6280dddd25540911d60c9d721d8 | refs/heads/master | 2021-05-10T10:05:38.526602 | 2018-08-06T19:59:26 | 2018-08-06T19:59:26 | 118,946,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,192 | py |
img_size = None #(20,20)
img_size = (150,150)
epochs_first = 10
epochs_second = 40
batch_size = 16
validation_split = 0.3
RESCALE = 1. / 255 # put data from 0-255 into 0-1
# GET ALL DATA
# define the classes in here directly
from data_handling import LOAD_DATASET, LOAD_DATASET_VAL_LONGER_THR2, sample_random_subset_from_list, y_from_x
from data_handling import load_images_with_keras, convert_labels_to_int, convert_back_from_categorical_data, how_many_are_in_each_category
TRAIN_WITH_LONGER_THAN = 1000
TRAIN_C_balanced = 5000
SPLIT = 0.3 # 70% and 30%
FOLDER = 'chillan_saved_images_square_224_ALL_with_len'
folders = ['data/'+FOLDER+'/LP/', 'data/'+FOLDER+'/TR/', 'data/'+FOLDER+'/VT/']
VAL_ONLY_LONGER_THR2 = 1000
BalancedVal = False
StillBalance10to1to1 = True
X_TRAIN_BAL, X_VAL_FULL = LOAD_DATASET_VAL_LONGER_THR2(
TRAIN_WITH_LONGER_THAN, TRAIN_C_balanced, SPLIT, FOLDER, folders, VAL_ONLY_LONGER_THR2,
BalancedVal=BalancedVal,StillBalance10to1to1 = StillBalance10to1to1)
specialname = '__Finetuned'
classes_names = ['LP', 'TR', 'VT']
num_classes = len(classes_names)
labels_texts = classes_names
labels = [0, 1, 2]
DROP=0.2
SUBSET_FOR_TRAIN = 8000
SUBSET_FOR_VAL = 8000
############ Whats bellow doesn't have to be changed dramatically
X_TRAIN_BAL,_ = sample_random_subset_from_list(X_TRAIN_BAL, SUBSET_FOR_TRAIN)
Y_TRAIN_BAL = y_from_x(X_TRAIN_BAL)
X_VAL,_ = sample_random_subset_from_list(X_VAL_FULL, SUBSET_FOR_VAL)
Y_VAL = y_from_x(X_VAL)
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
import keras
from matplotlib import pyplot as plt
print("Loading image data!")
# X_TRAIN_BAL, Y_TRAIN_BAL
x_train = load_images_with_keras(X_TRAIN_BAL, target_size=img_size)
y_train = convert_labels_to_int(Y_TRAIN_BAL, classes_names, labels)
y_train = keras.utils.to_categorical(y_train, num_classes=num_classes)
# X_VAL, Y_VAL
x_test = load_images_with_keras(X_VAL, target_size=img_size)
y_test = convert_labels_to_int(Y_VAL, classes_names, labels)
y_test = keras.utils.to_categorical(y_test, num_classes=num_classes)
print("x_train:", x_train.shape)
print("y_train:", y_train.shape)#, y_train[0:10])
print("x_test:", x_test.shape)
print("y_test:", y_test.shape)#, y_test[0:10])
print("---")
print("SanityCheck Test dist:")
how_many_are_in_each_category(convert_back_from_categorical_data(y_test))
print("SanityCheck Train dist:")
how_many_are_in_each_category(convert_back_from_categorical_data(y_train))
print("---")
x_train *= RESCALE
x_test *= RESCALE
# =============================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ROUGH
from keras import optimizers
from keras.applications import VGG16
vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(img_size[0], img_size[1], 3))
print("calculating high lvl features...")
X_bottleneck_train = vgg_conv.predict(x_train)
X_bottleneck_test = vgg_conv.predict(x_test)
print("X_bottleneck_train:", X_bottleneck_train.shape)
print("y_test:", y_train.shape)#, y_train[0:10])
print("X_bottleneck_test:", X_bottleneck_test.shape)
print("y_test:", y_test.shape)#, y_test[0:10])
print("---")
print("train_data.shape[1:]", X_bottleneck_train.shape[1:])
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
classifier_model = Sequential()
classifier_model.add(Flatten(input_shape=X_bottleneck_train.shape[1:]))
classifier_model.add(Dense(256, activation='relu'))
classifier_model.add(Dropout(0.5))
classifier_model.add(Dense(num_classes, activation='sigmoid'))
print("FIRST ROUGH MODEL:")
classifier_model.summary()
#classifier_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4),metrics=['accuracy'])
classifier_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# ==============================================================================
# TRAIN 1
# ==============================================================================
#
history1 = classifier_model.fit(X_bottleneck_train, y_train,
batch_size=batch_size,
epochs=epochs_first,
validation_data=(X_bottleneck_test, y_test),
verbose=1)
# Works well, gets us till cca 96% even in 10 epochs (possibly even 5)
# ==============================================================================
# ==============================================================================
# Freeze the layers except the last 4 layers
for layer in vgg_conv.layers[:-4]:
layer.trainable = False
# Check the trainable status of the individual layers
for layer in vgg_conv.layers:
print(layer, layer.trainable)
from keras import models
from keras import layers
# Create the model
fine_model = models.Sequential()
fine_model.add(vgg_conv)
fine_model.add(classifier_model)
print("SECOND FINE MODEL:")
fine_model.summary()
# Compile the model
# TRY other?
#fine_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4),metrics=['accuracy'])
# clip norm didnt help with loss: nan
#fine_model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4, clipnorm=1.),metrics=['accuracy'])
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) # default lr lr=0.001
# TRY
sgd = optimizers.SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
fine_model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
# ==============================================================================
# TRAIN 2
# ==============================================================================
#
history2 = fine_model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs_second,
validation_data=(x_test, y_test),
verbose=1)
# Whoops, sudden drop to loss: nan
# ==============================================================================
# REPORT
# ==============================================================================
#
#print(history1.history)
#print(history2.history)
split_n = len(history1.history['val_loss'])
# val_loss', 'val_acc', 'loss', 'acc
history1.history['val_loss'] += history2.history['val_loss']
history1.history['val_acc'] += history2.history['val_acc']
history1.history['loss'] += history2.history['loss']
history1.history['acc'] += history2.history['acc']
from visualize_history import visualize_history
plt = visualize_history(history1.history, show_also='acc', show=False, save=False)
#visualize_history(history2.history, show_also='acc', save=False, save_path='classifier5b_'+str(epochs)+'epochs_')
plt.axvline(x=split_n-0.5, linestyle='dashed', color='black')
filename = 'classifier5b_CHILL_'+str(epochs_first)+'+'+str(epochs_second)+'epochs_'
plt.savefig(filename)
plt.show()
fine_model.save('5b_final_fine_model.h5')
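# Hedged follow-up sketch (added, not in the original script): the saved model can
# be reloaded later for inference with the standard Keras API.
#
#   from keras.models import load_model
#   model = load_model('5b_final_fine_model.h5')
#   preds = model.predict(x_test[:16])   # class probabilities, shape (16, num_classes)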
| [
"[email protected]"
] | |
8871896d5379ec5750e6fb6433622c846811c30b | b8fed8222b41e447cd5ce83513eb4d014c01742b | /ad_report_salesadmin/po/po_form.py | ae2a831ae88665d254b25eafbddb16d0e61cf761 | [] | no_license | lajayuhniyarsyah/ERP-Supra | e993d8face6e022b6f863d1dff7cb51cda36be8d | 5a64dbb57ee40070354926700091fb9025c1350c | refs/heads/master | 2021-01-25T22:09:46.306990 | 2017-11-08T05:32:04 | 2017-11-08T05:32:04 | 23,605,825 | 0 | 10 | null | 2017-11-08T05:32:05 | 2014-09-03T03:58:28 | Python | UTF-8 | Python | false | false | 2,035 | py | import time
from report import report_sxw
from osv import osv,fields
from report.render import render
#from ad_num2word_id import num2word
import pooler
#from report_tools import pdf_fill,pdf_merge
from tools.translate import _
import tools
from tools.translate import _
import decimal_precision as dp
#from ad_amount2text_idr import amount_to_text_id
from tools import amount_to_text_en
class po_form(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
        super(po_form, self).__init__(cr, uid, name, context=context)
#if self.pool.get('sale.order').browse(cr, uid, context['active_ids'])[0].state <> 'approved':
# raise osv.except_osv(_('Can not Print PO Form !'), _('You can not Print PO Form If State not Approved'))
#
# self.line_no = 0
self.localcontext.update({
'get_object':self._get_object,
# 'time': time,
# 'convert':self.convert,
# 'get_company_address': self._get_company_address,
# #'angka':self.angka,
## 'alamat': self.alamat_npwp,
# 'convert':self.convert,
# 'charge':self.charge,
## 'nourut': self.no_urut,
## 'get_ppn': self.get_ppn,
# 'line_no':self._line_no,
# 'blank_line':self.blank_line,
# 'blank_line_rfq':self.blank_line_rfq,
# 'get_grand_total':self.get_grand_total,
# 'get_internal':self._get_internal,
# 'sum_tax':self._sum_tax,
# 'get_curr2':self.get_curr,
# 'get_invoice':self._get_invoice,
# 'get_curr':self._get_used_currency,
})
def _get_object(self,data):
obj_data=self.pool.get(data['model']).browse(self.cr,self.uid,[data['id']])
# seq=obj_data[0].print_seq
# seq+=1
# obj_data[0].write({'print_seq':seq})
return obj_data
report_sxw.report_sxw('report.po.form', 'purchase.order', 'ad_report_salesadmin/po/po_form.mako', parser=po_form,header=False)
| [
"[email protected]"
] | |
f040cc2c3bcc0b27174802337d61601ed34c13a6 | 38c1e589388752100c4afcbe0b445bfff033bab2 | /friend/migrations/0003_auto_20200819_1444.py | 0bf415da2081910e1e2d42a9465ac80b351f2e6a | [] | no_license | ruhullahil/Codingwithmitch-Chat | 02c83f17fd51329fb3e4c0af74f1890ffd7ac012 | dd854e6357e98684c3fe7c87da028de1f356030b | refs/heads/master | 2023-01-03T00:38:38.225127 | 2020-10-29T21:09:37 | 2020-10-29T21:09:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Generated by Django 2.2.15 on 2020-08-19 21:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('friend', '0002_auto_20200819_1443'),
]
operations = [
migrations.AlterField(
model_name='friendlist',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
7c3de6ac23a5796d7675e6ed3bf8151de5a1c8c6 | a6b6294dd573e7a8429f6e1817a0598c7b315c5e | /examples/finance_vix.py | d0a1e8139e68366c05b1e389003532561c2be261 | [
"MIT"
] | permissive | openknowledge-archive/datapackage-bigquery-py | 4bef9c960c4efc9131d4673fab1f999f5ae09221 | f1d822a1846eac4cfcdfd0f9e94bc27d2458f00b | refs/heads/master | 2021-05-31T09:52:09.884572 | 2016-01-30T16:23:02 | 2016-01-30T16:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from pprint import pprint
sys.path.insert(0, '.')
from examples.base import run
# Fixtures
dataset = 'datapackage'
prefix = 'finance_vix_%s_%s_' % (sys.version_info.major, sys.version_info.minor)
source = 'examples/packages/finance-vix/datapackage.json'
target = 'tmp/packages/finance-vix/datapackage.json'
# Execution
if __name__ == '__main__':
run(dataset, prefix, source, target)
| [
"[email protected]"
] | |
8a7d55caa81a5c81fa616f5f2ed3c6f0142efd0a | 69bc23a7baf65b276496d76d02645b5a76cfe083 | /thu_python_16/program3.py | f3332c8639d67bd315206d183acabe04cb6c234d | [] | no_license | pylinx64/thu_python_16 | a12e4ec8f82e6470b496116342b777e0a6676be1 | 617e75618a5a3117ec34278c41dadb38aa39fdb8 | refs/heads/main | 2023-04-23T03:17:31.347867 | 2021-05-05T10:09:57 | 2021-05-05T10:09:57 | 336,022,469 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | #x=10
#print(x)
#y=10
#print(x+y)
#print('x+y')
#x=20
#z = 10
#print(x+y+z)
#x='Яготинское'
#k='молоко'
#print(x+' '+k)
#print('Яготинское'+' молоко')
#print(k * 143543543)
#print(11 > 10)
#print(8 > 9)
#print(9 != 9)
#print(9 == 9)
#x = 8
#y = 9
#print(x >= y)
#print('a' == 'a')
#print('с' == 'c')
#print('z' > 'a')
password = input('Введите пароль: ')
if 'abc123' == password:
print('Вход выполнен')
else:
print('Невход выполнен 404')
| [
"[email protected]"
] | |
d073cf0e510babb4c2329508f3b0d549e0cf3cec | 0bc2a2963cb72c09c0ec0b3e3b10911c7bc31342 | /examples/manila/script.py | a5f6b5d55011b15a3bcca5fbe09c09f48968cb7b | [] | no_license | cloudify-cosmo/cloudify-openstack-plugin | eb5730d0b75442e6a49069164fde03020dcca1de | 7d2cd4162897333adcaab4bd83361bbd369fcf17 | refs/heads/master | 2023-09-06T09:10:53.372638 | 2023-03-06T15:02:59 | 2023-03-06T15:02:59 | 18,327,738 | 19 | 75 | null | 2023-03-06T15:03:01 | 2014-04-01T11:52:24 | Python | UTF-8 | Python | false | false | 391 | py | # For development help:
from manilaclient import client
# Fill in with real values.
manila = client.Client(
client_version='2',
username='admin',
password='openstack',
project_name='demo',
auth_url='http://10.11.12.2/identity',
user_domain_name='Default',
project_domain_name='default')
share_networks = manila.share_networks.list()
shares = manila.shares.list()
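# Hedged illustration (added): the returned lists can be inspected interactively,
# e.g. by printing how many shares and share networks were found. No attribute
# names beyond len() are assumed here.
#
#   print("share networks:", len(share_networks))
#   print("shares:", len(shares))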
| [
"[email protected]"
] | |
dd30c5254405af64ce994ba786c148924ddf521c | fd0194543a142c63812352e79c417e54a19d0cd5 | /Auxiliary_Scripts/Plot_Relocate.py | 7633b63d02c80b1e30093bd97aeca0eb93c5d1b2 | [] | no_license | mwilensky768/MJW-MWA | 2ac85b8f07577e3112c418595bf62902d720c3c2 | ebda1e273a401c88f014bc698743547ec86a6f35 | refs/heads/master | 2021-05-02T00:51:48.591198 | 2021-03-31T22:34:06 | 2021-03-31T22:34:06 | 78,403,875 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | import glob
import shutil
import os
plot_dir = '/Users/mike_e_dubs/MWA/Catalogs/Wenyang_Phase2/data_eva/unflagged/'
target_dir = '/Users/mike_e_dubs/MWA/Catalogs/Wenyang_Phase2/data_eva/frac_diff/'
plots = glob.glob('%s*__INS_frac_diff.png' % (plot_dir))
print(plots)
for plot in plots:
shutil.copy(plot, target_dir)
| [
"[email protected]"
] | |
344513f40b84e70156a271a556a0a7afa60bb84b | 6febc1719503d0f9dbc97f6b1202116370391b10 | /public_holiday/models/hr_holidays_inherited_model.py | fa5c2a57f2e8a69880f076eb808b1dbb72e214ac | [] | no_license | arshakil/Odoo-Development | 5c6a1795cd64a8ebef5abfdf7d6245804594bcd8 | df37f6e8c2f7d89cdbdb36d0a8fd501ef8bfe563 | refs/heads/master | 2022-12-11T05:17:12.123339 | 2020-07-28T07:38:58 | 2020-07-28T07:38:58 | 248,154,189 | 0 | 2 | null | 2022-12-08T03:51:50 | 2020-03-18T06:20:59 | Python | UTF-8 | Python | false | false | 3,029 | py | from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from datetime import date, datetime
from datetime import datetime, timedelta
class Hr_Holidays_inherited_Model(models.Model):
_inherit = 'hr.holidays'
public_holiday=fields.Float(string='Public Holiday In Between',compute='check_public_holiday')
@api.model
def create(self, vals):
holiday_status_id=vals['holiday_status_id']
# print ("vals date_from",vals['date_from'])
# print ('state', vals['state'])
# print ('holiday_status_id is called',holiday_status_id)
if vals['type'] == 'remove':
Is_check_hr_holidays_status= self.env['hr.holidays.status'].search([('id','=',holiday_status_id),('exclude_public_holidays','=',True)])
if Is_check_hr_holidays_status:
if vals['date_from'] and vals['date_to']:
count = 0;
start_date = datetime.strptime(vals['date_from'], '%Y-%m-%d %H:%M:%S').date()
end_date = datetime.strptime(vals['date_to'], '%Y-%m-%d %H:%M:%S').date()
range_of_dates = [start_date + timedelta(days=x) for x in range((end_date - start_date).days + 1)]
for public_holiday_date in range_of_dates:
check_public_holidays = self.env['public_holiday.public_holiday'].search([])
for pub_holiday in check_public_holidays:
if str(public_holiday_date)==pub_holiday.start:
count+=1
else:
pass
set_count=vals['number_of_days_temp']-float(count)
if vals['number_of_days_temp']<1:
vals['number_of_days_temp']=0
vals['public_holiday']=0
else:
vals['number_of_days_temp']=set_count
vals['public_holiday'] = float(count)
return super(Hr_Holidays_inherited_Model, self).create(vals)
else:
return super(Hr_Holidays_inherited_Model, self).create(vals)
@api.depends('date_from', 'date_to')
def check_public_holiday(self):
if self.date_from and self.date_to:
count = 0;
start_date = datetime.strptime(self.date_from, '%Y-%m-%d %H:%M:%S').date()
end_date = datetime.strptime(self.date_to, '%Y-%m-%d %H:%M:%S').date()
range_of_dates = [start_date + timedelta(days=x) for x in range((end_date - start_date).days + 1)]
for public_holiday_date in range_of_dates:
check_public_holidays = self.env['public_holiday.public_holiday'].search([])
for pub_holiday in check_public_holidays:
if str(public_holiday_date) == pub_holiday.start:
count += 1
else:
pass
self.public_holiday=count
| [
"[email protected]"
] | |
e938435982e4bca35a3bbaf1e7c4c35be18545a9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/182/usersdata/265/105453/submittedfiles/diagonaldominante.py | 2a3603de9bfd26bb77e24473ed3f5a3d2574df4e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # -*- coding: utf-8 -*-
import numpy as np
def soma(A):
somalinhas=[]
for i in range (0,A.shape[0],1):
cont=0
        for j in range (0,A.shape[1],1):
            cont=cont+A[i,j]
somalinhas.append(cont)
return(somalinhas)
linhas=int(input('digite a quantidade de linhas: '))
a=np.zeros((linhas,linhas))
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
a[i,j]=float(input('digite os valores da matriz: '))
print(a)
print(soma(a)) | [
"[email protected]"
] | |
7e1ccc3c0c0d628fe5629e0ba6ef33d3b0101291 | bf0ecad5f2d9853944e6bbc1ab6160359e9a6803 | /blog/migrations/0001_initial.py | d30bdf5599f3883def76510678e4cb1d43d9f3c0 | [] | no_license | NiteshPidiparars/icoder-blog-post | 9addc53a83ec916c421ee16de7b04b8035be5d6b | 19c5a333faf520b4133a0fa5d5ccf37320ed4181 | refs/heads/master | 2023-05-28T01:41:11.114065 | 2021-06-16T07:05:48 | 2021-06-16T07:05:48 | 374,288,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | # Generated by Django 3.2.4 on 2021-06-04 06:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('sno', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('author', models.CharField(max_length=14)),
('slug', models.CharField(max_length=130)),
('timeStamp', models.DateTimeField(blank=True)),
('content', models.TextField()),
],
),
]
| [
"[email protected]"
] | |
dde36e2eae98fd6ebba3dc430abdd47affdd0f65 | a3e7583b70584f62554c5969a9963ba79afd7ac3 | /check.py | b7630edc09bab8c7a639472f47604386f4a53a32 | [
"MIT"
] | permissive | foamliu/DeepRankIQA | 4f677a2fe1912b16cf4bbcc05c8571d46260711f | 7801cb4ff2c934a9d954ace9ad52600f96396125 | refs/heads/master | 2020-09-03T06:02:05.896210 | 2019-11-05T01:30:27 | 2019-11-05T01:30:27 | 219,402,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import os
import pickle
import cv2 as cv
from tqdm import tqdm
from config import data_file, image_folder
if __name__ == "__main__":
with open(data_file, 'rb') as f:
samples = pickle.load(f)
filenames = set()
for sample in tqdm(samples):
before = sample['before']
fullpath = os.path.join(image_folder, before)
img = cv.imread(fullpath)
assert (img is not None)
filenames.add(before)
after = sample['after']
        fullpath = os.path.join(image_folder, after)
img = cv.imread(fullpath)
assert (img is not None)
filenames.add(after)
num_samples = len(list(filenames))
print('num_samples: ' + str(num_samples))
| [
"[email protected]"
] | |
fbcf5b7d508ad8a33e8e303f73759b7d5782c4e0 | 30a6975de792d613db836346ff758a7c0797d400 | /lldb/test/API/lang/swift/parseable_interfaces/shared/TestSwiftInterfaceNoDebugInfo.py | 075df0dd1fb74f689568a004da5f267648814dee | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | WYK15/swift-Ollvm11 | 0a2aa1b216c8e3f38829ae16db846039e8de149e | b28dba1ebe1186790650c72d5e97d8b46f1bc6e0 | refs/heads/main | 2023-06-27T18:14:47.652175 | 2021-06-10T12:47:56 | 2021-06-10T12:47:56 | 367,350,198 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,467 | py | # TestSwiftInterfaceNoDebugInfo.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# -----------------------------------------------------------------------------
"""
Test that we load and handle swift modules that only have textual
.swiftinterface files -- i.e. no associated .swiftmodule file -- and no debug
info. The module loader should generate the .swiftmodule for any
.swiftinterface it finds unless it is already in the module cache.
"""
import glob
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import os.path
import unittest2
class TestSwiftInterfaceNoDebugInfo(TestBase):
mydir = TestBase.compute_mydir(__file__)
@swiftTest
def test_swift_interface(self):
"""Test that we load and handle modules that only have textual .swiftinterface files"""
self.build()
self.do_test()
@swiftTest
def test_swift_interface_fallback(self):
"""Test that we fall back to load from the .swiftinterface file if the .swiftmodule is invalid"""
self.build()
# Install invalid modules in the build directory first to check we
# still fall back to the .swiftinterface.
modules = ['AA.swiftmodule', 'BB.swiftmodule', 'CC.swiftmodule']
for module in modules:
open(self.getBuildArtifact(module), 'w').close()
self.do_test()
@swiftTest
@skipUnlessPlatform(["macosx"])
def test_prebuilt_cache_location(self):
"""Verify the prebuilt cache path is correct"""
self.build()
log = self.getBuildArtifact("types.log")
self.runCmd('log enable lldb types -f "%s"' % log)
# Set a breakpoint in and launch the main executable so we load the
# ASTContext and log the prebuilt cache path
lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec("main.swift"),
exe_name=self.getBuildArtifact("main"))
# Check the prebuilt cache path in the log output
prefix = 'Using prebuilt Swift module cache path: '
expected_suffix = os.path.join('macosx', 'prebuilt-modules')
found = False
with open(log, "r") as logfile:
for line in logfile:
if prefix in line:
self.assertTrue(line.rstrip().endswith(os.path.sep + expected_suffix), 'unexpected prebuilt cache path: ' + line)
found = True
break
self.assertTrue(found, 'prebuilt cache path log entry not found')
# Check the host toolchain has a prebuilt cache in the same subdirectory of its swift resource directory
prebuilt_path = os.path.join(self.get_toolchain(), 'usr', 'lib', 'swift', expected_suffix)
self.assertTrue(len(os.listdir(prebuilt_path)) > 0)
def get_toolchain(self):
sdkroot = self.get_sdkroot()
# The SDK root is expected to be wihin the Xcode.app/Contents
# directory. Drop the last path component from the sdkroot until we get
# up to that level.
self.assertTrue('{0}Contents{0}'.format(os.path.sep) in sdkroot)
contents = os.path.abspath(sdkroot)
while os.path.split(contents)[1] != 'Contents':
(contents, _) = os.path.split(contents)
# Construct the expected path to the default toolchain from there and
# check it exists.
toolchain = os.path.join(contents, 'Developer', 'Toolchains', 'XcodeDefault.xctoolchain')
self.assertTrue(os.path.exists(toolchain), 'no default toolchain?')
return toolchain
def get_sdkroot(self):
with open(self.getBuildArtifact("sdk-root.txt"), "r") as sdkroot:
return sdkroot.read().rstrip()
def setUp(self):
TestBase.setUp(self)
def do_test(self):
# The custom swift module cache location
swift_mod_cache = self.getBuildArtifact("MCP")
# Clear the swift module cache (populated by the Makefile build)
shutil.rmtree(swift_mod_cache)
self.assertFalse(os.path.isdir(swift_mod_cache),
"module cache should not exist")
# Update the settings to use the custom module cache location
self.runCmd('settings set symbols.clang-modules-cache-path "%s"'
% swift_mod_cache)
target = self.dbg.CreateTarget(self.getBuildArtifact("main"))
self.assertTrue(target, VALID_TARGET)
self.registerSharedLibrariesWithTarget(target, ['AA', 'BB', 'CC'])
# Set a breakpoint in and launch the main executable
lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec("main.swift"),
exe_name=self.getBuildArtifact("main"))
# Check we are able to access the public fields of variables whose
# types are from the .swiftinterface-only dylibs
var = self.frame().FindVariable("x")
lldbutil.check_variable(self, var, False, typename="AA.MyPoint")
child_y = var.GetChildMemberWithName("y") # MyPoint.y is public
lldbutil.check_variable(self, child_y, False, value="0")
# MyPoint.x isn't public, but LLDB can find it through type metadata.
child_x = var.GetChildMemberWithName("x")
self.assertTrue(child_x.IsValid())
# Expression evaluation using types from the .swiftinterface only
# dylibs should work too
lldbutil.check_expression(
self, self.frame(), "y.magnitudeSquared", "404", use_summary=False)
lldbutil.check_expression(
self, self.frame(), "MyPoint(x: 1, y: 2).magnitudeSquared", "5",
use_summary=False)
# Check the swift module cache was populated with the .swiftmodule
# files of the loaded modules
self.assertTrue(os.path.isdir(swift_mod_cache), "module cache exists")
a_modules = glob.glob(os.path.join(swift_mod_cache, 'AA-*.swiftmodule'))
b_modules = glob.glob(os.path.join(swift_mod_cache, 'BB-*.swiftmodule'))
c_modules = glob.glob(os.path.join(swift_mod_cache, 'CC-*.swiftmodule'))
self.assertEqual(len(a_modules), 1)
self.assertEqual(len(b_modules), 1)
self.assertEqual(len(c_modules), 0)
# Update the timestamps of the modules to a time well in the past
for file in a_modules + b_modules:
make_old(file)
# Re-import module A and B
self.runCmd("expr import AA")
self.runCmd("expr import BB")
# Import C for the first time and check we can evaluate expressions
# involving types from it
self.runCmd("expr import CC")
lldbutil.check_expression(
self, self.frame(), "Baz.baz()", "23", use_summary=False)
# Check we still have a single .swiftmodule in the cache for A and B
# and that there is now one for C too
a_modules = glob.glob(os.path.join(swift_mod_cache, 'AA-*.swiftmodule'))
b_modules = glob.glob(os.path.join(swift_mod_cache, 'BB-*.swiftmodule'))
c_modules = glob.glob(os.path.join(swift_mod_cache, 'CC-*.swiftmodule'))
self.assertEqual(len(a_modules), 1,
"unexpected number of swiftmodules for A.swift")
self.assertEqual(len(b_modules), 1,
"unexpected number of swiftmodules for B.swift")
self.assertEqual(len(c_modules), 1,
"unexpected number of swiftmodules for C.swift")
# Make sure the .swiftmodule files of A and B were re-used rather than
# re-generated when they were re-imported
for file in a_modules + b_modules:
self.assertTrue(is_old(file),
"Swiftmodule file was regenerated rather than reused")
OLD_TIMESTAMP = 1390550700 # 2014-01-24T08:05:00+00:00
def make_old(file):
"""Sets the access and modified time of the given file to a time long past"""
os.utime(file, (OLD_TIMESTAMP, OLD_TIMESTAMP))
def is_old(file):
"""Checks the modified time of the given file matches the timestamp set my make_old"""
return os.stat(file).st_mtime == OLD_TIMESTAMP
| [
"[email protected]"
] | |
478dec05c29f554e8d1effc63ad7264f99e95538 | c236e0c3b34a81e75acb9591423b6aad9d6a22dd | /unitTestRunner.py | 451a956edaed0cee0386f60f3b60470f1b9a6a7c | [] | no_license | abhijeetdtu/heimcharge | 2cd68c9eaaf5b94206d310c8b8348133b5d4e77b | 569a9d22916808ba8b67169a2822a91e05a051e9 | refs/heads/master | 2021-06-06T02:55:29.913134 | 2019-06-11T03:07:20 | 2019-06-11T03:07:20 | 131,433,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | import unittest
from UnitTests.ChartPlotTest import *
from UnitTests.GeoOpsTest import *
from UnitTests.FileOpsTest import *
if __name__ == '__main__':
unittest.main(exit=False) | [
"[email protected]"
] | |
b23143408eae95819c6760c853c06db075ea9987 | d62e01ee1b50b8228d25736daceae0e822f3a0a0 | /examples/user_guide/add_tasks.py | e323c771d0a918d6b08a4f1fc852134d93c6e40d | [
"MIT"
] | permissive | dalg24/radical.entk | b6f34ae1b2075f638fbdfd5fc397ea4c0d87cb93 | 4aa68d8de7804e09ca64629035ccda0b79ac0b76 | refs/heads/master | 2020-04-03T17:25:37.548618 | 2018-10-16T12:06:30 | 2018-10-16T12:06:30 | 155,444,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | from radical.entk import Pipeline, Stage, Task, AppManager
import os
# ------------------------------------------------------------------------------
# Set default verbosity
if os.environ.get('RADICAL_ENTK_VERBOSE') == None:
os.environ['RADICAL_ENTK_REPORT'] = 'True'
# Description of how the RabbitMQ process is accessible
# No need to change/set any variables if you installed RabbitMQ has a system
# process. If you are running RabbitMQ under a docker container or another
# VM, set "RMQ_HOSTNAME" and "RMQ_PORT" in the session where you are running
# this script.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = os.environ.get('RMQ_PORT', 5672)
if __name__ == '__main__':
# Create a Pipeline object
p = Pipeline()
# Create a Stage object
s = Stage()
for cnt in range(10):
# Create a Task object
t = Task()
t.name = 'my-task' # Assign a name to the task (optional, do not use ',' or '_')
t.executable = ['/bin/echo'] # Assign executable to the task
t.arguments = ['I am task %s'%cnt] # Assign arguments for the task executable
# Add the Task to the Stage
s.add_tasks(t)
# Add Stage to the Pipeline
p.add_stages(s)
# Create Application Manager
appman = AppManager(hostname=hostname, port=port)
    # Create a dictionary describing the three mandatory keys:
# resource, walltime, and cpus
# resource is 'local.localhost' to execute locally
res_dict = {
'resource': 'local.localhost',
'walltime': 10,
'cpus': 1
}
# Assign resource request description to the Application Manager
appman.resource_desc = res_dict
# Assign the workflow as a set or list of Pipelines to the Application Manager
# Note: The list order is not guaranteed to be preserved
appman.workflow = set([p])
# Run the Application Manager
appman.run()
| [
"[email protected]"
] | |
190402e6dd636bf2f4fa9578042f043ce51c8530 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /wtBko8Bc8o8Tmra3q_11.py | 93df75889df3f71ae56c4f1e24530b81e184ba11 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py |
def halflife_calculator(mass, hlife, n):
mass_left = mass/(2**n)
years = hlife * n
return [round(mass_left,3),years]
| [
"[email protected]"
] | |
008880df49eaa648acea8a9abf9ffaa149112098 | 85c0813d837b0e0f189020a52348db1deffb0b11 | /public/db/coupon_db.py | 80daaa213acc609f25eb27d5f2237e1696469652 | [] | no_license | reb00t2018/flask-reptiles | 2d49fb27e718e305a7127e05047d865a1e7a6157 | ac3832340219f922e3b9451c2e2b1e18773938c1 | refs/heads/master | 2020-07-03T03:05:56.991764 | 2019-06-11T01:19:14 | 2019-06-11T01:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | # -*- coding: utf-8 -*-
__author__ = 'Apple'
from public.db.participle_db import DataBase_PD
class CouponDB(DataBase_PD):
def __init__(self):
super(CouponDB, self).__init__()
def save_coupon(self, coupon):
'''
        Save a single product (coupon) record to the database
:param coupon:
:return:
'''
insert_sql = """
(insert into goods_goods(category_id,second_id,first_id,title, price, url, pic, brand,goods_desc,add_time)
values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s))
"""
old_coupon = self.is_has_by_name(coupon.title)
insert_data = (
coupon.category_id,coupon.second_id,coupon.first_id, coupon.title, coupon.price, coupon.url, coupon.pic
, coupon.brand,coupon.goods_desc,coupon.add_time
)
if not old_coupon:
return self.execute(insert_sql, insert_data)
else:
return False
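    # Hedged usage sketch (added; the coupon object is a stand-in for whatever the
    # caller builds, with the attributes used by the insert statement above):
    #
    #   db = CouponDB()
    #   if db.save_coupon(coupon):        # inserted only when the title is not present yet
    #       print("saved:", coupon.title)
    #   else:
    #       print("duplicate, skipped:", coupon.title)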
def is_has_by_name(self,title):
'''
        Check by name whether this product already exists
:param title:
:return:
'''
sql = """
select 1 from goods_goods where title = %s
"""
return self.find_execute(sql, (title))
def save_ip(self,ip,time):
insert_sql = """
insert into goods_getip(ip,add_time) values (%s,%s)
"""
return self.execute(insert_sql, (ip,time))
def count_ip(self):
select_sql = """
select count(*) from goods_getip
"""
return self.find_execute(select_sql)
def delete_ip(self,getip):
delete_sql = """
DELETE FROM goods_getip WHERE id = {0}
"""
return self.execute(delete_sql.format(getip))
def sumip(self):
select_sql = """
select * from goods_getip
"""
return self.find_execute(select_sql,fetchone=False) | [
"[email protected]"
] | |
44c569b36803775a0c36187b8503777aef16b0ec | fa7790c45dbc1ee804011e9dff2d4ff424b9f3d6 | /Searching and Sorting/Counting sort/Implementation.py | 63a5b8f34c85215b5c59e2ea21c34fde374752dc | [] | no_license | KuroKousuii/Algorithms | bcdf75e58e20287e3162ef3302f8051604d7b7d6 | 3bf0250780c9d11c69546ca0da130fbbcada7e40 | refs/heads/main | 2023-05-31T07:41:07.399881 | 2021-06-16T09:32:16 | 2021-06-16T09:32:16 | 368,274,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | # Python program for counting sort
# The main function that sorts the given string arr[] in
# alphabetical order
def countSort(arr):
# The output character array that will have sorted arr
output = [0 for i in range(len(arr))]
# Create a count array to store count of inidividul
# characters and initialize count array as 0
count = [0 for i in range(256)]
# For storing the resulting answer since the
# string is immutable
ans = ["" for _ in arr]
# Store count of each character
for i in arr:
count[ord(i)] += 1
# Change count[i] so that count[i] now contains actual
# position of this character in output array
    for i in range(1, 256):
count[i] += count[i - 1]
# Build the output character array
for i in range(len(arr)):
output[count[ord(arr[i])] - 1] = arr[i]
count[ord(arr[i])] -= 1
# Copy the output array to arr, so that arr now
# contains sorted characters
for i in range(len(arr)):
ans[i] = output[i]
return ans
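# Worked example (added for illustration): for arr = "abba" the per-character
# counts are {a: 2, b: 2}; after the prefix-sum pass count[ord('a')] = 2 and
# count[ord('b')] = 4, i.e. one past the last slot each character may occupy.
# Scanning the input then places every character just before its running count,
# yielding "aabb".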
# Driver program to test above function
arr = "geeksforgeeks"
ans = countSort(arr)
print("Sorted character array is % s" % ("".join(ans))) | [
"[email protected]"
] | |
11c74340ab82e472305fd10a2cd5370c1dea9ffb | fb1a7534356941e763755838e9b06fede7a7d116 | /tests/test_metrics.py | 46fe3e1aeab2e0aed220e08069a18e20c0547717 | [
"Apache-2.0"
] | permissive | torkelo/graphite-api | e2417f0bddae9bcd0581272dc270bbe08a78d653 | 0fd1904b462c1cbbe99f531a365839647a01a7e1 | refs/heads/master | 2020-12-02T15:08:56.573796 | 2014-03-17T21:59:38 | 2014-03-17T21:59:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,363 | py | import os.path
import whisper
from . import TestCase, WHISPER_DIR
class MetricsTests(TestCase):
def _create_dbs(self):
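        # fixture: create a leaf metric test.foo and a branch test.bar containing the
        # leaf test.bar.baz, each as a whisper archive of 60 points at 1-second resolution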
for db in (
('test', 'foo.wsp'),
('test', 'bar', 'baz.wsp'),
):
db_path = os.path.join(WHISPER_DIR, *db)
os.makedirs(os.path.dirname(db_path))
whisper.create(db_path, [(1, 60)])
def test_find(self):
url = '/metrics/find'
response = self.app.get(url)
self.assertEqual(response.status_code, 400)
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, [])
response = self.app.get(url, query_string={'query': 'test',
'format': 'completer'})
self.assertJSON(response, {'metrics': []})
self._create_dbs()
response = self.app.get(url, query_string={'query': 'test.*',
'format': 'treejson'})
self.assertJSON(response, [{
'allowChildren': 1,
'expandable': 1,
'id': 'test.bar',
'leaf': 0,
'text': 'bar',
}, {
'allowChildren': 0,
'expandable': 0,
'id': 'test.foo',
'leaf': 1,
'text': 'foo',
}])
response = self.app.get(url, query_string={'query': 'test.*',
'format': 'treejson',
'wildcards': 1})
self.assertJSON(response, [{
'text': '*',
'expandable': 1,
'leaf': 0,
'id': 'test.*',
'allowChildren': 1,
}, {
'allowChildren': 1,
'expandable': 1,
'id': 'test.bar',
'leaf': 0,
'text': 'bar',
}, {
'allowChildren': 0,
'expandable': 0,
'id': 'test.foo',
'leaf': 1,
'text': 'foo',
}])
response = self.app.get(url, query_string={'query': 'test.*',
'format': 'completer'})
self.assertJSON(response, {'metrics': [{
'is_leaf': 0,
'name': 'bar',
'path': 'test.bar.',
}, {
'is_leaf': 1,
'name': 'foo',
'path': 'test.foo',
}]})
response = self.app.get(url, query_string={'query': 'test.*',
'wildcards': 1,
'format': 'completer'})
self.assertJSON(response, {'metrics': [{
'is_leaf': 0,
'name': 'bar',
'path': 'test.bar.',
}, {
'is_leaf': 1,
'name': 'foo',
'path': 'test.foo',
}, {
'name': '*',
}]})
def test_find_validation(self):
url = '/metrics/find'
response = self.app.get(url, query_string={'query': 'foo',
'wildcards': 'aaa'})
self.assertJSON(response, {'errors': {'wildcards': 'must be 0 or 1.'}},
status_code=400)
response = self.app.get(url, query_string={'query': 'foo',
'from': 'aaa',
'until': 'bbb'})
self.assertJSON(response, {'errors': {
'from': 'must be an epoch timestamp.',
'until': 'must be an epoch timestamp.',
}}, status_code=400)
response = self.app.get(url, query_string={'query': 'foo',
'format': 'other'})
self.assertJSON(response, {'errors': {
'format': 'unrecognized format: "other".',
}}, status_code=400)
def test_expand(self):
url = '/metrics/expand'
response = self.app.get(url)
self.assertJSON(response, {'errors':
{'query': 'this parameter is required.'}},
status_code=400)
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, {'results': []})
self._create_dbs()
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, {'results': ['test']})
response = self.app.get(url, query_string={'query': 'test.*'})
self.assertJSON(response, {'results': ['test.bar', 'test.foo']})
response = self.app.get(url, query_string={'query': 'test.*',
'leavesOnly': 1})
self.assertJSON(response, {'results': ['test.foo']})
response = self.app.get(url, query_string={'query': 'test.*',
'groupByExpr': 1})
self.assertJSON(response, {'results': {'test.*': ['test.bar',
'test.foo']}})
def test_expand_validation(self):
url = '/metrics/expand'
response = self.app.get(url, query_string={'query': 'foo',
'leavesOnly': 'bbb',
'groupByExpr': 'aaa'})
self.assertJSON(response, {'errors': {
'groupByExpr': 'must be 0 or 1.',
'leavesOnly': 'must be 0 or 1.',
}}, status_code=400)
def test_noop(self):
url = '/dashboard/find'
response = self.app.get(url)
self.assertJSON(response, {'dashboards': []})
url = '/dashboard/load/foo'
response = self.app.get(url)
self.assertJSON(response, {'error': "Dashboard 'foo' does not exist."},
status_code=404)
url = '/events/get_data'
response = self.app.get(url)
self.assertJSON(response, [])
def test_search(self):
url = '/metrics/search'
response = self.app.get(url, query_string={'max_results': 'a'})
self.assertJSON(response, {'errors': {
'max_results': 'must be an integer.',
'query': 'this parameter is required.'}}, status_code=400)
response = self.app.get(url, query_string={'query': 'test'})
self.assertJSON(response, {'metrics': []})
def test_search_index(self):
response = self.app.get('/metrics/search',
query_string={'query': 'collectd.*'})
self.assertJSON(response, {'metrics': []})
parent = os.path.join(WHISPER_DIR, 'collectd')
os.makedirs(parent)
for metric in ['load', 'memory', 'cpu']:
db = os.path.join(parent, '{0}.wsp'.format(metric))
whisper.create(db, [(1, 60)])
response = self.app.put('/index')
self.assertJSON(response, {'success': True, 'entries': 3})
response = self.app.get('/metrics/search',
query_string={'query': 'collectd.*'})
self.assertJSON(response, {'metrics': [
{'is_leaf': False, 'path': None},
{'is_leaf': True, 'path': 'collectd.cpu'},
{'is_leaf': True, 'path': 'collectd.load'},
{'is_leaf': True, 'path': 'collectd.memory'},
]})
| [
"[email protected]"
] | |
8afec5a7c9748873c1dbc65e7e67f6d025f33a9e | 51d098e7ac392556a6365fcf7d283546d1bc86cb | /pysswords/db/credential.py | 651901a52f61b2811503c8e9363f048ac638b221 | [
"MIT"
] | permissive | mauriciomelo/pysswords | 7dd632577b261aa198a618ca1d6d0faa825cb5e7 | e845475a2a37f6e5ac4fadbc821d89dad6971f1c | refs/heads/master | 2021-01-13T11:41:44.568197 | 2014-12-04T19:32:57 | 2014-12-04T19:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from collections import namedtuple
Credential = namedtuple(
"Credential",
["name", "login", "password", "login_url", "description"]
)
| [
"[email protected]"
] | |
403e6aea999f4b8b857e6ebb24ff679f68a607af | 46e50a1bd93569a0f945b65b6e84c927fb256f53 | /mtpy/utils/modemMakeModel.py | 36128ecdce8a38fc29194a3dd6cc3fa86d367988 | [] | no_license | lucasc15/mt3py | d3afed2527b1bc49e4e4cd4ec1d24b5083a5942d | 734fc551e87cd30261e7d648d79d6d0e5885b85d | refs/heads/master | 2021-01-19T00:35:44.069960 | 2016-07-03T14:03:54 | 2016-07-03T14:03:54 | 62,498,631 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,929 | py | #!/usr/bin/env python
#build ModEM input Model from ModEM data file
import numpy as np
import sys,os
#==============================================================================
#plot model geometry
plot = True
# parameters:
n_xpadding = 10
n_ypadding = 6
#number of vertical padding layers is set to 3 !
#factor with which the padding stretches outside the central rectangle grid
padding_stretch = 1.2
n_layers = 45
#determine minimum block sizes
#used in the inner rectangle - constant widths
dx = 300
dy = 350
#region around stations discretised with these sizes
#outside, the grid steps will be extended exponentially
#the size of padding is determined by the numbers of cells as defined above
#number of tries to shift the grid so that each station gets its own cell
n_maximum_gridshifts = 123
#depth of first layer
z0 = 50
#total model depth in meters
model_depth = 200000
#stretching factor for the whole model extension
model_extension_factor = 1
#starting resistivity value for homog. halfspace setup
rho0 = 100.
#define layered/1d model as input
inmodel1d = np.zeros((4,2))
inmodel1d[0] = 0,0.1
inmodel1d[1] = 250,100
inmodel1d[2] = 2000,10
inmodel1d[3] = 4000,1000
#inmodel1d = None
#==============================================================================
#allow rotation of the grid along a known geo electrical strike angle
# X,Y will be rotated to X',Y' with X' along strike
#rotation center is the midpoint of the station loactions
strike = 0.
#NOTE: if strike is set to a value !=0, the locations of the stations have to
#be adapted in the data file in the same way!!!
#==============================================================================
#name of datafile (to be handled as argument later on)
datafile = 'ModEMdata.dat'
#name of output model file
modelfile = 'THE_modelfile.rho'
#==============================================================================
#==============================================================================
#==============================================================================
outstring = ''
outstring += '# ModEM model generated with MTpy - layout read from datafile: {0}\n'.format(datafile)
Fin = open(datafile,'r')
data = Fin.readlines()
Fin.close()
coords = []
#read station coordinates
#start in line after header info, determined by starting character '>'
for dataline in data:
line = dataline.strip().split()
if (len(line) == 0) or line[0].strip()[0] in ['#','>']:
continue
try:
line = dataline.strip().split()
co = (float(line[4]),float(line[5]),float(line[6]))
coords.append(co)
except:
continue
# local, Cartesian coordinates:
coords = np.array(list(set(coords)))
if strike != 0:
original_coords = coords.copy()
cosphi = np.cos(strike/180.*np.pi)
sinphi = np.sin(strike/180.*np.pi)
RotMat = np.matrix(np.array([cosphi,sinphi,-sinphi,cosphi]).reshape(2,2))
center = (np.mean(coords[:,0]),np.mean(coords[:,1]))
rel_coords = coords[:,:2]
rel_coords[:,0] = coords[:,0] - center[0]
rel_coords[:,1] = coords[:,1] - center[1]
rotated_coords = np.dot(RotMat,np.matrix(rel_coords).T).T
rotated_coords[:,0] = rotated_coords[:,0] + center[0]
rotated_coords[:,1] = rotated_coords[:,1] + center[1]
coords[:,:2] = rotated_coords
#reduce grid to 2D - assuming all stations are at the surface
xmin = min(coords[:,0])
xmax = max(coords[:,0])
ymin = min(coords[:,1])
ymax = max(coords[:,1])
x_range = xmax - xmin
y_range = ymax - ymin
n_center_xblocks = int(x_range/dx) + 3
n_center_yblocks = int(y_range/dy) + 3
center_widthX = n_center_xblocks * dx
center_widthY = n_center_yblocks * dy
surplusX = center_widthX - x_range
surplusY = center_widthY - y_range
all_points_in_single_cell = False
n_shifts = 0
x_shifts = 0
y_shifts = 0
while all_points_in_single_cell is False:
#stop after a finite number of steps
if n_shifts > n_maximum_gridshifts:
break
shifting_fraction = np.sqrt(n_maximum_gridshifts) + 1
offset_x = x_shifts * dx/shifting_fraction
offset_y = y_shifts * dy/shifting_fraction
if n_shifts > 0:
print('{0} shift(s): x-offset {1} m - y-offset {2} m'.format(n_shifts,offset_x,offset_y))
center_x0 = xmin - surplusX/2. + offset_x
center_y0 = ymin - surplusY/2. + offset_y
grid_x_points = (np.arange(n_center_xblocks+1) * dx) + center_x0
grid_y_points = (np.arange(n_center_yblocks+1) * dy) + center_y0
station_cells = []
for idx_sta,co in enumerate(coords):
idx_x = np.argmin(np.abs(grid_x_points-co[0]))
if (grid_x_points-co[0])[idx_x] == 0:
# coordinate lies on a node line => need to shift
print('station coordinates lie on cell nodes')
break
#otherwise, shift the index to correspond with the row of blocks, if necessary:
if grid_x_points[idx_x] > co[0] :
idx_x -= 1
idx_y = np.argmin(np.abs(grid_y_points-co[1]))
if (grid_y_points-co[1])[idx_y] == 0:
# coordinate lies on a node line => need to shift
break
#otherwise, shift the index to correspond with the row of blocks, if necessary:
if grid_y_points[idx_y] > co[1] :
idx_y -= 1
        #unique cell index (row-major): each x-row holds n_center_yblocks cells,
        #cells enumerated West->East first, then northwards
        cell_index = idx_x * n_center_yblocks + idx_y
station_cells.append(cell_index)
if len(set(station_cells)) == len(coords):
all_points_in_single_cell = True
#shift the grid
x_shifts += 1
if x_shifts >= (shifting_fraction - 1):
x_shifts = 0
y_shifts += 1
n_shifts += 1
x_range = np.max(grid_x_points) - np.min(grid_x_points)
y_range = np.max(grid_y_points) - np.min(grid_y_points)
if all_points_in_single_cell < 1:
print('ERROR - cannot build grid having each station in a single cell!\n'\
'change the values for dx,dy or remove stations')
sys.exit()
#Now the inner grid is well distributed over the stations
#add padding to the sides:
grid_x_points = list(grid_x_points)
x_padding_widths = [dx]
for idx_pad in range(n_xpadding):
pad = x_padding_widths[-1] * padding_stretch
x_padding_widths.append(pad)
x_padding_widths.pop(0)
#extend the padding to at least the extent of the regular grid:
pad_ratio = np.sum(x_padding_widths)/(x_range * model_extension_factor)
if pad_ratio < 1:
x_padding_widths = np.array(x_padding_widths)/pad_ratio
#add the padding to the grid
for idx_pad in range(n_xpadding):
grid_x_points.insert(0,grid_x_points[0]-x_padding_widths[idx_pad])
grid_x_points.append(grid_x_points[-1]+x_padding_widths[idx_pad])
grid_y_points = list(grid_y_points)
y_padding_widths = [dy]
for idy_pad in range(n_ypadding):
pad = y_padding_widths[-1] * padding_stretch
y_padding_widths.append(pad)
y_padding_widths.pop(0)
#extend the padding to at least the extent of the regular grid:
pad_ratio = np.sum(y_padding_widths)/(y_range * model_extension_factor)
if pad_ratio < 1:
y_padding_widths = np.array(y_padding_widths)/pad_ratio
#add the padding to the grid
for idy_pad in range(n_ypadding):
grid_y_points.insert(0,grid_y_points[0]-y_padding_widths[idy_pad])
grid_y_points.append(grid_y_points[-1]+y_padding_widths[idy_pad])
xmin_padded = grid_x_points[0]
ymin_padded = grid_y_points[0]
# transfer the block coordinates into block widths
xblocks = []
for idx_x in range(len(grid_x_points)-1):
xblocks.append(grid_x_points[idx_x+1] - grid_x_points[idx_x])
yblocks = []
for idy_y in range(len(grid_y_points)-1):
yblocks.append(grid_y_points[idy_y+1] - grid_y_points[idy_y])
#---------------------------------------------------------------------
n_zpadding = 3
#build block depths:
n_layers_eff = n_layers - 1
#splitted uppermost layer
log_part_thickness = model_depth - (n_layers_eff-1) * z0
depths = np.logspace( np.log10(z0), np.log10(log_part_thickness), n_layers_eff ) + \
np.arange(n_layers_eff) * z0
depths = list(depths)
thicknesses = [z0/2.]
for i, layer in enumerate(depths):
if i == 0 :
t = layer/2.
else:
t = layer - depths[i-1]
thicknesses.append(t)
padding = [thicknesses[-1]*padding_stretch]
for idx_pad in range(n_zpadding-1):
padding.append(padding[-1]*padding_stretch)
total_padding = np.sum(padding)
pad_ratio = total_padding/model_depth
if pad_ratio < 1.5:
padding = list(np.array(padding)/pad_ratio*1.5)
if pad_ratio >2 :
padding = list(np.array(padding)/pad_ratio*2)
thicknesses.extend(padding)
grid_z_points = [0]
for t in thicknesses:
grid_z_points.append(grid_z_points[-1]+t)
#some information for the user:
print('\n\t Model set up - dimensions: {0:.1f}x{1:.1f}x{2:.1f} km^3 ({3}x{4}x{5} cells)\n'.format(
(grid_x_points[-1]-grid_x_points[0])/1000.,(grid_y_points[-1]-grid_y_points[0])/1000.,
depths[-1]/1000.,len(grid_x_points)-1,len(grid_y_points)-1,len(grid_z_points)-1))
outstring += '{0} {1} {2} {3} {4}\n'.format(len(xblocks),len(yblocks),
len(thicknesses), 0,'LOGE')
xstring = ''
for block in xblocks:
xstring += '{0:.3f} '.format(block)
xstring += '\n'
outstring += xstring
ystring = ''
for block in yblocks:
ystring += '{0:.3f} '.format(block)
ystring += '\n'
outstring += ystring
zstring = ''
for block in thicknesses:
zstring += '{0:.3f} '.format(block)
zstring += '\n'
outstring += zstring
for idx_z in range(len(thicknesses)):
z_string = ''
#empty line before each layer:
z_string += '\n'
resistivity = rho0
if inmodel1d is not None:
layertop_depth = grid_z_points[idx_z]
layertop_modelboundary_distance = layertop_depth-inmodel1d[:,0]
layertop_idx = (np.abs(layertop_modelboundary_distance)).argmin()
if layertop_modelboundary_distance[layertop_idx] < 0:
layertop_idx -= 1
resistivity = inmodel1d[layertop_idx,1]
for idx_y in range(len(yblocks)):
y_string = ''
for idx_x in range(len(xblocks)):
x_string = '{0:.5E} '.format(np.log(resistivity))
y_string += x_string
y_string += '\n'
z_string += y_string
outstring += z_string
co_reference = '{0} {1} {2} \n'.format(np.min(grid_x_points),np.min(grid_y_points),0)
outstring += co_reference
outstring += '0 \n'
Fout= open(modelfile,'w')
Fout.write(outstring)
Fout.close()
def plotgrid(stations,grid_x,grid_y,grid_z=None, n_xpadding = None, n_ypadding=None, n_zpadding_layers = None):
ion()
close('all')
equal = True
equal = False
grid_x = [i/1000. for i in grid_x]
grid_y = [i/1000. for i in grid_y]
# Note: X and Y are swapped - mathematical definition used in the plotting functions!!!
#fig = figure(1)
#ax = fig.gca()
fig = figure(figsize=(8, 6))
if grid_z is not None:
colspan = 3
else:
colspan = 4
if equal == True:
ax = subplot2grid((1, 4), (0, 0), colspan=colspan,aspect='equal')
else:
ax = subplot2grid((1, 4), (0, 0), colspan=colspan,aspect='auto')
#ax = subplot(1,2,1)
ax.scatter(stations[:,1]/1000.,stations[:,0]/1000.,c='r')
ax.scatter([ymin_padded/1000.],[xmin_padded/1000.],c='b',marker='x',s=40)
outline_x = [min(grid_x),min(grid_x),max(grid_x),max(grid_x),min(grid_x)]
outline_y = [min(grid_y),max(grid_y),max(grid_y),min(grid_y),min(grid_y)]
ax.plot(outline_y,outline_x,c='r')
if n_xpadding is not None and n_ypadding is not None:
regular_x = [grid_x[n_xpadding],grid_x[n_xpadding],
grid_x[-n_xpadding-1],grid_x[-n_xpadding-1],grid_x[n_xpadding]]
regular_y = [grid_y[n_ypadding],grid_y[-n_ypadding-1],
grid_y[-n_ypadding-1],grid_y[n_ypadding],grid_y[n_ypadding]]
ax.plot(regular_y,regular_x,c='b')
extension_factor = 0.1
x_extent = max(grid_x) - min(grid_x)
x_extension = extension_factor * x_extent
ax.set_ylim([min(grid_x) - x_extension,max(grid_x) + x_extension])
y_extent = max(grid_y) - min(grid_y)
y_extension = extension_factor * y_extent
ax.set_xlim([min(grid_y) - y_extension,max(grid_y) + y_extension])
ax.set_yticks(grid_x, minor=True)
ax.yaxis.grid(False, which='major')
ax.yaxis.grid(True, which='minor',c='g')
ax.set_xticks(grid_y, minor=True)
ax.xaxis.grid(False, which='major')
ax.xaxis.grid(True, which='minor',c='g')
ax.set_xlabel('Easting (Y-coordinate) in km')
ax.set_ylabel('Northing (X-coordinate) in km')
ax.set_title('Model geometry (origin at {0:.1f},{1:.1f})'.format(xmin_padded,ymin_padded))
if equal == True:
ax.set_aspect('equal',adjustable='box')
draw()
if grid_z is not None:
grid_z = [-i/1000. for i in grid_z]
bottom_index = len(grid_z) - n_zpadding_layers -1
if equal == True:
ax2 = subplot2grid((1, 4), (0, 3),aspect='equal')
else:
ax2 = subplot2grid((1, 4), (0, 3),aspect='auto')
#fig2 = figure(2)
#ax2 = fig2.gca()
#ax2 = subplot(1,2,2)
outline_z = [min(grid_z),min(grid_z),max(grid_z),max(grid_z),min(grid_z)]
outline_y = [min(grid_y),max(grid_y),max(grid_y),min(grid_y),min(grid_y)]
plot(outline_y,outline_z,c='r')
plot([min(grid_y),max(grid_y)],[grid_z[bottom_index],grid_z[bottom_index]],c='b')
ax2.axhline(linewidth=2, color='k')
extension_factor = 0.1
z_extent = max(grid_z) - min(grid_z)
z_extension = extension_factor * z_extent
ax2.set_ylim([min(grid_z) - z_extension,max(grid_z) + z_extension])
y_extent = max(grid_y) - min(grid_y)
y_extension = extension_factor * y_extent
ax2.set_xlim([min(grid_y) - y_extension,max(grid_y) + y_extension])
#ax2.set_aspect('equal','datalim')
ax2.set_yticks(grid_z, minor=True)
ax2.yaxis.grid(False, which='major')
ax2.yaxis.grid(True, which='minor',c='k')
ax2.set_xlabel('Easting (Y-coordinate) in km')
ax2.set_ylabel('Depth in km')
ax2.set_title('Model layers')
ax2.set_aspect('equal',adjustable='box')
tight_layout()
show(block=True)
if plot == True:
import platform
if not platform.system().lower().startswith('win') :
#generate an interactive plot window, which remains open after this script has finshed:
proc_num = os.fork()
if proc_num != 0:
#This is the parent process, that should quit immediately to return to the
#shell.
print("You can kill the plot window with the command \"kill %d\"." % proc_num)
sys.exit()
from pylab import *
plotgrid(coords,grid_x_points,grid_y_points,grid_z_points,n_xpadding,n_ypadding, n_zpadding)
| [
"[email protected]"
] | |
53e93f962e07335199743cfd2031f7866c6928b6 | f891828ffe9c8501d276560c8c52d319f284056f | /205_isomorphic_m/index_map.py | 0f5f7ccd9a02480277885e72dd81ce413e922721 | [] | no_license | chao-shi/lclc | 1b852ab61fef4072039c61f68e951ab2072708bf | 2722c0deafcd094ce64140a9a837b4027d29ed6f | refs/heads/master | 2021-06-14T22:07:54.120375 | 2019-09-02T23:13:59 | 2019-09-02T23:13:59 | 110,387,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
        # map each character to the index of its first occurrence; the strings are
        # isomorphic exactly when these index patterns match (list() keeps the
        # comparison meaningful on Python 3, where map returns an iterator)
        return list(map(s.find, s)) == list(map(t.find, t))
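        # e.g. isIsomorphic("egg", "add") -> True, isIsomorphic("paper", "title") -> True,
        #      isIsomorphic("foo", "bar") -> False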
# From OJ discussion | [
"[email protected]"
] | |
a44f223635db7b358b49ccb6ca7923250c316bad | be5ea20226c37d81f1ccb2f704d8825d36e88765 | /Exams/2-apr-2020/skeleton/tests/test_magic_card.py | 75d8d74d391b2baf58d8a14b44e8e5b922aabf4f | [] | no_license | dimDamyanov/PythonOOP | 3845e450e5a48fef4f70a186664e07c0cd60e09b | 723204f5b7e953874fac9314e48eb1d1628d6ff5 | refs/heads/main | 2023-04-07T18:00:36.735248 | 2021-04-19T20:57:14 | 2021-04-19T20:57:14 | 341,329,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import unittest
from project.card.magic_card import MagicCard
class TestMagicCard(unittest.TestCase):
def setUp(self) -> None:
self.magic_card = MagicCard('Card')
def test_init_attrs_set(self) -> None:
self.assertEqual(self.magic_card.name, 'Card')
self.assertEqual(self.magic_card.damage_points, 5)
self.assertEqual(self.magic_card.health_points, 80)
def test_init__when_name_invalid__expect_exception(self) -> None:
with self.assertRaises(ValueError) as context:
MagicCard('')
self.assertEqual(context.exception.args[0], 'Card\'s name cannot be an empty string.')
def test_damage_points_setter__expect_exception(self) -> None:
with self.assertRaises(ValueError) as context:
self.magic_card.damage_points = -10
self.assertEqual(context.exception.args[0], 'Card\'s damage points cannot be less than zero.')
def test_health_points_setter__expect_exception(self) -> None:
with self.assertRaises(ValueError) as context:
self.magic_card.health_points = -10
self.assertEqual(context.exception.args[0], 'Card\'s HP cannot be less than zero.')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
426d534ea1ee13cc690136f8ee33d913fa161456 | fd90b8efa1daaec44b54797e549e0f738f4a5897 | /shuxue/3的幂.py | 648fe8a17fac9249b54026f7347281ef036e3e5e | [] | no_license | ddz-mark/LeetCode | 2a622eeb655398ca9ebd9feee93a52cd114a77c4 | d557faf87374ad8c65634ee9d9e572b88a54913a | refs/heads/master | 2021-07-12T06:58:57.162657 | 2021-04-18T13:25:03 | 2021-04-18T13:25:03 | 244,403,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # 给定一个整数,写一个函数来判断它是否是 3 的幂次方。
class Solution(object):
def isPowerOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
        if n <= 0:
            return False
        while n % 3 == 0:
            n //= 3
if n == 1:
return True
else:
return False
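    # Note: since 3 is prime, a loop-free check for 32-bit inputs is
    #   return n > 0 and 1162261467 % n == 0
    # where 1162261467 == 3**19 is the largest power of three below 2**31.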
if __name__ == '__main__':
ob = Solution()
print(ob.isPowerOfThree(9))
| [
"[email protected]"
] | |
37ad995aa7d1d0d0fc4db7476eed8c5d9fcb4d47 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/wikipedia/testcase/interestallcases/testcase1_008_0.py | 34135d8ab0a3e138c9938ad9ce244b9bbea57ae9 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,965 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.wikipedia',
'appActivity' : 'org.wikipedia.main.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.wikipedia/org.wikipedia.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
swipe(driver, 0.5, 0.6, 0.5, 0.2)
else:
return element
return
def clickoncheckable(driver, str, value = "true") :
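    # walk the visible LinearLayout rows, find the one whose label matches `str`,
    # and click its checkable widget only when its current checked state differs
    # from the requested value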
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if (len(lists) == 1) :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
# preference setting and exit
try :
os.popen("adb shell svc data diable")
time.sleep(5)
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.DeveloperSettingsActivity")
scrollToFindElement(driver, "new UiSelector().text(\"useRestbase_setManually\")").click()
clickoncheckable(driver, "new UiSelector().text(\"useRestbase_setManually\")", "true")
scrollToFindElement(driver, "new UiSelector().text(\"suppressNotificationPolling\")").click()
clickoncheckable(driver, "new UiSelector().text(\"suppressNotificationPolling\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"memoryLeakTest\")").click()
clickoncheckable(driver, "new UiSelector().text(\"memoryLeakTest\")", "true")
scrollToFindElement(driver, "new UiSelector().text(\"readingListLoginReminder\")").click()
clickoncheckable(driver, "new UiSelector().text(\"readingListLoginReminder\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")").click()
clickoncheckable(driver, "new UiSelector().text(\"readingListsFirstTimeSync\")", "true")
driver.press_keycode(4)
time.sleep(2)
os.popen("adb shell am start -n org.wikipedia/org.wikipedia.settings.SettingsActivity")
scrollToFindElement(driver, "new UiSelector().text(\"Show link previews\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Show link previews\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"Download only over Wi-Fi\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Download only over Wi-Fi\")", "false")
scrollToFindElement(driver, "new UiSelector().text(\"Show images\")").click()
clickoncheckable(driver, "new UiSelector().text(\"Show images\")", "false")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
finally :
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"1_008_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
# testcase008
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/menu_overflow_button\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
swipe(driver, 0.5, 0.8, 0.5, 0.2)
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/menu_overflow_button\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Got it\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.wikipedia:id/view_static_card_icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"Share the article link\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"1_008\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.wikipedia'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
os.popen("adb shell svc data enable")
| [
"[email protected]"
] | |
ee0c0439bc8653fcafc1eda4272bc03c2c60a56f | fafee9d79beb7433633d59dce865efb437d608f6 | /__init__.py | 89ba90efec4b90e8c130bf0cd0c31e76b9df484d | [
"MIT",
"BSD-3-Clause"
] | permissive | PabloRomanH/cihaidata-unihan | 65830d945e50518895ce3f06a211a117cd502b84 | c408b57f61b5b13926f42c3647cc0bc61da758be | refs/heads/master | 2021-01-12T22:33:15.289066 | 2016-06-03T17:37:40 | 2016-06-03T17:37:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python
# -*- coding: utf8 - *-
"""Tool to build `Unihan`_ dataset into datapackage / simple data format."""
from __future__ import absolute_import, division, print_function, \
with_statement, unicode_literals
__title__ = 'cihaidata-python'
__package_name__ = 'cihaidata_python'
__description__ = 'Tool to build `Unihan`_ dataset into datapackage / simple data format.'
__version__ = '0.0.1'
__author__ = 'Tony Narlock'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2013-2014 Tony Narlock'
from .unihan import Unihan, check_install, create_table, flatten_datasets
from .scripts import save, download, extract, convert
| [
"[email protected]"
] | |
dba9cbb205056e92cf377392703f257eafae100a | 242f1dafae18d3c597b51067e2a8622c600d6df2 | /src/1400-1499/1486.xor.in.array.py | 9f93fdedc92c4d84e1a7f4947d3adae6aec16ebd | [] | no_license | gyang274/leetcode | a873adaa083270eb05ddcdd3db225025533e0dfe | 6043134736452a6f4704b62857d0aed2e9571164 | refs/heads/master | 2021-08-07T15:15:01.885679 | 2020-12-22T20:57:19 | 2020-12-22T20:57:19 | 233,179,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | class Solution:
def xorOperation(self, n: int, start: int) -> int:
# TC: O(N), SC: O(1), note it is possible but difficult to complete this in O(1)..
ans = 0
for i in range(n):
ans ^= (start + 2 * i)
return ans
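        # A known O(1) alternative (sketch): write start = 2*s + b with b = start & 1; then
        # the answer is 2 * (prefix_xor(s + n - 1) ^ prefix_xor(s - 1)) + (b * (n & 1)),
        # where prefix_xor(m) = XOR of 0..m follows the pattern [m, 1, m+1, 0] for m % 4 == 0,1,2,3.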
| [
"[email protected]"
] | |
7cb4b4d3b8da5d4f881a238fd2bb87a15a3dbb29 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /d3pm/text/main_test.py | ddcfcfad48bbd428ea167bea70a1e48d197f9795 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,725 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for the main executable."""
import random
from absl.testing import absltest
import jax
import numpy as np
import tensorflow_datasets as tfds
from d3pm.text import configs
from d3pm.text import main
class MainTest(absltest.TestCase):
def test_small_training_job(self):
experiment_dir = self.create_tempdir().full_path
# Disable compiler optimizations for faster compile time.
jax.config.update('jax_disable_most_optimizations', True)
# Seed the random number generators.
random.seed(0)
np.random.seed(0)
# Construct a test config with a small number of steps.
configs.gin_load('lm1b_tiny')
with tfds.testing.mock_data(num_examples=2048):
# Make sure we can train without any exceptions.
main.run_experiment(
experiment_dir,
batch_size_per_device=1,
max_train_steps=1,
validate_every=5,
train_summary_frequency=5,
num_eval_steps=5,
num_predict_steps=1,
restore_checkpoint=False,
checkpoint_frequency=None,
)
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] | |
15af76b56f23169ecae06276f96acb2561d4c1b9 | 6480db97bf64fe5020bca084c38b7728ba9a1b9a | /pursuit/src/mvp_landing/urls.py | e10ed7a8489bdc075f57077b42c3bc7754100fd9 | [] | no_license | masterfung/mvp_landing-Django | 056727d3c7107f18ea5203b98816bad9bb94edaa | 4ae2d9128e13616ca2e4e82e36927a352f515858 | refs/heads/master | 2021-01-13T02:35:57.392427 | 2014-06-02T14:30:37 | 2014-06-02T14:30:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Note: URL patterns are matched in order, so these views have to be listed in order
url(r'^$', 'signups.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^thank-you/$', 'signups.views.thankyou', name='thankyou'),
url(r'^about-us/$', 'signups.views.aboutus', name='aboutus'),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
"[email protected]"
] | |
1575804de3dd437ba9e8b545b69edced761183fd | 78d425c98f093e01dd78c15ffa76cf50f7e4fe1e | /tests/tier1/tc_1087_check_vdc_virtual_pool_revoked_in_guest_after_host_unattached.py | c2f7f74a50aa84f8b13159dec95faef8625f8fe0 | [] | no_license | Junefen/virtwho-ci | 57ef8637a78605fd3f2b1d3eb31031ba2b0de480 | 23e144171d2cc6ee87edfefdace8a25eb3e7bc65 | refs/heads/master | 2023-03-19T04:16:08.055779 | 2021-09-29T03:52:08 | 2021-09-29T03:52:08 | 194,606,208 | 0 | 0 | null | 2019-07-01T05:35:22 | 2019-07-01T05:35:22 | null | UTF-8 | Python | false | false | 2,453 | py | # coding:utf-8
from virt_who import *
from virt_who.base import Base
from virt_who.register import Register
from virt_who.testing import Testing
class Testcase(Testing):
def test_run(self):
self.vw_case_info(os.path.basename(__file__), case_id='RHEL-134064')
self.vw_case_init()
# case config
results = dict()
virtwho_conf = "/etc/virt-who.conf"
self.vw_option_enable('[global]', virtwho_conf)
self.vw_option_enable('debug', virtwho_conf)
self.vw_option_update_value('debug', 'True', virtwho_conf)
config_name = "virtwho-config"
config_file = "/etc/virt-who.d/{0}.conf".format(config_name)
self.vw_etc_d_mode_create(config_name, config_file)
host_name = self.get_hypervisor_hostname()
host_uuid = self.get_hypervisor_hostuuid()
register_config = self.get_register_config()
vdc_physical_sku = register_config['vdc']
vdc_virtual_sku = register_config['vdc_bonus']
# case steps
logger.info(">>>step1: run virt-who and check the mapping info is sent or not")
data, tty_output, rhsm_output = self.vw_start()
res = self.op_normal_value(data, exp_error=0, exp_thread=1, exp_send=1)
results.setdefault('step1', []).append(res)
logger.info(">>>step2: attach physical sku for host/hypervisor")
sku_attrs = self.system_sku_attr(self.ssh_host(), vdc_physical_sku, "physical")
physical_pool_id = sku_attrs['pool_id']
self.vw_web_attach(host_name, host_uuid, physical_pool_id)
logger.info(">>>step3: attach virtual sku by pool_id in guest")
sku_attrs = self.system_sku_attr(self.ssh_guest(), vdc_virtual_sku, "virtual")
virtual_pool_id = sku_attrs['pool_id']
self.system_sku_attach(self.ssh_guest(), pool_id=virtual_pool_id)
output = self.system_sku_consumed(self.ssh_guest())
res = self.vw_msg_search(output, vdc_virtual_sku, exp_exist=True)
results.setdefault('step3', []).append(res)
logger.info(">>>step4: unattach physical sku from host/hypervisor and check virtual pool")
self.vw_web_unattach(host_name, host_uuid)
output = self.system_sku_consumed(self.ssh_guest(), exp_exist=False)
res = self.vw_msg_search(output, vdc_virtual_sku, exp_exist=False)
results.setdefault('step4', []).append(res)
# case result
self.vw_case_result(results)
| [
"[email protected]"
] | |
ada54b21eb805ba13403e644e467913924e72667 | d56828f1f7ae8fbb1fc2e79f84c82c4be1d13651 | /google/cloud/datacatalog_v1/types/schema.py | 4a51a1226512ae2416ba971f949dedc02bb6bb30 | [
"Apache-2.0"
] | permissive | isabella232/python-datacatalog | 940d7664d55ae01524f7fe89f8a295e9190ec23c | d16420640ec97c17e4c63516b8375b41df82de9c | refs/heads/master | 2022-12-19T23:03:49.561389 | 2020-10-16T19:58:42 | 2020-10-16T19:58:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,287 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.datacatalog.v1", manifest={"Schema", "ColumnSchema",},
)
class Schema(proto.Message):
r"""Represents a schema (e.g. BigQuery, GoogleSQL, Avro schema).
Attributes:
columns (Sequence[~.schema.ColumnSchema]):
Required. Schema of columns. A maximum of
10,000 columns and sub-columns can be specified.
"""
columns = proto.RepeatedField(proto.MESSAGE, number=2, message="ColumnSchema",)
class ColumnSchema(proto.Message):
r"""Representation of a column within a schema. Columns could be
nested inside other columns.
Attributes:
column (str):
Required. Name of the column.
type (str):
Required. Type of the column.
description (str):
Optional. Description of the column. Default
value is an empty string.
mode (str):
Optional. A column's mode indicates whether the values in
this column are required, nullable, etc. Only ``NULLABLE``,
``REQUIRED`` and ``REPEATED`` are supported. Default mode is
``NULLABLE``.
subcolumns (Sequence[~.schema.ColumnSchema]):
Optional. Schema of sub-columns. A column can
have zero or more sub-columns.
"""
column = proto.Field(proto.STRING, number=6)
type = proto.Field(proto.STRING, number=1)
description = proto.Field(proto.STRING, number=2)
mode = proto.Field(proto.STRING, number=3)
subcolumns = proto.RepeatedField(proto.MESSAGE, number=7, message="ColumnSchema",)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] |