# File: corpusCreator.py (repo: Tzvi23/Hierarchical-Summarization-Part1, no license)
"""
!!! This file is not used in the project anymore
"""
# TODO check if can be deleted
import xml.etree.ElementTree as ET
import os
import copy
import pickle
def get_jaccard_sim(str1, str2):
a = set(str1.split())
b = set(str2.split())
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
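# Illustrative check (not part of the original module): the two titles below
# share one token out of three distinct tokens, so the similarity is 1/3.
#   get_jaccard_sim('chairmans statement', 'chairmans report')  # -> 0.333...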
def init_sectionNames_dict(mode='list'):
""" NOT USED """
corpusDict = dict()
sections_types = ['chairmans statement', 'chief executive officer ceo review', 'chief executive officer ceo report',
'governance statement',
'remuneration report',
'business review', 'financial review', 'operating review', 'highlights', 'auditors report',
'risk management', 'chairmans governance introduction',
'Corporate Social Responsibility CSR disclosures']
for section in sections_types:
if mode == 'list':
corpusDict[section.lower()] = list()
if mode == 'float':
corpusDict[section.lower()] = float(0)
return corpusDict
def createCorpusBySectionName(dir_path='output/xml/'):
sectionNames = list()
file_corpus = dict()
corpus = dict()
for filename in os.listdir(dir_path):
print('Processing: {0}'.format(filename))
if filename.endswith('.txt.xml'):
corpus.clear()
tree = ET.parse(dir_path + filename)
root = tree.getroot()
for elem in root:
if elem.attrib:
if elem.attrib['name'].lower() not in corpus:
corpus[elem.attrib['name'].lower()] = list()
for sub_element in elem:
corpus[elem.attrib['name'].lower()].append(sub_element.text)
file_corpus[int(filename[:-8])] = copy.deepcopy(corpus)
for sectionName in corpus.keys():
sectionNames.append(sectionName)
return file_corpus, set(sectionNames)
def loadCorpus(file, dir_path='saved_data'):
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
files_pickle, sections_pickle = createCorpusBySectionName()
with open(dir_path + '/' + file, 'wb') as saveFile:
print('Saving data to {0} ...'.format(dir_path + '/' + file))
pickle.dump([files_pickle, sections_pickle], saveFile)
print('Created {0} and saved'.format(file))
else:
if os.path.isfile(dir_path + '/' + file):
with open(dir_path + '/' + file, 'rb') as saveFile:
print('Loading {0} ...'.format(file))
files_pickle, sections_pickle = pickle.load(saveFile)
else:
files_pickle, sections_pickle = createCorpusBySectionName()
with open(dir_path + '/' + file, 'wb') as saveFile:
print('Created {0} and saved'.format(file))
pickle.dump([files_pickle, sections_pickle], saveFile)
return files_pickle, sections_pickle
def mostRelevantTag(name, res_dict):
for pre_defined_sections in res_dict.keys():
res_dict[pre_defined_sections] = get_jaccard_sim(name, pre_defined_sections)
max_value = max(res_dict.values()) # maximum value
if max_value == 0:
return -1
max_keys = [k for k, v in res_dict.items() if v == max_value] # getting all keys containing the `maximum`
return max_keys
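# Illustrative check (not part of the original module): a parsed heading such
# as 'chairmans statement 2019' scores highest against the predefined section
# 'chairmans statement' (Jaccard 2/3), so that key is returned.
#   mostRelevantTag('chairmans statement 2019', init_sectionNames_dict('float'))
#   # -> ['chairmans statement']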
def process_corpus(corpus):
local_jaccard_dict = init_sectionNames_dict('float')
dict_sections = init_sectionNames_dict()
counterFile = 1
counterSize = len(corpus)
for file in corpus.keys():
print('Processing Corpus {0}/{1}'.format(counterFile, counterSize))
for section in corpus[file].keys():
keys = mostRelevantTag(section, local_jaccard_dict)
if keys == -1:
continue
for key in keys:
if not corpus[file][section]: # Check if trying to add empty list
break
dict_sections[key].append((file, corpus[file][section]))
counterFile += 1
print('Done!')
return dict_sections
# files, sections = loadCorpus('corpus.pickle')
# process_corpus(files)
# print()
# File: core/migrations/0015_auto_20191118_1451.py (repo: Alexanader/eCommerce-site, no license)
# Generated by Django 2.2 on 2019-11-18 14:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20191118_1450'),
]
operations = [
migrations.RenameField(
model_name='billingaddress',
old_name='countries',
new_name='country',
),
]
# File: All_In_One/addons/io_scene_md2/quake2/bsp.py (repo: 2434325680/Learnbgame, no license)
"""This module provides file I/O for Quake 2 BSP map files.
Example:
bsp_file = bsp.Bsp.open('base1.bsp')
References:
Quake 2 Source
- id Software
- https://github.com/id-Software/Quake-2
Quake 2 BSP File Format
- Max McGuire
- http://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml
"""
import io
import struct
__all__ = ['BadBspFile', 'is_bspfile', 'Bsp']
class BadBspFile(Exception):
pass
def _check_bspfile(fp):
    fp.seek(0)
    data = fp.read(struct.calcsize('<4si'))
    # struct.unpack returns an (identity, version) tuple; compare the values
    # with == rather than identity checks
    identity, version = struct.unpack('<4si', data)
    return identity == b'IBSP' and version == 38
def is_bspfile(filename):
"""Quickly see if a file is a bsp file by checking the magic number.
The filename argument may be a file for file-like object.
"""
result = False
try:
if hasattr(filename, 'read'):
return _check_bspfile(fp=filename)
else:
with open(filename, 'rb') as fp:
return _check_bspfile(fp)
except:
pass
return result
class ClassSequence:
"""Class for reading a sequence of data structures"""
Class = None
@classmethod
def write(cls, file, structures):
for structure in structures:
cls.Class.write(file, structure)
@classmethod
def read(cls, file):
return [cls.Class(*c) for c in struct.iter_unpack(cls.Class.format, file.read())]
class Entities:
"""Class for representing the entities lump"""
@classmethod
def write(cls, file, entities):
entities_data = entities.encode('cp437')
file.write(entities_data)
@classmethod
def read(cls, file):
entities_data = file.read()
return entities_data.decode('cp437')
class Plane:
"""Class for representing a bsp plane
Attributes:
normal: The normal vector to the plane.
distance: The distance from world (0, 0, 0) to a point on the plane
type: Planes are classified as follows:
0: Axial plane aligned to the x-axis.
1: Axial plane aligned to the y-axis.
2: Axial plane aligned to the z-axis.
3: Non-axial plane roughly aligned to the x-axis.
4: Non-axial plane roughly aligned to the y-axis.
5: Non-axial plane roughly aligned to the z-axis.
"""
format = '<4fi'
size = struct.calcsize(format)
__slots__ = (
'normal',
'distance',
'type'
)
def __init__(self,
normal_x,
normal_y,
normal_z,
distance,
type):
self.normal = normal_x, normal_y, normal_z
self.distance = distance
self.type = type
@classmethod
def write(cls, file, plane):
plane_data = struct.pack(cls.format,
*plane.normal,
plane.distance,
plane.type)
file.write(plane_data)
@classmethod
def read(cls, file):
plane_data = file.read(cls.size)
plane_struct = struct.unpack(cls.format, plane_data)
return Plane(*plane_struct)
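# Illustrative round trip (not part of the original module): a floor plane at
# z = 64 is an axial z plane (type 2) and packs into Plane.size == 20 bytes.
#   buf = io.BytesIO()
#   Plane.write(buf, Plane(0.0, 0.0, 1.0, 64.0, 2))
#   buf.seek(0)
#   plane = Plane.read(buf)   # plane.normal == (0.0, 0.0, 1.0)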
class Planes(ClassSequence):
Class = Plane
class Vertex:
"""Class for representing a vertex
A Vertex is an XYZ triple.
Attributes:
x: The x-coordinate
y: The y-coordinate
z: The z-coordinate
"""
format = '<3f'
size = struct.calcsize(format)
__slots__ = (
'x',
'y',
'z'
)
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __getitem__(self, item):
if type(item) is int:
return [self.x, self.y, self.z][item]
elif type(item) is slice:
start = item.start or 0
stop = item.stop or 3
return [self.x, self.y, self.z][start:stop]
@classmethod
def write(cls, file, vertex):
vertex_data = struct.pack(cls.format,
vertex.x,
vertex.y,
vertex.z)
file.write(vertex_data)
@classmethod
def read(cls, file):
vertex_data = file.read(cls.size)
vertex_struct = struct.unpack(cls.format, vertex_data)
return Vertex(*vertex_struct)
class Vertexes(ClassSequence):
Class = Vertex
class Visibilities:
@classmethod
def write(cls, file, structures):
file.write(structures)
@classmethod
def read(cls, file):
return file.read()
class Node:
"""Class for representing a node
A Node is a data structure used to compose a bsp tree data structure. A
child may be a Node or a Leaf.
Attributes:
plane_number: The number of the plane that partitions the node.
children: A two-tuple of the two sub-spaces formed by the partitioning
plane.
Note: Child 0 is the front sub-space, and 1 is the back sub-space.
            Note: A negative value means the child is a leaf (leaf index
                is -(child + 1)).
bounding_box_min: The minimum coordinate of the bounding box containing
this node and all of its children.
bounding_box_max: The maximum coordinate of the bounding box containing
this node and all of its children.
        first_face: The number of the first face in Bsp.faces.
        number_of_faces: The number of faces contained in the node. These
            are stored in consecutive order in Bsp.faces starting at
            Node.first_face.
"""
format = '<3i6h2H'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'children',
'bounding_box_min',
'bounding_box_max',
'first_face',
'number_of_faces'
)
def __init__(self,
plane_number,
child_front,
child_back,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
first_face,
number_of_faces):
self.plane_number = plane_number
self.children = child_front, child_back
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.first_face = first_face
self.number_of_faces = number_of_faces
@classmethod
def write(cls, file, node):
node_data = struct.pack(cls.format,
node.plane_number,
*node.children,
*node.bounding_box_min,
*node.bounding_box_max,
node.first_face,
node.number_of_faces)
file.write(node_data)
@classmethod
def read(cls, file):
node_data = file.read(cls.size)
node_struct = struct.unpack(cls.format, node_data)
return Node(*node_struct)
class Nodes(ClassSequence):
Class = Node
class SurfaceFlag:
LIGHT = 0x1
SLICK = 0x2
SKY = 0x4
WARP = 0x8
TRANS33 = 0x10
TRANS66 = 0x20
FLOWING = 0x40
NODRAW = 0x80
class TextureInfo:
"""Class for representing a texture info
Attributes:
s: The s vector in texture space represented as an XYZ three-tuple.
s_offset: Horizontal offset in texture space.
t: The t vector in texture space represented as an XYZ three-tuple.
t_offset: Vertical offset in texture space.
flags: A bitfield of surface behaviors.
        value: Light emission or other flag-dependent value.
texture_name: The path of the texture.
next_texture_info: For animated textures. Sequence will be terminated
with a value of -1
"""
format = '<8f2i32si'
size = struct.calcsize(format)
__slots__ = (
's',
's_offset',
't',
't_offset',
'flags',
'value',
'texture_name',
'next_texture_info'
)
def __init__(self,
s_x,
s_y,
s_z,
s_offset,
t_x,
t_y,
t_z,
t_offset,
flags,
value,
texture_name,
next_texture_info):
self.s = s_x, s_y, s_z
self.s_offset = s_offset
self.t = t_x, t_y, t_z
self.t_offset = t_offset
self.flags = flags
self.value = value
if type(texture_name) == bytes:
self.texture_name = texture_name.split(b'\00')[0].decode('ascii')
else:
self.texture_name = texture_name
self.next_texture_info = next_texture_info
@classmethod
def write(cls, file, texture_info):
texture_info_data = struct.pack(cls.format,
*texture_info.s,
texture_info.s_offset,
*texture_info.t,
texture_info.t_offset,
texture_info.flags,
texture_info.value,
texture_info.texture_name.encode('ascii'),
texture_info.next_texture_info)
file.write(texture_info_data)
@classmethod
def read(cls, file):
texture_info_data = file.read(cls.size)
texture_info_struct = struct.unpack(cls.format, texture_info_data)
return TextureInfo(*texture_info_struct)
class TextureInfos(ClassSequence):
Class = TextureInfo
class Face:
"""Class for representing a face
Attributes:
plane_number: The plane in which the face lies.
side: Which side of the plane the face lies. 0 is the front, 1 is the
back.
first_edge: The number of the first edge in Bsp.surf_edges.
number_of_edges: The number of edges contained within the face. These
are stored in consecutive order in Bsp.surf_edges starting at
Face.first_edge.
texture_info: The number of the texture info for this face.
styles: A four-tuple of lightmap styles.
light_offset: The offset into the lighting data.
"""
format = '<Hhi2h4Bi'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'side',
'first_edge',
'number_of_edges',
'texture_info',
'styles',
'light_offset'
)
def __init__(self,
plane_number,
side,
first_edge,
number_of_edges,
texture_info,
style_0,
style_1,
style_2,
style_3,
light_offset):
self.plane_number = plane_number
self.side = side
self.first_edge = first_edge
self.number_of_edges = number_of_edges
self.texture_info = texture_info
self.styles = style_0, style_1, style_2, style_3
self.light_offset = light_offset
@classmethod
def write(cls, file, plane):
face_data = struct.pack(cls.format,
plane.plane_number,
plane.side,
plane.first_edge,
plane.number_of_edges,
plane.texture_info,
*plane.styles,
plane.light_offset)
file.write(face_data)
@classmethod
def read(cls, file):
face_data = file.read(cls.size)
face_struct = struct.unpack(cls.format, face_data)
return Face(*face_struct)
class Faces(ClassSequence):
Class = Face
class Lighting:
@classmethod
def write(cls, file, lighting):
file.write(lighting)
@classmethod
def read(cls, file):
return file.read()
class Contents:
SOLID = 1
WINDOW = 2
AUX = 4
LAVA = 8
SLIME = 16
WATER = 32
MIST = 64
LAST_VISIBLE = 64
AREAPORTAL = 0x8000
PLAYERCLIP = 0x10000
MONSTERCLIP = 0x20000
CURRENT_0 = 0x40000
CURRENT_90 = 0x80000
CURRENT_180 = 0x100000
CURRENT_270 = 0x200000
CURRENT_UP = 0x400000
CURRENT_DOWN = 0x800000
ORIGIN = 0x1000000
MONSTER = 0x2000000
DEADMONSTER = 0x4000000
DETAIL = 0x8000000
TRANSLUCENT = 0x10000000
LADDER = 0x20000000
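# Usage note (added for clarity): Contents values are bit flags and combine
# with bitwise OR, e.g. (Contents.SOLID | Contents.DETAIL) marks detail solids.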
class Leaf:
"""Class for representing a leaf
Attributes:
contents: The content of the leaf. Affect the player's view.
cluster: The cluster containing this leaf. -1 for no visibility info.
area: The area containing this leaf.
bounding_box_min: The minimum coordinate of the bounding box containing
this node.
bounding_box_max: The maximum coordinate of the bounding box containing
this node.
        first_leaf_face: The number of the first entry in Bsp.leaf_faces.
        number_of_leaf_faces: The number of faces contained within the leaf.
            These are stored in consecutive order in Bsp.leaf_faces at
            Leaf.first_leaf_face.
        first_leaf_brush: The number of the first entry in Bsp.leaf_brushes.
        number_of_leaf_brushes: The number of brushes contained within the
            leaf. These are stored in consecutive order in Bsp.leaf_brushes at
            Leaf.first_leaf_brush.
"""
format = '<i8h4H'
size = struct.calcsize(format)
__slots__ = (
'contents',
'cluster',
'area',
'bounding_box_min',
'bounding_box_max',
'first_leaf_face',
'number_of_leaf_faces',
'first_leaf_brush',
'number_of_leaf_brushes'
)
def __init__(self,
contents,
cluster,
area,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
first_leaf_face,
number_of_leaf_faces,
first_leaf_brush,
number_of_leaf_brushes):
self.contents = contents
self.cluster = cluster
self.area = area
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.first_leaf_face = first_leaf_face
self.number_of_leaf_faces = number_of_leaf_faces
self.first_leaf_brush = first_leaf_brush
self.number_of_leaf_brushes = number_of_leaf_brushes
@classmethod
def write(cls, file, leaf):
leaf_data = struct.pack(cls.format,
leaf.contents,
leaf.cluster,
leaf.area,
*leaf.bounding_box_min,
*leaf.bounding_box_max,
leaf.first_leaf_face,
leaf.number_of_leaf_faces,
leaf.first_leaf_brush,
leaf.number_of_leaf_brushes)
file.write(leaf_data)
@classmethod
def read(cls, file):
leaf_data = file.read(cls.size)
leaf_struct = struct.unpack(cls.format, leaf_data)
return Leaf(*leaf_struct)
class Leafs(ClassSequence):
Class = Leaf
class LeafFaces:
@classmethod
def write(cls, file, leaf_faces):
leaf_faces_format = '<{}H'.format(len(leaf_faces))
leaf_faces_data = struct.pack(leaf_faces_format, *leaf_faces)
file.write(leaf_faces_data)
@classmethod
def read(cls, file):
return [lf[0] for lf in struct.iter_unpack('<H', file.read())]
class LeafBrushes:
@classmethod
def write(cls, file, leaf_brushes):
leaf_brushes_format = '<{}H'.format(len(leaf_brushes))
leaf_brushes_data = struct.pack(leaf_brushes_format, *leaf_brushes)
file.write(leaf_brushes_data)
@classmethod
def read(cls, file):
return [lb[0] for lb in struct.iter_unpack('<H', file.read())]
class Edge:
"""Class for representing a edge
Attributes:
vertexes: A two-tuple of vertexes that form the edge. Vertex 0 is the
start vertex, and 1 is the end vertex.
"""
format = '<2H'
size = struct.calcsize(format)
    __slots__ = (
        'vertexes',
    )
def __init__(self, vertex_0, vertex_1):
self.vertexes = vertex_0, vertex_1
def __getitem__(self, item):
if item > 1:
raise IndexError('list index of out of range')
return self.vertexes[item]
@classmethod
def write(cls, file, edge):
edge_data = struct.pack(cls.format,
*edge.vertexes)
file.write(edge_data)
@classmethod
def read(cls, file):
edge_data = file.read(cls.size)
edge_struct = struct.unpack(cls.format, edge_data)
return Edge(*edge_struct)
class Edges(ClassSequence):
Class = Edge
class SurfEdges:
    @classmethod
    def write(cls, file, surf_edges):
        # Surf edges are signed 32-bit ints; a negative index means the edge
        # is traversed in reverse (end vertex first).
        surf_edges_format = '<{}i'.format(len(surf_edges))
        surf_edges_data = struct.pack(surf_edges_format, *surf_edges)
        file.write(surf_edges_data)
    @classmethod
    def read(cls, file):
        return [se[0] for se in struct.iter_unpack('<i', file.read())]
class Model:
"""Class for representing a model
Attributes:
bounding_box_min: The minimum coordinate of the bounding box containing
the model.
bounding_box_max: The maximum coordinate of the bounding box containing
the model.
origin: The origin of the model.
        head_node: The index of the root node of the model's bsp tree.
        first_face: The number of the first face in Bsp.faces.
        number_of_faces: The number of faces contained in the model. These
            are stored in consecutive order in Bsp.faces starting at
            Model.first_face.
"""
format = '<9f3i'
size = struct.calcsize(format)
__slots__ = (
'bounding_box_min',
'bounding_box_max',
'origin',
'head_node',
'first_face',
'number_of_faces'
)
def __init__(self,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
origin_x,
origin_y,
origin_z,
head_node,
first_face,
number_of_faces):
self.bounding_box_min = bounding_box_min_x, bounding_box_min_y, bounding_box_min_z
self.bounding_box_max = bounding_box_max_x, bounding_box_max_y, bounding_box_max_z
self.origin = origin_x, origin_y, origin_z
self.head_node = head_node
self.first_face = first_face
self.number_of_faces = number_of_faces
@classmethod
def write(cls, file, model):
model_data = struct.pack(cls.format,
*model.bounding_box_min,
*model.bounding_box_max,
*model.origin,
model.head_node,
model.first_face,
model.number_of_faces)
file.write(model_data)
@classmethod
def read(cls, file):
model_data = file.read(cls.size)
model_struct = struct.unpack(cls.format, model_data)
return Model(*model_struct)
class Models(ClassSequence):
Class = Model
class Brush:
format = '<3i'
size = struct.calcsize(format)
__slots__ = (
'first_side',
'number_of_sides',
'contents'
)
def __init__(self,
first_side,
number_of_sides,
contents):
self.first_side = first_side
self.number_of_sides = number_of_sides
self.contents = contents
@classmethod
def write(cls, file, brush):
brush_data = struct.pack(cls.format,
brush.first_side,
brush.number_of_sides,
brush.contents)
file.write(brush_data)
@classmethod
def read(cls, file):
brush_data = file.read(cls.size)
brush_struct = struct.unpack(cls.format, brush_data)
return Brush(*brush_struct)
class Brushes(ClassSequence):
Class = Brush
class BrushSide:
format = '<Hh'
size = struct.calcsize(format)
__slots__ = (
'plane_number',
'texture_info'
)
def __init__(self,
plane_number,
texture_info):
self.plane_number = plane_number
self.texture_info = texture_info
@classmethod
def write(cls, file, brush_side):
brush_side_data = struct.pack(cls.format,
brush_side.plane_number,
brush_side.texture_info)
file.write(brush_side_data)
@classmethod
def read(cls, file):
brush_side_data = file.read(cls.size)
brush_side_struct = struct.unpack(cls.format, brush_side_data)
return BrushSide(*brush_side_struct)
class BrushSides(ClassSequence):
Class = BrushSide
class Pop:
@classmethod
def write(cls, file, structures):
file.write(structures)
@classmethod
def read(cls, file):
return file.read()
class Area:
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'number_of_area_portals',
'first_area_portal'
)
def __init__(self,
number_of_area_portals,
first_area_portal):
self.number_of_area_portals = number_of_area_portals
self.first_area_portal = first_area_portal
@classmethod
def write(cls, file, area):
area_data = struct.pack(cls.format,
area.number_of_area_portals,
area.first_area_portal)
file.write(area_data)
@classmethod
def read(cls, file):
area_data = file.read(cls.size)
area_struct = struct.unpack(cls.format, area_data)
return Area(*area_struct)
class Areas(ClassSequence):
Class = Area
class AreaPortal:
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'portal_number',
'other_area'
)
def __init__(self,
portal_number,
other_area):
self.portal_number = portal_number
self.other_area = other_area
@classmethod
def write(cls, file, area):
area_data = struct.pack(cls.format,
area.portal_number,
area.other_area)
file.write(area_data)
@classmethod
def read(cls, file):
area_data = file.read(cls.size)
area_struct = struct.unpack(cls.format, area_data)
return AreaPortal(*area_struct)
class AreaPortals(ClassSequence):
Class = AreaPortal
class Lump:
"""Class for representing a lump.
A lump is a section of data that typically contains a sequence of data
structures.
Attributes:
offset: The offset of the lump entry from the start of the file.
length: The length of the lump entry.
"""
format = '<2i'
size = struct.calcsize(format)
__slots__ = (
'offset',
'length'
)
def __init__(self, offset, length):
self.offset = offset
self.length = length
@classmethod
def write(cls, file, lump):
lump_data = struct.pack(cls.format,
lump.offset,
lump.length)
file.write(lump_data)
@classmethod
def read(cls, file):
lump_data = file.read(cls.size)
lump_struct = struct.unpack(cls.format, lump_data)
return Lump(*lump_struct)
class Header:
"""Class for representing a Bsp file header
Attributes:
identity: The file identity. Should be b'IBSP'.
version: The file version. Should be 38.
lumps: A sequence of nineteen Lumps
"""
    format = '<4si{}'.format(Lump.format[1:] * 19)  # b'IBSP', int32 version, then 19 (offset, length) pairs
size = struct.calcsize(format)
order = [
Entities,
Planes,
Vertexes,
Visibilities,
Nodes,
TextureInfos,
Faces,
Lighting,
Leafs,
LeafFaces,
LeafBrushes,
Edges,
SurfEdges,
Models,
Brushes,
BrushSides,
Pop,
Areas,
AreaPortals
]
__slots__ = (
'identity',
'version',
'lumps'
)
def __init__(self,
identity,
version,
lumps):
self.identity = identity
self.version = version
self.lumps = lumps
@classmethod
def write(cls, file, header):
lump_values = []
for lump in header.lumps:
lump_values += lump.offset, lump.length
header_data = struct.pack(cls.format,
header.identity,
header.version,
*lump_values)
file.write(header_data)
@classmethod
def read(cls, file):
data = file.read(cls.size)
lumps_start = struct.calcsize('<4si')
header_data = data[:lumps_start]
header_struct = struct.unpack('<4si', header_data)
ident = header_struct[0]
version = header_struct[1]
lumps_data = data[lumps_start:]
lumps = [Lump(*l) for l in struct.iter_unpack(Lump.format, lumps_data)]
return Header(ident, version, lumps)
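# Layout note (added for clarity): the header is the 4-byte b'IBSP' magic, a
# 32-bit version (38), then a directory of 19 lumps of two 32-bit ints each,
# so Header.size == 8 + 19 * 8 == 160 bytes.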
class Bsp:
"""Class for working with Bsp files
Example:
b = Bsp.open(file)
Attributes:
identity: Identity of the Bsp file. Should be b'IBSP'
version: Version of the Bsp file. Should be 38
entities: A string containing the entity definitions.
planes: A list of Plane objects used by the bsp tree data structure.
vertexes: A list of Vertex objects.
        visibilities: The raw visibility data as bytes.
nodes: A list of Node objects used by the bsp tree data structure.
texture_infos: A list of TextureInfo objects.
faces: A list of Face objects.
        lighting: The raw lighting data as bytes.
leafs: A list of Leaf objects used by the bsp tree data structure.
leaf_faces: A list of ints representing a consecutive list of faces
used by the Leaf objects.
leaf_brushes: A list of ints representing a consecutive list of edges
used by the Leaf objects.
edges: A list of Edge objects.
surf_edges: A list of ints representing a consecutive list of edges
used by the Face objects.
models: A list of Model objects.
brushes: A list of Brush objects.
brush_sides: A list of BrushSide objects.
pop: Proof of purchase? Always 256 bytes of null data if present.
areas: A list of Area objects.
area_portals: A list of AreaPortal objects.
"""
def __init__(self):
self.fp = None
self.mode = None
self._did_modify = False
self.identity = b'IBSP'
self.version = 38
self.entities = ""
self.planes = []
self.vertexes = []
self.visibilities = []
self.nodes = []
self.texture_infos = []
self.faces = []
self.lighting = b''
self.leafs = []
self.leaf_faces = []
self.leaf_brushes = []
self.edges = []
self.surf_edges = []
self.models = []
self.brushes = []
self.brush_sides = []
self.pop = []
self.areas = []
self.area_portals = []
Lump = Lump
Header = Header
Entities = Entities
Planes = Planes
Vertexes = Vertexes
Visibilities = Visibilities
Nodes = Nodes
TextureInfos = TextureInfos
Faces = Faces
Lighting = Lighting
Leafs = Leafs
LeafFaces = LeafFaces
LeafBrushes = LeafBrushes
Edges = Edges
SurfEdges = SurfEdges
Models = Models
Brushes = Brushes
BrushSides = BrushSides
Pop = Pop
Areas = Areas
AreaPortals = AreaPortals
@classmethod
def open(cls, file, mode='r'):
"""Returns a Bsp object
Args:
file: Either the path to the file, a file-like object, or bytes.
            mode: An optional string that indicates which mode to open the
                file in.
        Returns:
            A Bsp object constructed from the information read from the
file-like object.
Raises:
ValueError: If an invalid file mode is given.
RuntimeError: If the file argument is not a file-like object.
"""
if mode not in ('r', 'w', 'a'):
raise ValueError("invalid mode: '%s'" % mode)
filemode = {'r': 'rb', 'w': 'w+b', 'a': 'r+b'}[mode]
if isinstance(file, str):
file = io.open(file, filemode)
elif isinstance(file, bytes):
file = io.BytesIO(file)
elif not hasattr(file, 'read'):
raise RuntimeError(
"Bsp.open() requires 'file' to be a path, a file-like object, "
"or bytes")
# Read
if mode == 'r':
return cls._read_file(file, mode)
# Write
elif mode == 'w':
bsp = cls()
bsp.fp = file
bsp.mode = 'w'
bsp._did_modify = True
return bsp
# Append
else:
bsp = cls._read_file(file, mode)
bsp._did_modify = True
return bsp
@classmethod
def _read_file(cls, file, mode):
def _read_lump(Class):
lump = header.lumps[header.order.index(Class)]
file.seek(lump.offset)
return Class.read(io.BytesIO(file.read(lump.length)))
bsp = cls()
bsp.mode = mode
bsp.fp = file
# Header
header = cls.Header.read(file)
bsp.identity = header.identity
bsp.version = header.version
bsp.entities = _read_lump(cls.Entities)
bsp.planes = _read_lump(cls.Planes)
bsp.vertexes = _read_lump(cls.Vertexes)
bsp.visibilities = _read_lump(cls.Visibilities)
bsp.nodes = _read_lump(cls.Nodes)
bsp.texture_infos = _read_lump(cls.TextureInfos)
bsp.faces = _read_lump(cls.Faces)
bsp.lighting = _read_lump(cls.Lighting)
bsp.leafs = _read_lump(cls.Leafs)
bsp.leaf_faces = _read_lump(cls.LeafFaces)
bsp.leaf_brushes = _read_lump(cls.LeafBrushes)
bsp.edges = _read_lump(cls.Edges)
bsp.surf_edges = _read_lump(cls.SurfEdges)
bsp.models = _read_lump(cls.Models)
bsp.brushes = _read_lump(cls.Brushes)
bsp.brush_sides = _read_lump(cls.BrushSides)
bsp.pop = _read_lump(cls.Pop)
bsp.areas = _read_lump(cls.Areas)
bsp.area_portals = _read_lump(cls.AreaPortals)
return bsp
@classmethod
def _write_file(cls, file, bsp):
def _write_lump(Class, data):
offset = file.tell()
Class.write(file, data)
size = file.tell() - offset
return cls.Lump(offset, size)
lumps = [cls.Lump(0, 0) for _ in range(19)]
header = cls.Header(bsp.identity, bsp.version, lumps)
lump_index = header.order.index
# Stub out header info
cls.Header.write(file, header)
lumps[lump_index(cls.Entities)] = _write_lump(cls.Entities, bsp.entities)
lumps[lump_index(cls.Planes)] = _write_lump(cls.Planes, bsp.planes)
lumps[lump_index(cls.Vertexes)] = _write_lump(cls.Vertexes, bsp.vertexes)
lumps[lump_index(cls.Visibilities)] = _write_lump(cls.Visibilities, bsp.visibilities)
lumps[lump_index(cls.Nodes)] = _write_lump(cls.Nodes, bsp.nodes)
lumps[lump_index(cls.TextureInfos)] = _write_lump(cls.TextureInfos, bsp.texture_infos)
lumps[lump_index(cls.Faces)] = _write_lump(cls.Faces, bsp.faces)
lumps[lump_index(cls.Lighting)] = _write_lump(cls.Lighting, bsp.lighting)
lumps[lump_index(cls.Leafs)] = _write_lump(cls.Leafs, bsp.leafs)
lumps[lump_index(cls.LeafFaces)] = _write_lump(cls.LeafFaces, bsp.leaf_faces)
lumps[lump_index(cls.LeafBrushes)] = _write_lump(cls.LeafBrushes, bsp.leaf_brushes)
lumps[lump_index(cls.Edges)] = _write_lump(cls.Edges, bsp.edges)
lumps[lump_index(cls.SurfEdges)] = _write_lump(cls.SurfEdges, bsp.surf_edges)
lumps[lump_index(cls.Models)] = _write_lump(cls.Models, bsp.models)
lumps[lump_index(cls.Brushes)] = _write_lump(cls.Brushes, bsp.brushes)
lumps[lump_index(cls.BrushSides)] = _write_lump(cls.BrushSides, bsp.brush_sides)
lumps[lump_index(cls.Pop)] = _write_lump(cls.Pop, bsp.pop)
lumps[lump_index(cls.Areas)] = _write_lump(cls.Areas, bsp.areas)
lumps[lump_index(cls.AreaPortals)] = _write_lump(cls.AreaPortals, bsp.area_portals)
end_of_file = file.tell()
# Finalize header
file.seek(0)
cls.Header.write(file, header)
file.seek(end_of_file)
def save(self, file):
"""Writes Bsp data to file
Args:
file: Either the path to the file, or a file-like object, or bytes.
Raises:
RuntimeError: If the file argument is not a file-like object.
"""
should_close = False
if isinstance(file, str):
file = io.open(file, 'r+b')
should_close = True
elif isinstance(file, bytes):
file = io.BytesIO(file)
should_close = True
elif not hasattr(file, 'write'):
            raise RuntimeError(
                "Bsp.save() requires 'file' to be a path, a file-like object, "
                "or bytes")
self._write_file(file, self)
if should_close:
file.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Closes the file pointer if possible. If mode is 'w' or 'a', the file
will be written to.
"""
if self.fp:
if self.mode in ('w', 'a') and self._did_modify:
self.fp.seek(0)
self._write_file(self.fp, self)
self.fp.truncate()
file_object = self.fp
self.fp = None
file_object.close()
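# Typical usage sketch (illustrative; the map path is a placeholder):
#   with Bsp.open('maps/base1.bsp') as bsp_file:
#       for model in bsp_file.models:
#           print(model.origin, model.number_of_faces)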
# File: test/optimization/test_converters.py (repo: levbishop/qiskit-aqua, license: Apache-2.0)
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Converters """
import logging
import unittest
from test.optimization.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from docplex.mp.model import Model
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.operators import Z, I
from qiskit.optimization import QuadraticProgram, QiskitOptimizationError
from qiskit.optimization.algorithms import MinimumEigenOptimizer, CplexOptimizer, ADMMOptimizer
from qiskit.optimization.algorithms import OptimizationResult
from qiskit.optimization.algorithms.admm_optimizer import ADMMParameters
from qiskit.optimization.algorithms.optimization_algorithm import OptimizationResultStatus
from qiskit.optimization.converters import (InequalityToEquality, IntegerToBinary,
LinearEqualityToPenalty, QuadraticProgramToIsing,
IsingToQuadraticProgram)
from qiskit.optimization.problems import Constraint, Variable
logger = logging.getLogger(__name__)
QUBIT_OP_MAXIMIZE_SAMPLE = (
-199999.5 * (I ^ I ^ I ^ Z)
+ -399999.5 * (I ^ I ^ Z ^ I)
+ -599999.5 * (I ^ Z ^ I ^ I)
+ -799999.5 * (Z ^ I ^ I ^ I)
+ 100000 * (I ^ I ^ Z ^ Z)
+ 150000 * (I ^ Z ^ I ^ Z)
+ 300000 * (I ^ Z ^ Z ^ I)
+ 200000 * (Z ^ I ^ I ^ Z)
+ 400000 * (Z ^ I ^ Z ^ I)
+ 600000 * (Z ^ Z ^ I ^ I)
)
OFFSET_MAXIMIZE_SAMPLE = 1149998
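# Provenance note (added for clarity): the operator above is the expected
# Ising encoding of the problem built in test_optimizationproblem_to_ising:
# maximize x0 + x1 + x2 + x3 subject to x0 + 2*x1 + 3*x2 + 4*x3 == 3, with the
# equality folded in as a penalty of 1e5 and each binary variable mapped to a
# qubit via x_i = (1 - Z_i) / 2; the constant terms collect into the offset.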
class TestConverters(QiskitOptimizationTestCase):
"""Test Converters"""
def test_empty_problem(self):
""" Test empty problem """
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.convert(op)
conv = IntegerToBinary()
op = conv.convert(op)
conv = LinearEqualityToPenalty()
op = conv.convert(op)
_, shift = op.to_ising()
self.assertEqual(shift, 0.0)
def test_valid_variable_type(self):
"""Validate the types of the variables for QuadraticProgram.to_ising."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
_ = op.to_ising()
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
_ = op.to_ising()
def test_inequality_binary(self):
""" Test InequalityToEqualityConverter with binary variables """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
# Quadratic constraints
quadratic = {('x0', 'x1'): 1, ('x1', 'x2'): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, 'x0x1_x1x2LE')
quadratic = {('x0', 'x1'): 3, ('x1', 'x2'): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, 'x0x1_x1x2GE')
# Convert inequality constraints into equality constraints
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual([v.name for v in op2.variables],
['x0', 'x1', 'x2', 'x1x2@int_slack', 'x0x2@int_slack',
'x0x1_x1x2LE@int_slack', 'x0x1_x1x2GE@int_slack'])
# Check names and objective senses
self.assertEqual(op.name, op2.name)
self.assertEqual(op.objective.sense, op2.objective.sense)
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 3])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 2])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 3])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 4])
result = OptimizationResult(x=np.arange(7), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_inequality_integer(self):
""" Test InequalityToEqualityConverter with integer variables """
op = QuadraticProgram()
for i in range(3):
op.integer_var(name='x{}'.format(i), lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
# Quadratic constraints
quadratic = {('x0', 'x1'): 1, ('x1', 'x2'): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, 'x0x1_x1x2LE')
quadratic = {('x0', 'x1'): 3, ('x1', 'x2'): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, 'x0x1_x1x2GE')
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual([v.name for v in op2.variables],
['x0', 'x1', 'x2', 'x1x2@int_slack', 'x0x2@int_slack',
'x0x1_x1x2LE@int_slack', 'x0x1_x1x2GE@int_slack'])
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 8])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 10])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 30])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 60])
result = OptimizationResult(x=np.arange(7), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_inequality_mode_integer(self):
""" Test integer mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
conv = InequalityToEquality(mode='integer')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.INTEGER])
def test_inequality_mode_continuous(self):
""" Test continuous mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
conv = InequalityToEquality(mode='continuous')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.CONTINUOUS, Variable.Type.CONTINUOUS])
def test_inequality_mode_auto(self):
""" Test auto mode of InequalityToEqualityConverter() """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1.1, 'x2': 2.2}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 3.3, 'x0x2')
conv = InequalityToEquality(mode='auto')
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.CONTINUOUS])
def test_penalize_sense(self):
""" Test PenalizeLinearEqualityConstraints with senses """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, 'x0x2')
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
with self.assertRaises(QiskitOptimizationError):
conv.convert(op)
def test_penalize_binary(self):
""" Test PenalizeLinearEqualityConstraints with binary variables """
op = QuadraticProgram()
for i in range(3):
op.binary_var(name='x{}'.format(i))
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': 3}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x0x2')
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
result = OptimizationResult(x=np.arange(3), fval=0, variables=op2.variables)
new_result = conv.interpret(result)
self.assertEqual(new_result.status, OptimizationResultStatus.INFEASIBLE)
np.testing.assert_array_almost_equal(new_result.x, np.arange(3))
self.assertListEqual(result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 2})
def test_penalize_integer(self):
""" Test PenalizeLinearEqualityConstraints with integer variables """
op = QuadraticProgram()
for i in range(3):
op.integer_var(name='x{}'.format(i), lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {'x0': 1, 'x1': 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x1')
linear_constraint = {'x1': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, 'x1x2')
linear_constraint = {'x0': 1, 'x2': -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, 'x0x2')
op.minimize(constant=3, linear={'x0': 1}, quadratic={('x1', 'x2'): 2})
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
result = OptimizationResult(x=[0, 1, -1], fval=1, variables=op2.variables)
new_result = conv.interpret(result)
self.assertAlmostEqual(new_result.fval, 1)
self.assertEqual(new_result.status, OptimizationResultStatus.SUCCESS)
np.testing.assert_array_almost_equal(new_result.x, [0, 1, -1])
self.assertListEqual(result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(result.variables_dict, {'x0': 0, 'x1': 1, 'x2': -1})
def test_integer_to_binary(self):
""" Test integer to binary """
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name='x{}'.format(i))
op.integer_var(name='x2', lowerbound=0, upperbound=5)
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.maximize(0, linear, {})
conv = IntegerToBinary()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_vars(), 5)
self.assertListEqual([x.vartype for x in op2.variables], [Variable.Type.BINARY] * 5)
self.assertListEqual([x.name for x in op2.variables], ['x0', 'x1', 'x2@0', 'x2@1', 'x2@2'])
dct = op2.objective.linear.to_dict()
self.assertEqual(dct[2], 3)
self.assertEqual(dct[3], 6)
self.assertEqual(dct[4], 6)
def test_binary_to_integer(self):
""" Test binary to integer """
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name='x{}'.format(i))
op.integer_var(name='x2', lowerbound=0, upperbound=5)
linear = {'x0': 1, 'x1': 2, 'x2': 1}
op.maximize(0, linear, {})
linear = {}
for x in op.variables:
linear[x.name] = 1
op.linear_constraint(linear, Constraint.Sense.EQ, 6, 'x0x1x2')
conv = IntegerToBinary()
op2 = conv.convert(op)
result = OptimizationResult(x=[0, 1, 1, 1, 1], fval=17, variables=op2.variables)
new_result = conv.interpret(result)
np.testing.assert_array_almost_equal(new_result.x, [0, 1, 5])
self.assertEqual(new_result.fval, 17)
self.assertListEqual(new_result.variable_names, ['x0', 'x1', 'x2'])
self.assertDictEqual(new_result.variables_dict, {'x0': 0, 'x1': 1, 'x2': 5})
def test_optimizationproblem_to_ising(self):
""" Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name='x{}'.format(i))
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, 'sum1')
penalize = LinearEqualityToPenalty(penalty=1e5)
op2 = penalize.convert(op)
qubitop, offset = op2.to_ising()
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear(self):
""" Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram()
quadratic.from_ising(op, offset, linear=True)
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic(self):
""" Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram()
quadratic.from_ising(op, offset, linear=False)
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_continuous_variable_decode(self):
""" Test decode func of IntegerToBinaryConverter for continuous variables"""
try:
            mdl = Model('test_continuous_variable_decode')
c = mdl.continuous_var(lb=0, ub=10.9, name='c')
x = mdl.binary_var(name='x')
mdl.maximize(c + x * x)
op = QuadraticProgram()
op.from_docplex(mdl)
converter = IntegerToBinary()
op = converter.convert(op)
admm_params = ADMMParameters()
qubo_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
continuous_optimizer = CplexOptimizer()
solver = ADMMOptimizer(
qubo_optimizer=qubo_optimizer,
continuous_optimizer=continuous_optimizer,
params=admm_params,
)
result = solver.solve(op)
result = converter.interpret(result)
self.assertEqual(result.x[0], 10.9)
self.assertListEqual(result.variable_names, ['c', 'x'])
self.assertDictEqual(result.variables_dict, {'c': 10.9, 'x': 0})
except NameError as ex:
self.skipTest(str(ex))
def test_auto_penalty(self):
""" Test auto penalty function"""
op = QuadraticProgram()
op.binary_var('x')
op.binary_var('y')
op.binary_var('z')
op.minimize(constant=3, linear={'x': 1}, quadratic={('x', 'y'): 2})
op.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='EQ', rhs=2, name='xyz_eq')
lineq2penalty = LinearEqualityToPenalty(penalty=1e5)
lineq2penalty_auto = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(op)
qubo_auto = lineq2penalty_auto.convert(op)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
result_auto = exact.solve(qubo_auto)
self.assertEqual(result.fval, result_auto.fval)
np.testing.assert_array_almost_equal(result.x, result_auto.x)
def test_auto_penalty_warning(self):
""" Test warnings of auto penalty function"""
op = QuadraticProgram()
op.binary_var('x')
op.binary_var('y')
op.binary_var('z')
op.minimize(linear={'x': 1, 'y': 2})
op.linear_constraint(linear={'x': 0.5, 'y': 0.5, 'z': 0.5}, sense='EQ', rhs=1, name='xyz')
with self.assertLogs('qiskit.optimization', level='WARNING') as log:
lineq2penalty = LinearEqualityToPenalty()
_ = lineq2penalty.convert(op)
warning = (
'WARNING:qiskit.optimization.converters.linear_equality_to_penalty:'
'Warning: Using 100000.000000 for the penalty coefficient because a float '
'coefficient exists in constraints. \nThe value could be too small. If so, '
'set the penalty coefficient manually.'
)
self.assertIn(warning, log.output)
def test_linear_equality_to_penalty_decode(self):
""" Test decode func of LinearEqualityToPenalty"""
qprog = QuadraticProgram()
qprog.binary_var('x')
qprog.binary_var('y')
qprog.binary_var('z')
qprog.maximize(linear={'x': 3, 'y': 1, 'z': 1})
qprog.linear_constraint(linear={'x': 1, 'y': 1, 'z': 1}, sense='EQ', rhs=2, name='xyz_eq')
lineq2penalty = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(qprog)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
decoded_result = lineq2penalty.interpret(result)
self.assertEqual(decoded_result.fval, 4)
np.testing.assert_array_almost_equal(decoded_result.x, [1, 1, 0])
self.assertEqual(decoded_result.status, OptimizationResultStatus.SUCCESS)
self.assertListEqual(decoded_result.variable_names, ['x', 'y', 'z'])
self.assertDictEqual(decoded_result.variables_dict, {'x': 1.0, 'y': 1.0, 'z': 0.0})
infeasible_result = OptimizationResult(x=[1, 1, 1], fval=0, variables=qprog.variables)
decoded_infeasible_result = lineq2penalty.interpret(infeasible_result)
self.assertEqual(decoded_infeasible_result.fval, 5)
np.testing.assert_array_almost_equal(decoded_infeasible_result.x, [1, 1, 1])
self.assertEqual(decoded_infeasible_result.status, OptimizationResultStatus.INFEASIBLE)
self.assertListEqual(infeasible_result.variable_names, ['x', 'y', 'z'])
self.assertDictEqual(infeasible_result.variables_dict, {'x': 1.0, 'y': 1.0, 'z': 1.0})
def test_empty_problem_deprecated(self):
""" Test empty problem """
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.encode(op)
conv = IntegerToBinary()
op = conv.encode(op)
conv = LinearEqualityToPenalty()
op = conv.encode(op)
conv = QuadraticProgramToIsing()
_, shift = conv.encode(op)
self.assertEqual(shift, 0.0)
def test_valid_variable_type_deprecated(self):
"""Validate the types of the variables for QuadraticProgramToIsing."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
conv = QuadraticProgramToIsing()
_ = conv.encode(op)
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
conv = QuadraticProgramToIsing()
_ = conv.encode(op)
def test_optimizationproblem_to_ising_deprecated(self):
""" Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name='x{}'.format(i))
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, 'sum1')
penalize = LinearEqualityToPenalty(penalty=1e5)
op2ope = QuadraticProgramToIsing()
op2 = penalize.encode(op)
qubitop, offset = op2ope.encode(op2)
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear_deprecated(self):
""" Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
op2qp = IsingToQuadraticProgram(linear=True)
quadratic = op2qp.encode(op, offset)
self.assertEqual(len(quadratic.variables), 4)
self.assertEqual(len(quadratic.linear_constraints), 0)
self.assertEqual(len(quadratic.quadratic_constraints), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic_deprecated(self):
""" Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
op2qp = IsingToQuadraticProgram(linear=False)
quadratic = op2qp.encode(op, offset)
self.assertEqual(len(quadratic.variables), 4)
self.assertEqual(len(quadratic.linear_constraints), 0)
self.assertEqual(len(quadratic.quadratic_constraints), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
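# ---------------------------------------------------------------------------
# Added illustrative sketch (not part of the original suite): a minimal round
# trip through LinearEqualityToPenalty, mirroring the calls exercised in the
# tests above. It assumes the same deprecated qiskit-aqua era
# `qiskit.optimization` package that this file already uses.
def _example_penalty_round_trip():
    from qiskit.optimization import QuadraticProgram
    from qiskit.optimization.converters import LinearEqualityToPenalty
    qp = QuadraticProgram()
    qp.binary_var('x')
    qp.binary_var('y')
    qp.maximize(linear={'x': 2, 'y': 1})
    qp.linear_constraint(linear={'x': 1, 'y': 1}, sense='EQ', rhs=1, name='xy')
    converter = LinearEqualityToPenalty(penalty=10)
    qubo = converter.convert(qp)  # the equality constraint is folded into the objective
    # The converted problem is unconstrained; an optimizer's result would be
    # mapped back with converter.interpret(...) as the tests above show.
    return len(qubo.linear_constraints)  # == 0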
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
393fd65ff6acb3f16a103c6786941faa450a1e7f | 293720158f0836140cb7a417980f04272445a787 | /day22.py | d04ac87c99c95d0c09412aa66fba62fcc4f5958f | [] | no_license | Chicken-Bones/AdventOfCode2020 | cc01c7136a1b19fbbcbb367dd7a3d4dc8f82466a | 62c82002a9f99c960407e70552028fce2755e666 | refs/heads/master | 2023-02-06T07:20:11.493262 | 2020-12-24T06:20:57 | 2020-12-24T06:20:57 | 318,743,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,503 | py | import re
if __name__ == "__main__":
with open("input/day22.txt") as file:
p1, p2 = file.read().split("\n\n")
p1 = [int(s) for s in p1.strip().split("\n")[1:]]
p2 = [int(s) for s in p2.strip().split("\n")[1:]]
while p1 and p2:
a, b = p1[0], p2[0]
if a > b:
p1 += [a, b]
else:
p2 += [b, a]
p1 = p1[1:]
p2 = p2[1:]
win = p1 if p1 else p2
print(sum((i+1)*c for i, c in enumerate(win[::-1])))
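    # Scoring note: reversing the winning deck pairs the bottom card with
    # multiplier 1, the next with 2, and so on; Part 2 below reuses the same
    # formula.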
with open("input/day22.txt") as file:
p1, p2 = file.read().split("\n\n")
p1 = [int(s) for s in p1.strip().split("\n")[1:]]
p2 = [int(s) for s in p2.strip().split("\n")[1:]]
def play(decks):
seen = set()
while all(decks):
key = tuple(tuple(d) for d in decks)
if key in seen:
return 0, None
seen.add(key)
cards = [d[0] for d in decks]
decks = [d[1:] for d in decks]
            if all(c <= len(d) for c, d in zip(cards, decks)):  # each player has at least c cards left
win, _ = play([d[:c] for c, d in zip(cards, decks)])
else:
win = 0 if cards[0] > cards[1] else 1
decks[win] += [cards[win], cards[win^1]]
return next((i, d) for i, d in enumerate(decks) if d)
_, deck = play([p1, p2])
print(sum((i+1)*c for i, c in enumerate(deck[::-1])))
| [
"[email protected]"
] | |
6447b4421e3a2256c272226eb874c95411fda479 | 8dffff5ff7f2645a50fd9846198e12e4c96a91da | /18-letter-count.py | ab86a66578554a66e0cb43fd008cdfbc21744bb6 | [] | no_license | akshaypawar2508/Coderbyte-pythonSol | b233c5ee0c34e0413a26b24b423dae45342b9ade | 5c7d2028fe09fd02aad7808f88abc40fdea0f81e | refs/heads/master | 2022-01-03T09:44:18.635060 | 2014-07-31T13:32:08 | 2014-07-31T13:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def LetterCountI(str):
for word in str.split():
for i in range(len(word)):
if word[i] in word[i+1:]:
return word
return -1
# keep this function call here
# to see how to enter arguments in Python scroll down
print LetterCountI(raw_input())
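# e.g. LetterCountI("Hello apple pie") -> "Hello" (the first word containing a
# repeated letter); -1 is returned when no word repeats a letter.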
| [
"[email protected]"
] | |
7bfaa576b5ff7ed6b4637b78badfbcb70b512d1f | 78edacedb86c5ed0570849839825be35f394a954 | /python/DS_Academy/DS_Academy_Num01.py | c94ec9694164c91405d7f5e8969fb944653f21a4 | [] | no_license | jihunsuk/ACMICPC | c0540a6f77d2d8ce835ba3c67d63e0c81b283e48 | 66849944be19b8614f9708e81e4674c26f3b94e6 | refs/heads/master | 2020-03-15T04:34:02.382779 | 2019-01-08T12:19:05 | 2019-01-08T12:19:05 | 131,968,665 | 0 | 0 | null | 2019-01-08T12:19:06 | 2018-05-03T08:57:53 | Java | UTF-8 | Python | false | false | 2,657 | py |
def bfs(edge_list, start_Node, End_Node):
    # Breadth-first search over the flat (parent, child) pair list; returns
    # True when End_Node is reachable from start_Node.
    if start_Node == End_Node:
        return True
    # Build a 100x100 adjacency matrix from the flat edge list.
    Edge = [[0] * 100 for i in range(100)]
    for i in range(0, len(edge_list), 2):
        Edge[edge_list[i]][edge_list[i + 1]] = 1
    visited = [start_Node]
    Queue = [start_Node]
    while len(Queue) != 0:
        node = Queue.pop(0)
        for i in range(1, len(edge_list), 2):
            if edge_list[i] in visited or Edge[node][edge_list[i]] == 0:
                continue
            visited.append(edge_list[i])
            Queue.append(edge_list[i])
            if edge_list[i] == End_Node:
                return True
    return False
def is_tree(tree_candidate):
check_tree = []
for i in range(0, tree_candidate.__len__()):
root_Node = 0
isTree = True
if tree_candidate[i].__len__() == 0:
check_tree.append([root_Node, True])
elif tree_candidate[i].__len__() % 2 == 1:
check_tree.append([root_Node, False])
else:
NodeCount = {}
anslist = []
for j in range(1, tree_candidate[i].__len__(), 2):
if tree_candidate[i][j] in NodeCount.keys():
NodeCount[tree_candidate[i][j]] += 1
else:
NodeCount[tree_candidate[i][j]] = 1
for j in range(0, tree_candidate[i].__len__(), 2):
if NodeCount[tree_candidate[i][j+1]] >= 2:
isTree = False
if tree_candidate[i][j] not in NodeCount.keys():
if root_Node == 0:
root_Node = tree_candidate[i][j]
elif root_Node != tree_candidate[i][j]:
isTree = False
if isTree is True:
for j in NodeCount.keys():
                    # Every node with an in-edge must be reachable from the
                    # root; a single failure decides the candidate.
                    if not bfs(tree_candidate[i], root_Node, j):
                        isTree = False
                        break
if isTree is False:
anslist.append(0)
else:
anslist.append(root_Node)
anslist.append(isTree)
check_tree.append(anslist)
return check_tree
# The part below is for calling the function and printing the output. Do not modify it.
def read_inputs():
tree_candidate = []
while True:
a = input()
if a == '-1':
break
else:
candidate = list(int(x) for x in a.split())
tree_candidate.append(candidate)
return tree_candidate
def main():
tree_candidate = read_inputs()
ans = is_tree(tree_candidate)
print(ans)
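    # Example session (hypothetical input; each line is a flat list of
    # parent,child pairs and -1 ends the input):
    #   1 2 1 3
    #   -1
    # prints [[1, True]] -- node 1 is the single root and every node with an
    # in-edge is reachable from it.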
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
dce48e066e8527b6a473d65b0a855e2f56d76723 | c70c8bded973de280235a86290684eac8ecf296a | /cgi-bin/rsp_survey.py | 6efe2112b2e554526c36dc3d911ede77e6dc2a52 | [] | no_license | LewB/Surveyor | 85fe9233258dea4304ba939f30dd6b21df88ca04 | bf8d390922198e223b86eabce46f86afbea5fe14 | refs/heads/master | 2021-01-13T02:16:58.772874 | 2015-06-08T23:36:07 | 2015-06-08T23:36:07 | 32,690,111 | 0 | 3 | null | 2015-04-01T16:45:31 | 2015-03-22T18:57:07 | JavaScript | UTF-8 | Python | false | false | 11,880 | py | #!/usr/bin/env python
""" This Program returns the Data for the Header and Body Elements of a
Dynamically Generated HTML Survey Called by index.html"""
# -*- coding: UTF-8 -*-
import os
# use the cgi library
import cgi
# enable debugging
#import cgitb
#cgitb.enable()
# use JSON encoding
import json
# use python sqlite3 database
import sqlite3
#from _sqlite3 import Row
#Define Main Function
def main():
fs = cgi.FieldStorage()
#svCode = fs.getvalue("SURVEY")
svPart = fs.getvalue("SVPART")
svMode = fs.getvalue("SVMODE")
svData = fs.getvalue("SVDATA")
if svPart == None or svMode == None or svData == None:
print "Content-Type: text/plain"
print
print "Invalid Parameter Passed to rsp_survey."
return
#svCode = "SVR0003"
#svPart = "HEADER"
#svMode = "UPD"
#svData = '[{"SH_CODE":"SVR0003","SH_OWNER":"Bond","SH_STATUS":"ready","SH_TYPE":"default","SH_NAME":"RT1","SH_DESC":"Raw Test One","SH_SKIN":"default"}]'
dbg = "OK"
try:
# connect to the database
dbc = sqlite3.connect("data/rsp-survey.db")
dbc.row_factory = sqlite3.Row
# Unpack DATA into JSON
jrows = json.loads(svData)
# ****** DEL MODE *********
if svMode == "DEL":
# ID Should Exist - go ahead and try to delete it
if svPart == "HEADER":
try:
dbc.execute("DELETE FROM SURVEY_HDR WHERE ROWID=" + jrows[0]['SH_ROWID'] + ";")
csr = dbc.cursor()
csr.execute("SELECT ROWID FROM SURVEY_BDY WHERE ROWID=" + jrows[0]['SH_ROWID'] + ";")
# Delete only if records found
chkrow = csr.fetchone()
if chkrow != None:
dbc.execute("DELETE FROM SURVEY_BDY WHERE SB_HDR=" + jrows[0]['SH_ROWID'] + ";")
dbc.commit()
except (sqlite3.Error):
dbg = "Failed to DELETE Survey Header Or Detail For: " + jrows[0]['SH_CODE']
raise
else:
# Remove Survey Database Too - If it Exists
dbPath = "data/" + jrows[0]['SH_CODE'] + ".db"
try:
if os.access(dbPath, os.W_OK) == True:
os.remove(dbPath)
except OSError, e:
dbg = "Error Removing DB File: " + dbPath
dbg += "\nOSerr: " + e.args[1]
elif svPart == "BODY":
try:
dbc.execute("DELETE FROM SURVEY_BDY WHERE ROWID=" + jrows[0]['SB_ROWID'] + ";")
dbc.commit()
except (sqlite3.Error):
dbg = "Failed to DELETE Survey Detail For: " + jrows[0]['SH_CODE']
raise
else:
# ****** ADD OR CHANGE MODE *************************
csr = dbc.cursor()
svHDR = None
for row in jrows:
if svPart == "BODY" and svHDR != row['SB_HDR']:
# Delete Body Detail Records to Avoid Duplicate Conflicts in SB_HDR/SB_SEQ Key
try:
dbc.execute("DELETE FROM SURVEY_BDY WHERE SB_HDR=" + row['SB_HDR'] + ";")
dbc.commit()
except (sqlite3.Error):
dbg = "Failed to DELETE Survey Detail For SB_HDR: " + row['SH_HDR']
raise
# Set svHDR to Current Value
svHDR = row['SB_HDR']
if svPart == "HEADER":
# execute SQL SELECT on CGI values
csr.execute("SELECT ROWID FROM SURVEY_HDR WHERE ROWID=" + row['SH_ROWID'] + ";")
# get first DB table row from cursor after select
chkrow = csr.fetchone()
if chkrow == None:
# No record Exists - go ahead and try to ADD it
try:
dbc.execute("INSERT INTO SURVEY_HDR \
(SH_CODE, SH_OWNER, SH_STATUS, SH_TYPE, \
SH_NAME, SH_DESC, SH_SKIN) \
VALUES ('" + row['SH_CODE'] + "', '" \
+ row['SH_OWNER'] + "', '" \
+ row['SH_STATUS'] + "', '" \
+ row['SH_TYPE'] + "', '" \
+ row['SH_NAME'] + "', '" \
+ row['SH_DESC'] + "', '" \
+ row['SH_SKIN'] + "');")
dbc.commit()
# Perform a Data Load to Establish ROWID after fresh Add
except (sqlite3.Error):
# check for hard coded initial values to create and access database table
try:
dbc.execute('''CREATE TABLE IF NOT EXISTS 'SURVEY_HDR'
('SH_CODE' TEXT PRIMARY KEY NOT NULL UNIQUE,
'SH_OWNER' TEXT NOT NULL,
'SH_STATUS' TEXT,
'SH_TYPE' TEXT,
'SH_NAME' TEXT,
'SH_DESC' TEXT,
'SH_SKIN' TEXT);''')
# Also assume body detail table does not exist.
                            dbc.execute('''CREATE TABLE IF NOT EXISTS SURVEY_BDY
                            (`SB_HDR` INTEGER NOT NULL,
                            `SB_SEQ` INTEGER NOT NULL,
                            `SB_TYPE` TEXT NOT NULL DEFAULT 'default',
                            `SB_TITLE` TEXT NOT NULL,
                            `SB_DESC` TEXT,
                            `SB_LABEL` TEXT,
                            `SB_MIN` INTEGER DEFAULT 1,
                            `SB_MAX` INTEGER DEFAULT 5,
                            `SB_BTN_1` TEXT DEFAULT 'Submit',
                            `SB_BTN_2` TEXT,
                            `SB_BTN_3` TEXT,
                            PRIMARY KEY(SB_HDR,SB_SEQ));''')
dbc.execute("INSERT INTO SURVEY_HDR \
(SH_CODE, SH_OWNER, SH_STATUS, SH_TYPE, \
SH_NAME, SH_DESC, SH_SKIN) \
VALUES ('" + row['SH_CODE'] + "', '" \
+ row['SH_OWNER'] + "', '" \
+ row['SH_STATUS'] + "', '" \
+ row['SH_TYPE'] + "', '" \
+ row['SH_NAME'] + "', '" \
+ row['SH_DESC'] + "', '" \
+ row['SH_SKIN'] + "');")
dbc.commit()
                        except sqlite3.Error, e:
                            dbg = "ERR: Failed to Create Initial DB Tables: " + e.args[0]
raise
else:
# Have a record match so Change it.
try:
dbc.execute("UPDATE SURVEY_HDR SET " \
+ "SH_CODE='" + row['SH_CODE'] + "'," \
+ "SH_OWNER='" + row['SH_OWNER'] + "'," \
+ "SH_STATUS='" + row['SH_STATUS'] + "'," \
+ "SH_TYPE='" + row['SH_TYPE'] + "'," \
+ "SH_NAME='" + row['SH_NAME'] + "'," \
+ "SH_DESC='" + row['SH_DESC'] + "'," \
+ "SH_SKIN='" + row['SH_SKIN'] + "' " \
+ "WHERE ROWID=" + row['SH_ROWID'] + ";")
dbc.commit()
except (sqlite3.Error):
dbg = "Failed to CHANGE Survey: " + row['SH_CODE'] + " in DB SURVEY_HDR Table."
raise
elif svPart == "BODY":
#csr.execute("SELECT SB_HDR, SB_SEQ FROM SURVEY_BDY WHERE (ROWID=" + row['SB_ROWID'] + ");")
# get first DB table row from cursor after select
#chkrow = csr.fetchone()
#if chkrow == None:
# No record Exists - go ahead and try to ADD it
try:
# All Records for that Header were Deleted, So Insert All New Records
# To Avoid SB_HDR/SB_SEQ Key Conflicts
dbc.execute("INSERT INTO SURVEY_BDY \
(SB_HDR, SB_SEQ, SB_TYPE, SB_TITLE, SB_DESC, SB_LABEL, SB_MIN, \
SB_MAX, SB_BTN_1, SB_BTN_2, SB_BTN_3) \
VALUES (" + row['SB_HDR'] + ", " \
+ row['SB_SEQ'] + ", '" \
+ row['SB_TYPE'] + "', '" \
+ row['SB_TITLE'] + "', '" \
+ row['SB_DESC'] + "', '" \
+ row['SB_LABEL'] + "', " \
+ row['SB_MIN'] + ", " \
+ row['SB_MAX'] + ", '" \
+ row['SB_BTN_1'] + "', '" \
+ row['SB_BTN_2'] + "', '" \
+ row['SB_BTN_3'] + "');")
dbc.commit()
except (sqlite3.Error):
dbg = "Failed to ADD Survey Detail to DB SURVEY_BDY Table."
raise
#else:
# Have a record match so Change it.
# try:
# dbc.execute("UPDATE SURVEY_BDY SET " \
# + "SB_HDR=" + row['SB_HDR'] + "," \
# + "SB_SEQ=" + row['SB_SEQ'] + "," \
# + "SB_TYPE='" + row['SB_TYPE'] + "'," \
# + "SB_TITLE='" + row['SB_TITLE'] + "'," \
# + "SB_DESC='" + row['SB_DESC'] + "'," \
# + "SB_LABEL='" + row['SB_LABEL'] + "'," \
# + "SB_MIN=" + row['SB_MIN'] + "," \
# + "SB_MAX=" + row['SB_MAX'] + "," \
# + "SB_BTN_1='" + row['SB_BTN_1'] + "'," \
# + "SB_BTN_2='" + row['SB_BTN_2'] + "'," \
# + "SB_BTN_3='" + row['SB_BTN_3'] + "'" \
# + "WHERE (ROWID=" + row['SB_ROWID'] + ");")
# dbc.commit()
# except (sqlite3.Error):
# dbg = "Failed to CHANGE Survey Detail in DB SURVEY_BDY Table."
# raise
except sqlite3.Error, e:
# Handle Exceptions
if dbc:
dbc.rollback()
dbg += "\nDB Err: " + e.args[0]
finally:
if dbc:
dbc.close()
#
# Print HTTP Response text: "OK" if no errors, else error string.
# Variable "dbg" will be set to either condition.
#
print "Content-Type: text/plain"
print
print dbg
# Run The Program
main()
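# Example request (hypothetical host; the field values mirror the commented
# sample data near the top of main):
#   POST /cgi-bin/rsp_survey.py
#   SVPART=HEADER&SVMODE=UPD&SVDATA=[{"SH_ROWID":"1","SH_CODE":"SVR0003",...}]
# The script answers with text/plain "OK", or an error description on failure.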
| [
"[email protected]"
] | |
b1a8ca498ec8f78dd664b8dbd97f237e390a7b1c | 499a6a8f078dc337f109f84142d1c16b8b613415 | /1. poglavlje/5.py | 6811ace14eee6a0be36f840b6d22e628a4a38906 | [] | no_license | renatasokol/Srce-D460 | ff92a80eaf63bdd69465e5a2d11888b58040b707 | 6ab9f0373885d968dcd2a008f02f46e79d8099d3 | refs/heads/master | 2020-08-18T11:39:31.657164 | 2019-09-02T16:04:41 | 2019-09-02T16:04:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | broj = input("Unesite broj: ")
broj = int(broj)
print("Paran!") if (broj % 2 == 0) else print("Neparan!")
| [
"[email protected]"
] | |
b14e46e32da98ddb6e1b28c189f1bf4d9b85ba5c | 017f49596bba7f16c2e35a34c5b5c0546ca3eb8f | /AutoNav/autonav.py | f081bcd48e97ada88572207240beba7cce26229c | [] | no_license | MichaelCStrauss/ECE4078_Lab | f349b4f47abdd0bd2b5fa3b43feeef342f59b5de | 43bc39e34c5ffef9cb91da2ff35c459bf556269b | refs/heads/master | 2023-01-11T03:41:35.615187 | 2020-11-08T10:49:14 | 2020-11-08T10:49:14 | 286,386,837 | 0 | 0 | null | 2020-08-10T05:45:08 | 2020-08-10T05:45:07 | null | UTF-8 | Python | false | false | 25,835 | py | # Manually drive the robot inside the arena and perform SLAM using ARUCO markers
# TODO:
# - increase turning speed greatly
# - maybe increase forward speed (calibration)
# - add to readme that we don't use FPS and use real time instead
# - maybe investigate glitching??
# Import packages
import numpy as np
import matplotlib.pyplot as plt
import os, sys
import json
import math
# Import keyboard teleoperation components
import penguinPiC
import keyboardControlStarter as Keyboard
# Import SLAM components
sys.path.insert(0, "{}/slam".format(os.getcwd()))
import slam.Slam as Slam
import slam.Robot as Robot
import slam.aruco_detector as aruco
import cv2
import cv2.aruco as cv2_aruco
import slam.Measurements as Measurements
import time
from yolo import YoloV5
import warnings
warnings.filterwarnings("ignore")
# camera calibration parameters (from M2: SLAM)
camera_matrix = np.loadtxt(
"calibration/camera_calibration/intrinsic.txt", delimiter=","
)
dist_coeffs = np.loadtxt("calibration/camera_calibration/distCoeffs.txt", delimiter=",")
marker_length = 0.1
# wheel calibration parameters (from M2: SLAM)
wheels_scale = np.loadtxt("calibration/wheel_calibration/scale.txt", delimiter=",")
wheels_width = np.loadtxt("calibration/wheel_calibration/baseline.txt", delimiter=",")
# display window for visulisation
cv2.namedWindow("video", cv2.WINDOW_NORMAL)
cv2.setWindowProperty("video", cv2.WND_PROP_AUTOSIZE, cv2.WINDOW_AUTOSIZE)
# font display options
font = cv2.FONT_HERSHEY_SIMPLEX
location = (0, 0)
font_scale = 1
font_col = (255, 255, 255)
line_type = 2
ip = [(None, 3), (17, 5)]
skip_survey = []
# Manual SLAM
class Operate:
def __init__(self, datadir, ppi):
# Initialise
self.ppi = ppi
self.ppi.set_velocity(0, 0)
self.img = np.zeros([240, 320, 3], dtype=np.uint8)
self.aruco_img = np.zeros([240, 320, 3], dtype=np.uint8)
# Keyboard teleoperation components
self.keyboard = Keyboard.Keyboard(self.ppi)
# Get camera / wheel calibration info for SLAM
camera_matrix, dist_coeffs, scale, baseline = self.getCalibParams(datadir)
# SLAM components
self.pibot = Robot.Robot(baseline, scale, camera_matrix, dist_coeffs)
self.aruco_det = aruco.aruco_detector(self.pibot, marker_length=0.1)
self.slam = Slam.Slam(self.pibot)
self.markers_travelled_to = []
self.paths = []
self.current_marker = None
self.spinning = True
self.frames = 0
self.markers_seen_at_step = []
self.yolo = YoloV5("./weights.pt", "cuda") # TODO: Fix device
self.run_start = time.time()
self.keyboard_controlled = False
self.manual = False
# def __del__(self):
# self.ppi.set_velocity(0, 0)
def getCalibParams(self, datadir):
# Imports camera / wheel calibration parameters
fileK = "{}camera_calibration/intrinsic.txt".format(datadir)
camera_matrix = np.loadtxt(fileK, delimiter=",")
fileD = "{}camera_calibration/distCoeffs.txt".format(datadir)
dist_coeffs = np.loadtxt(fileD, delimiter=",")
fileS = "{}wheel_calibration/scale.txt".format(datadir)
scale = np.loadtxt(fileS, delimiter=",")
fileB = "{}wheel_calibration/baseline.txt".format(datadir)
baseline = np.loadtxt(fileB, delimiter=",")
return camera_matrix, dist_coeffs, scale, baseline
def get_camera(self):
# get current frame
curr = self.ppi.get_image()
# visualise ARUCO marker detection annotations
aruco_params = cv2_aruco.DetectorParameters_create()
aruco_params.minDistanceToBorder = 0
aruco_params.adaptiveThreshWinSizeMax = 1000
aruco_dict = cv2_aruco.Dictionary_get(cv2.aruco.DICT_4X4_100)
corners, ids, rejected = cv2_aruco.detectMarkers(
curr, aruco_dict, parameters=aruco_params
)
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
corners, marker_length, camera_matrix, dist_coeffs
)
return corners, ids, rejected, rvecs, tvecs
def pause(self, pause_time=0.5, speeds=None):
time_start = time.time()
self.time_prev = time.time()
real_time_factor = 0.5
if speeds is not None:
self.ppi.set_velocity(speeds[0], speeds[1])
while time.time() - time_start < pause_time:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
self.ppi.set_velocity(0, 0)
self.step(0, 0, dt)
    def rotate(self, model_theta, pause_theta, spin_direction=1):
        # Returns (model_left, model_right, bot_left, bot_right) wheel speeds
        # for an in-place spin. d_theta is the angle swept since the last
        # pause; the physical robot brakes after a full revolution, while the
        # model-side speed ramps up with the swept angle.
        wheel_vel = 30
        d_theta = abs(model_theta - pause_theta)
        k = 60
        break_at = 24 * math.pi / 12  # 2*pi: brake once a full turn is swept
        reduction = k if d_theta > break_at else 0
        # Speeds sent to the physical robot.
        b_l = -(wheel_vel - reduction)
        b_r = -b_l
        b_l, b_r = int(b_l), int(b_r)
        # Model-side speed: ramp up over the first quarter turn, then hold.
        k2 = 10
        k3 = 0
        y_int = 7
        y_int2 = wheel_vel
        model_vel = (
            y_int + k2 * d_theta if d_theta < math.pi / 2 else y_int2 - k3 * d_theta
        )
        m_l = -1 * min(wheel_vel / 2, model_vel)
        m_r = -m_l
        m_l, m_r = int(m_l), int(m_r)
        b_l *= spin_direction
        b_r *= spin_direction
        m_l *= spin_direction
        m_r *= spin_direction
        return m_l, m_r, b_l, b_r
def spinOneRotation(self):
# spinning and looking for markers at each step
wheel_vel = 50
self.frames += 1
spin = True
spin_time = 5
fps = 30
measurements = []
seen_ids = set()
moved_past_first = False
real_time_factor = 0.5
self.time_prev = time.time()
initial_theta = self.slam.get_state_vector()[2]
pause_theta = initial_theta
while spin:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
model_theta = self.slam.get_state_vector()[2]
m_l, m_r, b_l, b_r = self.rotate(model_theta, pause_theta)
# print(f"{seen_ids=}")
corners, ids, rejected, rvecs, tvecs = self.get_camera()
if ids is not None:
ids_in_view = [ids[i, 0] for i in range(len(ids))]
for id in ids_in_view:
seen_ids.add(id)
self.step(m_l, m_r, dt, bot_input=(b_l, b_r))
image = self.ppi.get_image()
objects = self.yolo.get_relative_locations(image)
for class_id, local_x, local_y in objects:
world_x, world_y = self.slam.transform_local_world_space(local_x, local_y)
tag_id = self.slam.get_tag_of_object(class_id, world_x, world_y)
if tag_id is None:
continue
seen_ids.add(tag_id)
if model_theta - pause_theta > 2*math.pi:
self.pause(3)
pause_theta = model_theta
if model_theta - initial_theta > 2 * math.pi:
spin = False
kboard_info = self.keyboard.get_drive_signal()
if kboard_info[3] == True:
spin = False
self.pause()
return seen_ids
def get_marker_location(self, marker_id):
x_list, y_list = self.slam.markers.tolist()
idx = self.slam.taglist.index(marker_id)
return x_list[idx], y_list[idx]
def spin_to_marker(self, goal_marker_id):
real_time_factor = 0.5
self.time_prev = time.time()
model_theta = self.slam.get_state_vector()[2]
while model_theta > math.pi:
model_theta -= 2 * math.pi
while model_theta < -math.pi:
model_theta += 2 * math.pi
try:
marker_pos = self.get_marker_location(goal_marker_id)
robot_pos = self.slam.robot.state[0:2]
relative_angle = math.atan2(
marker_pos[1] - robot_pos[1], marker_pos[0] - robot_pos[0]
)
delta = relative_angle - model_theta
spin_direction = 1 if delta > 0 else -1
except Exception as e:
print(e)
spin_direction = 1
pause_theta = model_theta
while True:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
print(f"turning to target {goal_marker_id}")
model_theta = self.slam.get_state_vector()[2]
m_l, m_r, b_l, b_r = self.rotate(model_theta, pause_theta, spin_direction)
self.step(m_l, m_r, dt, bot_input=(b_l, b_r))
if abs(model_theta - pause_theta) > 2*math.pi:
self.pause(2)
pause_theta = model_theta
if goal_marker_id > 0:
# Get the ids in view
corners, ids, rejected, rvecs, tvecs = self.get_camera()
print(ids)
if ids is None:
continue
ids_in_view = [ids[i, 0] for i in range(len(ids))]
print(ids_in_view)
if goal_marker_id in ids_in_view:
break
else:
image = self.ppi.get_image()
objects = self.yolo.get_relative_locations(image)
print(objects)
found = False
for class_id, local_x, local_y in objects:
world_x, world_y = self.slam.transform_local_world_space(local_x, local_y)
tag_id = self.slam.get_tag_of_object(class_id, world_x, world_y)
if tag_id == goal_marker_id:
found = True
break
if found:
break
self.pause(2)
adjusting = True
adjusting_ticks = 0
while adjusting and adjusting_ticks < 30:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
wheel_vel = 30
print(f"adjusting to target {goal_marker_id}")
lv, rv = 0, 0
if goal_marker_id > 0:
corners, ids, rejected, rvecs, tvecs = self.get_camera()
if ids is not None:
for i in range(len(ids)):
idi = ids[i, 0]
# Some markers appear multiple times but should only be handled once.
if idi == goal_marker_id:
avg_x = corners[i][0, :, 0].mean()
diff_from_center = avg_x - 320
print(f"{diff_from_center}")
k = 0.4
lv, rv = (
diff_from_center * k,
-diff_from_center * k,
)
lv, rv = int(lv), int(rv)
lv = np.clip(lv, -wheel_vel, wheel_vel)
rv = np.clip(rv, -wheel_vel, wheel_vel)
if abs(diff_from_center) < 10:
adjusting = False
else:
image = self.ppi.get_image()
preds = self.yolo.forward(image)
target_class = 0 if -10 < goal_marker_id <= -1 else 1
if preds is not None:
for prediction in preds:
if prediction[5] != target_class:
continue
diff_from_center = float(prediction[2] + prediction[0]) / 2 - 320
print(f"{diff_from_center}")
k = 0.4
lv, rv = (
diff_from_center * k,
-diff_from_center * k,
)
lv, rv = int(lv), int(rv)
lv = np.clip(lv, -wheel_vel, wheel_vel)
rv = np.clip(rv, -wheel_vel, wheel_vel)
if abs(diff_from_center) < 10:
adjusting = False
self.step(lv / 4, rv / 4, dt, bot_input=(lv, rv))
adjusting_ticks += 1
for _ in range(10):
self.vision()
print(self.slam.taglist)
def drive_to_marker(self, goal_marker_id):
real_time_factor = 0.5
prev_dist = 1e6
self.time_prev = time.time()
target_location = self.get_marker_location(goal_marker_id)
driving = True
while driving:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
wheel_vel = 65
print(f"driving to target {goal_marker_id}")
lv, rv = wheel_vel, wheel_vel
b_lv, b_rv = lv, rv
corners, ids, rejected, rvecs, tvecs = self.get_camera()
position = self.slam.robot.state[0:2]
dist = (target_location[0] - position[0]) ** 2 + (
target_location[1] - position[1]
) ** 2
dist = dist ** 0.5
print(f"{dist} {ids}")
threshold = 1.5
if dist < threshold or dist > prev_dist:
driving = False
elif dist > 1:
b_lv = 75
prev_dist = dist
kboard_data = self.keyboard.get_drive_signal()
stop_signal = kboard_data[3]
if stop_signal:
break
# elif dist < 1.2:
# if ids is None:
# driving = False
# else:
# ids_in_view = [ids[i, 0] for i in range(len(ids))]
# if goal_marker_id not in ids_in_view:
# driving = False
self.step(lv, rv, dt, bot_input=(b_lv, b_rv))
self.current_marker = goal_marker_id
def get_next_untravelled_marker(self, ids_in_view, mode="closes", filter=False):
x_list, y_list = self.slam.markers.tolist()
position = self.slam.robot.state[0:2]
min_dist = 1e9
min_marker = None
max_dist = -1e9
max_marker = None
top_y = -1e9
top_y_marker = None
for idx, (marker_x, marker_y) in enumerate(zip(x_list, y_list)):
marker = self.slam.taglist[idx]
if marker in self.markers_travelled_to or marker not in ids_in_view:
continue
if filter:
f = False
for marker_set in self.markers_seen_at_step:
if marker in marker_set:
f = True
if f:
continue
if (self.current_marker, marker) in ip:
continue
dist = (marker_x - position[0]) ** 2 + (marker_y - position[1]) ** 2
dist = dist ** 0.5
dist = float(dist)
if dist < min_dist:
min_dist = dist
min_marker = marker
if dist > max_dist:
max_dist = dist
max_marker = marker
if marker_y > top_y:
top_y = marker_y
top_y_marker = marker
print(f"{min_marker}, {min_dist}")
print(f"{max_marker}, {max_dist}")
if mode == "closest":
return min_marker, min_dist
elif mode == "furthest":
return max_marker, max_dist
elif mode == "top_y":
return top_y_marker, top_y
def get_next_marker_up(self, ids_in_view):
x_list, y_list = self.slam.markers.tolist()
position = self.slam.robot.state[0:2]
top_y = -1e9
top_y_marker = None
for idx, (marker_x, marker_y) in enumerate(zip(x_list, y_list)):
marker = self.slam.taglist[idx]
if marker in self.markers_travelled_to or marker not in ids_in_view:
continue
if (self.current_marker, marker) in ip:
continue
if marker_y < position[1]:
continue
dist = (marker_x - position[0]) ** 2 + (marker_y - position[1]) ** 2
dist = dist ** 0.5
dist = float(dist)
if marker_y > top_y:
top_y = marker_y
top_y_marker = marker
return top_y_marker, top_y
def spin_radians(self, radians):
real_time_factor = 0.5
self.time_prev = time.time()
model_theta = self.slam.get_state_vector()[2]
spin_direction = 1 if radians > 0 else -1
start_theta = model_theta
pause_theta = model_theta
while True:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
model_theta = self.slam.get_state_vector()[2]
m_l, m_r, b_l, b_r = self.rotate(model_theta, pause_theta, spin_direction)
self.step(m_l, m_r, dt, bot_input=(b_l, b_r))
if abs(model_theta - pause_theta) > math.pi:
self.pause(2)
pause_theta = model_theta
if abs(model_theta - start_theta) > abs(radians):
self.pause(2)
break
def control(self, lv, rv, dt, bot_input=None):
# Import teleoperation control signals
drive_meas = Measurements.DriveMeasurement(lv, rv, dt=dt)
self.slam.predict(drive_meas)
if bot_input is not None:
lv = bot_input[0]
rv = bot_input[1]
self.ppi.set_velocity(lv, rv)
def vision(self):
# Import camera input and ARUCO marker info
self.img = self.ppi.get_image()
lms, aruco_image = self.aruco_det.detect_marker_positions(self.img)
objects = self.yolo.get_relative_locations(self.img)
self.slam.add_landmarks(lms, objects)
# print(f'{self.slam.taglist=}, {self.slam.markers=}')
self.slam.update(lms, objects)
def display(self, fig, ax):
# Visualize SLAM
ax[0].cla()
self.slam.draw_slam_state(ax[0])
ax[1].cla()
ax[1].imshow(self.img[:, :, -1::-1])
plt.pause(0.01)
def adjust(self):
directions, move = self.keyboard.get_key_status()
if not move:
return
current_pos = self.slam.robot.state
dt = time.time() - self.time_prev
speed = 0.5 * dt
current_pos[0] += np.clip((directions[3] - directions[2]) * speed, -0.3, 0.3)
current_pos[1] += np.clip((directions[0] - directions[1]) * speed, -0.3, 0.3)
self.slam.robot.state = current_pos
def step(self, lv, rv, dt, bot_input=None):
print(self.slam.robot.state)
print(self.slam.taglist)
if not self.manual:
keyboard_l, keyboard_r, adjustment, _ = self.keyboard.get_drive_signal()
if adjustment:
lv += int(keyboard_l / 1.5)
rv += int(keyboard_r / 1.5)
else:
if keyboard_l != 0 or keyboard_r != 0:
lv, rv, b_lv, b_rv = self.convert_keyboard_to_slam_bot(keyboard_l, keyboard_r)
bot_input = b_lv, b_rv
self.adjust()
self.control(lv, rv, dt, bot_input)
self.vision()
# Save SLAM map
self.write_map(self.slam)
# Output visualisation
self.display(self.fig, self.ax)
def write_map(self, slam):
map_f = "map.txt"
marker_list = sorted(self.slam.taglist)
with open(map_f, "w") as f:
f.write("id, x, y\n")
x_list, y_list = self.slam.markers.tolist()
position = self.slam.robot.state[0:2]
min_dist = 1e9
min_marker = None
lines = []
num_sheep = 0
num_coke = 0
for idx, (marker_x, marker_y) in enumerate(zip(x_list, y_list)):
marker = self.slam.taglist[idx]
if marker > 0:
marker = f"Marker{marker}"
elif -10 < marker <= -1:
num_sheep += 1
marker = f"sheep{num_sheep}"
elif marker <= -10:
num_coke += 1
marker = f"Coke{num_coke}"
lines.append(f"{marker}, {round(marker_x, 4)}, {round(marker_y, 4)}\n")
lines = sorted(lines)
f.writelines(lines)
return lines
# f.write("\ncurrent id, accessible id, distance\n")
# for path in self.paths:
# line = ", ".join(path)
# f.write(line + "\n")
# f.close()
def drive_distance(self, distance=1):
# spinning and looking for markers at each step
wheel_vel = 65
start = self.slam.get_state_vector()[0:2]
drive = True
real_time_factor = 0.5
self.time_prev = time.time()
while drive:
time_now = time.time()
dt = time_now - self.time_prev
dt *= real_time_factor
self.time_prev = time_now
current = self.slam.get_state_vector()[0:2]
dist = (current[0] - start[0]) ** 2 + (current[1] - start[1]) ** 2
dist = dist ** 0.5
lv, rv = wheel_vel, wheel_vel
b_lv, b_rv = lv + 5, rv
if dist > distance:
drive = False
self.step(lv, rv, dt, bot_input=(b_lv, b_rv))
def check_early_exit(self):
if len(self.paths) > 12 and len(self.slam.taglist) == 8:
return True
else:
return False
def run_one_iteration(self):
self.seen_ids = self.spinOneRotation()
manual = False
target = None
while True:
print("Current Map:")
print("".join(self.write_map(self.slam)))
seen_string = "\n"
for m_id in sorted(self.seen_ids):
seen_string += f'{m_id} '
if m_id <= -10:
seen_string += "coke "
elif m_id < 0:
seen_string += "sheep "
try:
pos = self.get_marker_location(m_id)
seen_string += f"({round(pos[0], 1)}, {round(pos[1], 1)})"
except ValueError:
seen_string += f"(unknown)"
seen_string += " || "
print("Seen IDs: " + seen_string)
print("Enter ID to drive to, or 'drive'")
command = input()
if command == "drive":
manual = True
break
try:
if int(command) in self.seen_ids:
target = int(command)
break
except:
continue
if manual:
self.manual_control()
else:
self.spin_to_marker(target)
self.drive_to_marker(target)
def convert_keyboard_to_slam_bot(self, lv, rv):
b_lv, b_rv = lv, rv
if lv == 0 or rv == 0:
pass
elif lv / rv > 0:
lv = int(lv / 2.1)
rv = int(rv / 2.1)
b_lv += 15
elif lv / rv < 0:
lv = int(lv / 5)
rv = int(rv / 5)
return lv, rv, b_lv, b_rv
def manual_control(self):
self.manual = True
self.time_prev = time.time()
while True:
time_now = time.time()
dt = time_now - self.time_prev
lv, rv, adjust, stop = self.keyboard.get_drive_signal()
if stop:
break
lv, rv, b_lv, b_rv = self.convert_keyboard_to_slam_bot(lv, rv)
if adjust:
b_lv, b_rv = 0, 0
self.step(lv, rv, dt, bot_input=(b_lv, b_rv))
self.time_prev = time_now
self.manual = False
def process(self):
# Show SLAM and camera feed side by side
self.yolo.setup()
self.fig, self.ax = plt.subplots(1, 2)
img_artist = self.ax[1].imshow(self.img)
self.times_no_marker = 0
# Run our code
while True:
# self.manual_control()
self.run_one_iteration()
if __name__ == "__main__":
# Location of the calibration files
currentDir = os.getcwd()
datadir = "{}/calibration/".format(currentDir)
# connect to the robot
ppi = penguinPiC.PenguinPi()
kb = False
if len(sys.argv) > 1 and sys.argv[1] == 'keyboard':
print("Using keyboard!")
kb = True
# Perform Manual SLAM
operate = Operate(datadir, ppi)
operate.keyboard_controlled = kb
try:
operate.process()
except KeyboardInterrupt:
operate.ppi.set_velocity(0, 0)
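# Example invocations (illustrative; the script name comes from this file's
# path, and 'keyboard' is the only argument the block above checks for):
#   python autonav.py            # autonomous survey/drive mode
#   python autonav.py keyboard   # enable keyboard teleoperation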
| [
"[email protected]"
] | |
0b3eeb02095fbf2030db653bc03576071c4a956a | 9672fa478478085b69c7ef8f02eaa7fa0bc7767b | /symphony/cli/pyinventory/graphql/fragment/service_endpoint.py | f22a4f54006c151f24a0aaab059869fd9813ff4f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | julianchr/magma | 437a1d86490ff5f1d279cf2cd3243bbd3f22f715 | f0b2ed7e08314208133cf722921d6e6ab7853825 | refs/heads/master | 2022-09-21T21:45:14.678593 | 2020-05-28T22:47:52 | 2020-05-28T22:49:52 | 267,723,888 | 0 | 0 | NOASSERTION | 2020-05-29T00:07:02 | 2020-05-29T00:07:01 | null | UTF-8 | Python | false | false | 1,352 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.equipment_port import EquipmentPortFragment, QUERY as EquipmentPortFragmentQuery
from ..fragment.service_endpoint_definition import ServiceEndpointDefinitionFragment, QUERY as ServiceEndpointDefinitionFragmentQuery
QUERY: List[str] = EquipmentPortFragmentQuery + ServiceEndpointDefinitionFragmentQuery + ["""
fragment ServiceEndpointFragment on ServiceEndpoint {
id
port {
...EquipmentPortFragment
}
definition {
...ServiceEndpointDefinitionFragment
}
}
"""]
@dataclass
class ServiceEndpointFragment(DataClassJsonMixin):
@dataclass
class EquipmentPort(EquipmentPortFragment):
pass
@dataclass
class ServiceEndpointDefinition(ServiceEndpointDefinitionFragment):
pass
id: str
definition: ServiceEndpointDefinition
port: Optional[EquipmentPort]
| [
"[email protected]"
] | |
a3919176104aa7e4ed4715c1d81b7406891c3a72 | 21e1c5f2065b8be5589e9b5c98881a23dc0964fc | /src/account/forms.py | 045a71275e8f6960fd9ce95535d9b84bdccea21c | [] | no_license | ognanshissi/vgc-stock | 1f6e898e0dcbbfe77df5589f055bb98bb7503d60 | a97ede4718688c2adc4328eeacd6d1282b473be2 | refs/heads/master | 2020-03-11T19:00:17.344933 | 2018-04-19T09:49:33 | 2018-04-19T09:49:33 | 130,194,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | from django import forms
from django.contrib.auth import get_user_model, authenticate, password_validation
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext as _
from django.utils.text import capfirst
User = get_user_model()
class AccountLoginForm(forms.Form):
error_messages = {
'invalid_login': _(
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
'inactive': _("This account is inactive."),
'access_denied': _("Votre compte n'est pas autorisé")
}
username = forms.CharField(
required=True,
label=_('Nom d\'utilisateur'), widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Entrez votre nom d\'utilisateur',
'autofocus': True
}))
password = forms.CharField(label=_('Mot de passe'), required=True, widget=forms.PasswordInput(attrs={
'class': 'form-control',
'placeholder': 'Votre mot de passe'
}))
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super(AccountLoginForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
self.username_field = User._meta.get_field(User.USERNAME_FIELD)
# if self.fields['username'].label is None:
# self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self, *args, **kwargs):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
else:
if not user.is_staff:
raise forms.ValidationError(
self.error_messages['access_denied'],
code='access_denied',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
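# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): a hypothetical view wiring the form up.
# `login_view` and the template path are placeholders, not names known to
# exist in this project.
from django.contrib.auth import login
from django.shortcuts import redirect, render

def login_view(request):
    form = AccountLoginForm(request, data=request.POST or None)
    if request.method == 'POST' and form.is_valid():
        login(request, form.get_user())  # form.clean() already authenticated
        return redirect('/')
    return render(request, 'account/login.html', {'form': form})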
| [
"[email protected]"
] | |
a8457deb62460f59eaaa8c13dd0a6738551e24f2 | cdb2160523b3742ea058ab5c22ebf6e44cb7f87d | /Python/boj/boj11866.py | 65d12b9f021991c749ccadce75963e33fb24b098 | [] | no_license | ryul99/algorithm-study | 68193471264a8bb249e047c922c2f66144eed6a7 | d64c388ca7d0c3b01e8e8a480f5fc435772f154b | refs/heads/master | 2023-03-06T14:44:24.498487 | 2023-02-23T21:16:35 | 2023-02-23T21:16:35 | 149,987,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | N, K = map(int, input().split(' '))
idx = 0
people = list(range(1, N+1))
print('<', end='')
while len(people) > 0:
idx += 1
tmp = people[0]
if idx == K:
people = people[1:]
idx = 0
print(tmp, end='')
if len(people) > 0:
print(', ', end='')
continue
people = people[1:]
people.append(tmp)
print('>', end='')
| [
"[email protected]"
] | |
c0b7851ecfde2f392ff517edc359887e19ed383e | 58e53ecfc5c58259e27d91cc1307c0477d47f1f1 | /build/global_planner/catkin_generated/pkg.develspace.context.pc.py | 7256f45fd0d85eb13ec52021c39744000bca24f4 | [] | no_license | Abdu-lybot/AdvanDiptera_VIO | 3f0deb7b749b558f08a7d7c8b9f99e3a012d9fbd | 8aee5b9de6af4288b2c4489bc00dc1d4894c33cc | refs/heads/master | 2023-07-17T00:31:53.680172 | 2021-09-06T18:14:05 | 2021-09-06T18:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lybot/AdvanDiptera_VIO/devel/.private/global_planner/include;/home/lybot/AdvanDiptera_VIO/src/navigation/global_planner/include".split(';') if "/home/lybot/AdvanDiptera_VIO/devel/.private/global_planner/include;/home/lybot/AdvanDiptera_VIO/src/navigation/global_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "costmap_2d;dynamic_reconfigure;geometry_msgs;nav_core;navfn;nav_msgs;pluginlib;roscpp;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lglobal_planner".split(';') if "-lglobal_planner" != "" else []
PROJECT_NAME = "global_planner"
PROJECT_SPACE_DIR = "/home/lybot/AdvanDiptera_VIO/devel/.private/global_planner"
PROJECT_VERSION = "1.17.1"
| [
"[email protected]"
] | |
4ab7a3b3bff2c4d4dabc96b292f269a32f107769 | 25bd9825dddab4b1d8b03dfa665d5705f8574e23 | /config/settings/base.py | 044092a877cfabc01088822eebe2ece7a4666412 | [
"BSD-3-Clause"
] | permissive | rasmus-storjohann-PG/pathways-backend | 7466e99c8fcc5effbb008b909d16e5a650d4124e | 524ba43cb85adbc000e4317c9b3e238001ed591b | refs/heads/master | 2021-05-16T12:47:36.045957 | 2017-12-21T17:41:19 | 2017-12-21T17:41:19 | 105,317,742 | 0 | 0 | null | 2017-09-29T21:14:37 | 2017-09-29T21:14:37 | null | UTF-8 | Python | false | false | 6,132 | py | import environ
# Three levels up from pathways-backend/config/settings/base.py gives pathways-backend/
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('main')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in
# the .env file, that is to say variables from the .env files will only be used if
# not defined as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
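# A hypothetical .env consumed by the block above might contain, for example:
#   DJANGO_DEBUG=True
#   DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend
#   DJANGO_ACCOUNT_ALLOW_REGISTRATION=True
# (each of these keys is read further down in this settings module)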
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'corsheaders',
'rest_framework',
'behave_django',
'parler',
]
LOCAL_APPS = [
'polls.apps.PollsConfig',
'locations.apps.LocationsConfig',
'organizations.apps.OrganizationsConfig',
'search.apps.SearchConfig',
'users.apps.UsersConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MIGRATION_MODULES = {
'sites': 'main.contrib.sites.migrations'
}
DEBUG = env.bool('DJANGO_DEBUG', False)
CORS_ORIGIN_ALLOW_ALL = True
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
ADMINS = [
("""PeaceGeeks""", '[email protected]'),
]
MANAGERS = ADMINS
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
'debug': DEBUG,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
STATIC_URL = '/static/'
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
MEDIA_ROOT = str(APPS_DIR('media'))
MEDIA_URL = '/media/'
ROOT_URLCONF = 'config.urls'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose-format': {
'format': '%(levelname)s %(module)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
'verbose-console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose-format'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
'bc211': {
'handlers': ['verbose-console'],
'level': 'DEBUG',
},
'polls': {
'handlers': ['verbose-console'],
'level': 'DEBUG',
},
},
}
PARLER_DEFAULT_LANGUAGE_CODE = 'en'
PARLER_LANGUAGES = {
1: (
{'code': 'en',},
{'code': 'fr',},
{'code': 'nl',},
),
'default': {
'fallbacks': ['en'],
'hide_untranslated': False,
}
}
WSGI_APPLICATION = 'config.wsgi.application'
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'users.adapters.SocialAccountAdapter'
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
ADMIN_URL = r'^admin/'
| [
"[email protected]"
] | |
1d26fc16271d8e9c4e023fb8d0548a2c7769d925 | 3adfa973d3c61b4f5c8ffb2e9cf7e2e7b95fe171 | /1/4/9/1.4.9.py | 03fe3ae71c3bfbddf9d2a4803cce1ccb1f18292a | [] | no_license | bletzacker/alyra | 12b173c218a4f4d399055e2aa39e841d932169fc | 30990fa823720d3555d44a0719cd46f944cc4efe | refs/heads/master | 2020-04-19T12:30:04.869363 | 2019-03-16T12:31:37 | 2019-03-16T12:31:37 | 168,193,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Utilisation de l'API de https://blockchain.info/
# pip install blockchain
from blockchain import blockexplorer
import time
now = time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime())
height_latest_block = blockexplorer.get_latest_block().height
print("Nous pouvons calculer précisément la taille de l’ensemble des en-têtes.")
print("Nous multiplions la taille d'une en-tête (80 octets) par le nombre de blocs à ce jour : ",height_latest_block)
print("Taille de l’ensemble des en-têtes le ", now, ": ", 80 * (height_latest_block + 1) / 1e6, "Mo")
| [
"[email protected]"
] | |
fe09defdd2aac833ad3b706a687edfb9fd1caf69 | 6603c222871b2dd4df4cad5522cd1ffb764d7276 | /scripts/test_params_train-1-svc_testing_kernels.py | 24d8c400d6201e17dc6e7c5d23376f1f986e044a | [] | no_license | Sandman1705/Relevant-Images-in-HTML | 5264c9fa078c37e2ef69b3898460f251453e9c4c | 75c441f6d4262e224af38e870aaf8df7a13284b9 | refs/heads/master | 2020-03-16T02:09:31.330421 | 2018-10-04T16:21:34 | 2018-10-04T16:21:34 | 132,458,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | import sys
sys.path.insert(0, '../src')
import os
dir_name = os.path.dirname(sys.argv[0])
if(len(dir_name)!=0):
os.chdir(os.path.dirname(sys.argv[0]))
from svc_models import kernel_test
import pandas as pd
def main():
data = pd.read_csv("../data/imgdata_train.csv", index_col='page')
results, best_params, (f1_score, acc_score) = kernel_test(data)
#print("Best parameters:", best_params)
#print("F1:", f1_score, "Accuracy:", acc_score)
results.to_csv("results_train_1-svc_testing_kernels.csv")
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
e13ddc0768f370a0fc0c229b13b9ff5493444676 | 9ba7138ce81f6ceb2581cb8868ebb102e4c1b31a | /other_dependency/test/test_module.py | dc18758bf81df9812dc53a45058d4560d03173fa | [] | no_license | syev/dependency | a787e0de4901872e2ae34fbb3256e6218e63b49e | 38e92d198fe0cf60d7ae57b5ef631e00a083ae32 | refs/heads/master | 2020-04-02T03:19:22.769554 | 2016-07-15T15:47:23 | 2016-07-15T15:47:23 | 62,855,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | """Unit tests"""
import unittest
import other_dependency.depmodule
class TestModule(unittest.TestCase):
def test_something(self):
self.assertEqual(other_dependency.depmodule.function(), 1)
| [
"[email protected]"
] | |
39de5d7854f8fa71836130cc2bb246841e7677be | 7ba87be3b62ebd3647762984d7190d4c0abc4aa1 | /primeall.py | 0e1daa4c7aeedd3b04f650688f14317ba82fc2dc | [] | no_license | pradeepdevloper1/Learn_Python_CN | 7ca0d70e2cb5050bff2e090ec08cf97e4c38da2e | cf2978f52c68511cb93c03922aecae570ae9b009 | refs/heads/main | 2023-02-03T23:39:17.738748 | 2020-12-22T18:29:58 | 2020-12-22T18:29:58 | 323,703,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | n=int(input())
k=2
while k<=n:
Flag =False;
d=2;
while d<k:
if k%d==0:
Flag=True
d+=1;
if not(Flag):
print(k)
k+=1; | [
"[email protected]"
] | |
f5d319d69486e544284d5a391d18304dd20f00fe | d29fd8ac20bf53f366821892bf5a80005a9cef36 | /tests/apps/pages_directory.py | 2d096f87c51ae88ef0ebd1ac72dc6772f44a26cb | [
"MIT"
] | permissive | T4rk1n/dazzler | d325ff664c6e310374f08cea84bd53aa1ca2ca43 | 69c49422dc19c910445ab265b1d3481041de8f43 | refs/heads/master | 2023-02-11T02:39:08.423597 | 2021-12-06T03:16:49 | 2021-12-06T03:34:25 | 191,060,792 | 19 | 7 | MIT | 2023-01-23T11:02:57 | 2019-06-09T22:16:59 | Python | UTF-8 | Python | false | false | 146 | py | from dazzler import Dazzler
app = Dazzler(__name__)
app.config.pages_directory = 'page_dir'
if __name__ == '__main__':
app.start('--debug')
| [
"[email protected]"
] | |
837a50e4b9f8e97ec8f304adc9d057df7917293e | dad67bd2b3bcd2e5d7db630d5ded744e57b9b57e | /algorithms/compression.py | 0225e69ec47035893bde0ef98eb2fbd65804f4a8 | [
"MIT"
] | permissive | jcarreiro/jmc-python | 1affb26757b8df530a345c8995e6c758e7c3e2af | 979c67ca489d4e80e6be96d23f8bcecacabbee59 | refs/heads/master | 2021-06-06T17:58:39.311612 | 2019-05-17T14:13:11 | 2019-05-17T14:13:11 | 14,427,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | ##############################################################################
# Basic compression algorithms
##############################################################################
# Prefix Codes
#
# 1. Every message is a leaf in a binary tree.
#
# 2. The code is encoded in the path from the root to the leaf (left = 0,
# right = 1)
#
# As a result, no code is a prefix of another code.
#
# The average length of a code, assuming a probability distribution on the
# symbols, is l = \sum_{i} p_{i} l_{i}, where p_i is the probability of the
# i-th symbol, and l_i is the length of its code (the depth of the leaf).
#
# Source: Algorithms in the Real World lecture notes.
def prefix_code(s):
# build a tree with len(s) leaf nodes
# now just assign symbols from s to the leaf nodes until we run out
pass
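# A minimal sketch of one way to realize the leaf/path scheme described above:
# Huffman's greedy algorithm. `huffman_code` is an added illustrative helper
# (it is not the unfinished prefix_code() API); `probs` is assumed to map
# symbols (strings or ints, not tuples) to probabilities p_i.
import heapq

def huffman_code(probs):
    # Heap entries are (probability, tiebreaker, tree); a tree is either a
    # bare symbol or a (left, right) pair. The tiebreaker keeps the heap from
    # ever comparing two trees directly.
    heap = [(p, i, sym) for i, (sym, p) in enumerate(probs.items())]
    heapq.heapify(heap)
    counter = len(heap)
    while len(heap) > 1:
        p1, _, t1 = heapq.heappop(heap)
        p2, _, t2 = heapq.heappop(heap)
        heapq.heappush(heap, (p1 + p2, counter, (t1, t2)))
        counter += 1
    codes = {}
    def walk(tree, path):
        if isinstance(tree, tuple):
            walk(tree[0], path + '0')  # left edge = 0
            walk(tree[1], path + '1')  # right edge = 1
        else:
            codes[tree] = path or '0'  # single-symbol edge case
    walk(heap[0][2], '')
    return codes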
| [
"[email protected]"
] | |
14f38006d15d26343d7b1e21b00a34643f5619b1 | db12d20e1e9329503af280104929de63a622b6bd | /photos/admin.py | e20da248833c7b00555a7ef4613382e20cf9c81b | [
"MIT"
] | permissive | dorothymuhonja/Gallery | 61b5aee21859f563f24736d38bc8ca20df737a75 | 422b760fe64b6ff91398fe92edfc240817a0bce4 | refs/heads/master | 2023-03-29T11:11:15.572123 | 2021-03-24T11:37:25 | 2021-03-24T11:37:25 | 349,353,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
from .models import Category, Location, Image
admin.site.register(Category)
admin.site.register(Location)
admin.site.register(Image)
| [
"[email protected]"
] | |
18a0227d98d52769dace79cf55d75fa631b7d570 | 9e7c04bd4543261e4427b9f3cfa383c22ec59598 | /Vistas/migrations/0008_auto_20201028_1947.py | 95a4b134318d4ec39a7f25bb456855b0feb2d0c0 | [] | no_license | GustavoJimenezV/PythonERP | 0325a3292c65380be3ed997063a1eb64598201b8 | b31e55ad5f5867debe65a0cb3dd7753289bda5d5 | refs/heads/master | 2023-01-10T14:26:05.040550 | 2020-11-05T06:19:02 | 2020-11-05T06:19:02 | 301,633,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | # Generated by Django 3.1.1 on 2020-10-29 01:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Vistas', '0007_auto_20201028_0207'),
]
operations = [
migrations.AlterModelOptions(
name='mobiliario',
options={'ordering': ['Nombre'], 'verbose_name': 'Mobiliario', 'verbose_name_plural': 'Mobiliarios'},
),
migrations.AlterModelOptions(
name='pago',
options={'ordering': ['IdEmpleado'], 'verbose_name': 'Pago', 'verbose_name_plural': 'Pagos'},
),
migrations.AlterModelOptions(
name='pedido',
options={'ordering': ['Fecha'], 'verbose_name': 'Pedido', 'verbose_name_plural': 'Pedidos'},
),
migrations.AlterModelOptions(
name='producto',
options={'ordering': ['Nombre'], 'verbose_name': 'Producto', 'verbose_name_plural': 'Productos'},
),
migrations.AlterModelOptions(
name='proveedor',
options={'ordering': ['Nombre'], 'verbose_name': 'Proveedor', 'verbose_name_plural': 'Proveedores'},
),
migrations.AlterModelOptions(
name='remplazo',
options={'ordering': ['Descripcion'], 'verbose_name': 'Remplazo', 'verbose_name_plural': 'Remplazos'},
),
migrations.RenameField(
model_name='materiaprima',
old_name='Existencias',
new_name='Existencia',
),
]
| [
"[email protected]"
] | |
f2c72470a67ccb4f7257c63cbb56e7e57a4e2ecd | 655b5d91011e5ac6361441b32c8b6c893815cf28 | /Trees/getting_started/Level_Order_Binary_Tree_Traversal.py | 4cf1707af6b06daf7cb31082bd7f6a7fde921ca8 | [
"MIT"
] | permissive | abhaydhiman/Pyalgo | 1bdc02d6326022ac832e20c581700d7875638380 | 69efdd937041548234bf67a2bd0b962b6e60a556 | refs/heads/main | 2023-05-10T17:56:06.005156 | 2021-05-19T16:38:22 | 2021-05-19T16:38:22 | 318,769,620 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | class Node:
def __init__(self, key) -> None:
self.val = key
self.left = None
self.right = None
def height(node):
if node is None:
return 0
else:
lheight = height(node.left)
rheight = height(node.right)
if lheight > rheight:
return lheight + 1
else:
return rheight + 1
def printGivenLevel(node, level):
if node is None:
return
if level == 1:
print(node.val, end=' ')
elif level > 1:
printGivenLevel(node.left, level - 1)
printGivenLevel(node.right, level - 1)
def printLevelOrder(node):
h = height(node)
for i in range(1, h + 1):
printGivenLevel(node, i)
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
# print(height(root))
printLevelOrder(root)
| [
"[email protected]"
] | |
947d1a396be059afdf7d383c3ba8875ac7652ea0 | e6342576193fd70937ab1cead8d9504f5a1a0b9b | /basic_api/admin.py | 55af462e17a4184687dafcbebc6dba0b8735234e | [] | no_license | shubham1560/Django-rest-angular-frontend | 41acfad812d522c12b2f2c70931bbf882e1f5f85 | 306151ebf772b036204bb709096b0eaea0a8d552 | refs/heads/master | 2020-07-15T22:08:16.271011 | 2019-09-19T18:40:41 | 2019-09-19T18:40:41 | 205,658,813 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from .models import Summary
# Register your models here.
admin.site.register(Summary)
| [
"[email protected]"
] | |
d32785f5251c57ca8aae7287da6d07c3ebf1e6d8 | 4dc686e8c2584585d7aaf9e27f82a77fb9177f40 | /meetups/migrations/0003_rename_participant_meetup_participants.py | 984c88b6e35497fcaccbd8b15eb156dfb2d79da4 | [] | no_license | AhmedElashmawi/DevOps-Task | d7f0cc6564de265d70259d0599e7f31415ef40d4 | 0f56104c21f71be2fa289f43ce031618e1a5fd41 | refs/heads/master | 2023-07-27T02:07:09.011033 | 2021-09-09T23:38:28 | 2021-09-09T23:38:28 | 404,090,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Generated by Django 3.2.6 on 2021-08-25 20:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('meetups', '0002_remove_emolyees_slug'),
]
operations = [
migrations.RenameField(
model_name='meetup',
old_name='participant',
new_name='participants',
),
]
| [
"[email protected]"
] | |
76abc4ed71db0aea0bfe5dd5c7b26debda40e245 | b0b6f5e9c5af597c1f0943d60aef49dd3b52dd3d | /videos/decorators.py | 6b1d82b5cc36fcb4737cb469cd5e5eb356b54ac6 | [] | no_license | dan214/Elimisha | 1c917f0b3c123bcf8e328083281c035478cf4467 | 2c179a0bb503d3ee00fb1179444abf140d3c28cf | refs/heads/master | 2021-09-26T05:15:25.566148 | 2021-09-16T20:40:05 | 2021-09-16T20:40:05 | 139,483,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def student_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='login'):
'''
Decorator for views that checks that the logged in user is a student,
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_student,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
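# Usage sketch (illustrative; the view name is an assumption):
#   @student_required
#   def quiz_list(request):
#       ...
# Because `function` is optional, calling with arguments also works:
#   @student_required(login_url='accounts:login')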
def speaker_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='login'):
'''
Decorator for views that checks that the logged in user is a teacher,
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_teacher,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator | [
"[email protected]"
] | |
fab2693211dbd0c2a860cc76f9c6560b088a2410 | 3486e643dc826b68fd3c7d895a8d3216492c5b66 | /Tests/test_context_menu.py | 9dc01ebb6e256d08e176ec392b6833319ddf2b6b | [] | no_license | maheshkafle/The-Internet-Example-Solution | 773d29eee5ef62c800780c7b54608766cdb701f2 | 298794dbb843aeee8709aeb87bd672da5ccce51a | refs/heads/main | 2023-06-16T12:09:40.729174 | 2021-07-17T21:08:36 | 2021-07-17T21:08:36 | 383,881,268 | 0 | 0 | null | 2021-07-17T21:08:36 | 2021-07-07T17:49:03 | Python | UTF-8 | Python | false | false | 587 | py | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
"""
Test Context Click Menu or Right Click Menu
"""
URL = "https://the-internet.herokuapp.com/context_menu"
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get(URL)
action_chain = ActionChains(driver)
context_click_element = driver.find_element(By.ID, 'hot-spot')
action_chain.context_click(context_click_element).perform()
alert = driver.switch_to.alert
print(alert.text)
alert.accept()
| [
"[email protected]"
] | |
c77de50c1bc3274824ecd3f3cc23faa27d6840d7 | 4c3dd270440c48a0a8e87d1937844371476f7cef | /resource_wrangler/scripts/download_mods.py | cb42130c64e1983371fe8880c460d6c88f9945b7 | [] | no_license | Soartex-Modded/Resource-Wrangler | f84726bf5ffb246d8562149fb6cc0a613a4f4043 | 36c6f7059bb876e034c99d5e02fca1cf81888dac | refs/heads/master | 2023-01-25T00:34:22.900581 | 2020-11-29T23:00:35 | 2020-11-29T23:00:35 | 309,116,894 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,085 | py | import json
import math
import os
import requests
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy import create_engine
from sqlalchemy.sql import select
def download_mods(mods_dirs, database_path, mod_limit=100):
"""
Collect the top mods from CurseForge into mods_dirs
:param mods_dirs: {[minor_version]: [path to mods folder]}
:param database_path: path to .db file with download history (will be created if not exists)
:param mod_limit: maximum number of mods to collect
"""
mods_dirs = {k: os.path.expanduser(v) for k, v in mods_dirs.items()}
database_path = os.path.expanduser(database_path)
patch_info = {}
for minor_version in mods_dirs:
patch_info[minor_version] = {}
os.makedirs(mods_dirs[minor_version], exist_ok=True)
os.makedirs(os.path.dirname(database_path), exist_ok=True)
engine = create_engine('sqlite:///' + database_path)
metadata = MetaData()
mod_files = Table('mod_files', metadata,
Column('id', Integer, primary_key=True),
Column('file_name', String(250)),
Column('mod_id', Integer),
Column('vanilla_minor_version', Integer))
metadata.create_all(engine)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
}
page_size = 50
mod_count = 0
# download sets of mod information at a time
for page_index in range(math.ceil(mod_limit / page_size)):
mods = requests.get(
"https://addons-ecs.forgesvc.net/api/v2/addon/search/",
params={
'gameId': 432,
'index': page_index * page_size,
'pageSize': page_size,
'sort': 'TotalDownloads',
'sortDescending': True
},
headers=headers).json()
for mod_meta in mods:
mod_count += 1
if mod_count > mod_limit:
return
if mod_meta['categorySection']['name'] != 'Mods':
continue
versioned_mod_files = {}
for mod_file_meta in mod_meta['gameVersionLatestFiles']:
tokens = mod_file_meta['gameVersion'].split('.')
minor_version = int(tokens[1])
patch_version = 0 if len(tokens) == 2 else int(tokens[2])
# find latest mod files
if minor_version in versioned_mod_files:
if versioned_mod_files[minor_version]['patch_version'] > patch_version:
continue
prior_file_id = versioned_mod_files.get(minor_version, {}).get('value', {}).get('projectFileId', 0)
if mod_file_meta['projectFileId'] > prior_file_id:
versioned_mod_files[minor_version] = {
'patch_version': patch_version,
'value': mod_file_meta
}
for minor_version in versioned_mod_files:
if str(minor_version) not in mods_dirs:
continue
mod_file_meta = versioned_mod_files[minor_version]['value']
patch_info[str(minor_version)][mod_file_meta["projectFileName"]] = {
"mod_id": mod_meta['slug'],
"mod_name": mod_meta['name'],
# typically contains the mod version inside somewhere
"mod_filename": mod_file_meta['projectFileName'],
"mc_version": mod_file_meta['gameVersion'],
"mod_authors": [auth['name'] for auth in mod_meta['authors']],
"url_website": mod_meta['websiteUrl'],
"description": mod_meta.get('summary')
}
available_file_name = mod_file_meta['projectFileName']
stored_file_name = engine.execute(select([mod_files.c.file_name]).where(
(mod_files.c.mod_id == mod_meta['id']) & (mod_files.c.vanilla_minor_version == minor_version))
).scalar()
if stored_file_name == available_file_name:
# file is already current
# print(f'Skipping {mod_meta["name"]} for 1.{minor_version}')
continue
mod_path = os.path.join(mods_dirs[str(minor_version)], mod_file_meta['projectFileName'])
if os.path.exists(mod_path):
engine.execute(mod_files.insert(),
file_name=available_file_name,
mod_id=mod_meta['id'],
vanilla_minor_version=minor_version)
continue
download_url = requests.get(
f"https://addons-ecs.forgesvc.net/api/v2/addon/{mod_meta['id']}/file/{mod_file_meta['projectFileId']}/download-url",
headers=headers).text
print(f'Downloading {mod_meta["name"]} for 1.{minor_version}')
with open(mod_path, 'wb') as mod_file:
mod_file.write(requests.get(download_url, headers=headers).content)
if stored_file_name is None:
engine.execute(mod_files.insert(),
file_name=available_file_name,
mod_id=mod_meta['id'],
vanilla_minor_version=minor_version)
else:
engine.execute(mod_files.update()
.where((mod_files.c.mod_id == mod_meta['id']) & (mod_files.c.vanilla_minor_version == minor_version))
.values(file_name=available_file_name))
for minor_version in patch_info:
with open(os.path.join(mods_dirs[str(minor_version)], "patch_info.json"), 'w') as patch_info_file:
json.dump(patch_info[minor_version], patch_info_file, indent=4)
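# Example invocation (illustrative; the directories, database path, and limit are assumptions):
# download_mods(mods_dirs={'12': '~/mods/1.12', '16': '~/mods/1.16'},
#               database_path='~/mods/history.db',
#               mod_limit=50)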
| [
"[email protected]"
] | |
c72e3c3dcb8a88238fa6b42cb63e1df026e8c669 | d2d6bbb76fd92ad596b0476b37ac8dd5cf08df14 | /1.9 LISTAS.py | a97405df71a8abca906f6bf2d182f2441b9b24db | [] | no_license | edneyefs/curso_python | b917d8f2c405173af901287dab86264ff937aaa6 | 2c862ad62223b7c3bd0ea7d7410a9b69c38d814d | refs/heads/master | 2022-12-14T21:29:59.875637 | 2020-08-21T12:42:07 | 2020-08-21T12:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | lista = []
print(type(lista))
print(dir(lista))
help(lista)  # help() prints the documentation itself and returns None, so print() is not needed
print(len(lista))  # counter
lista.append(1)
lista.append(5)
print(len(lista))
nova_lista = [1, 4, 'Ana', 'Bia']
#print(nova_lista)
nova_lista.remove(4)
#print(nova_lista)
nova_lista.reverse()
print(nova_lista)
lista = [1, 5, 'Rebeca', 'Guilherme', 3.1415]
print(lista.index(1))
print(lista[2])
print(lista[-1])
lista = ['Ana', 'Lia', 'Rui', 'Paulo', 'Dani']
print(lista[1:3])
print(lista[1:-1])
print(lista[1:])
print(lista[::2])
print(lista[::-1])
del lista[2]
print(lista)
del lista[1:]
print(lista)
| [
"[email protected]"
] | |
e2543c1d836b4e07c2c56809cb5ccd62c0e46a2c | 6c7412abde7b4e65b751b6862f7ea66f0a24b9f0 | /pf_tests.py | d91a5b790db378d1db5539cfddc47cc9b53e97a2 | [] | no_license | SundropFuels/project-finance | c030112b015a6c2c82d74b8c78b4ba37d773383b | 5a49c7c9885f453828b5e43cd27564e3f4c95b23 | refs/heads/master | 2021-01-20T10:56:37.159616 | 2015-04-25T19:59:04 | 2015-04-25T19:59:04 | 10,133,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | import unittest
from capex_tests import *
from loan_tests import *
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
311b92c0e6dd038422f4fe578243ed511d7603e1 | 3d5e26af890495bf1ed137af789dc61e4a61442f | /prestamo/migrations/0001_initial.py | 4c71cb423e43354399e55e3bea08d8f036310492 | [] | no_license | andchaves/recursos | 187ba4bfa5ced1531526c763eea2a72f9b70d3d6 | 6f98cc83a1b0e8524e516a28e65d94cb903608fd | refs/heads/master | 2021-08-20T09:22:07.303926 | 2017-11-28T19:16:43 | 2017-11-28T19:16:43 | 112,377,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-25 20:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cliente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
('apellido', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Formulario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('conceptoPrestamo', models.CharField(max_length=1000)),
('fechaPrestamo', models.DateTimeField()),
('fechaDevolucion', models.DateTimeField()),
('nombreCliente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='prestamo.Cliente')),
],
),
migrations.CreateModel(
name='Recurso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_recurso', models.CharField(max_length=50)),
('cantidad', models.IntegerField()),
],
),
migrations.AddField(
model_name='formulario',
name='nombreRecurso',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='prestamo.Recurso'),
),
]
| [
"[email protected]"
] | |
c009a684dee20066dc3b9eb52828169dc7772e13 | 908bd9a40dd34c788595643586e7231c5aa32d76 | /learning_logs/migrations/0010_auto_20190326_2207.py | 3a0dab48978800b18290417c835523c568088bf5 | [] | no_license | tpchencn/myweb | 625257541ec3c874ff9839c370e274f5887987ad | 8915857873d6a52b0803cbb19fe5fe7e123b366a | refs/heads/master | 2020-05-01T15:23:58.271045 | 2019-04-28T14:14:58 | 2019-04-28T14:14:58 | 177,545,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # Generated by Django 2.1.7 on 2019-03-26 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('learning_logs', '0009_remove_entry_text'),
]
operations = [
migrations.CreateModel(
name='Books',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=200)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AlterModelOptions(
name='readnote',
options={'verbose_name_plural': 'Books'},
),
migrations.AlterField(
model_name='readnote',
name='topic',
            field=models.ForeignKey(on_delete=models.CASCADE, to='learning_logs.Books'),
),
]
| [
"[email protected]"
] | |
d469fbea7eb9b0326c4fae0374ee749cb83bf42b | 8adea6649ec3d5b62477db4b65367fca889cc650 | /hello.py | 2fb572ac053d7628b1ed6e2f1a32631a28a18f74 | [] | no_license | hshar94/devopssol | ad0d0c95d62281f30cab4b81afcafb5bcd61f400 | 2903101220a9c170c925ad5659d1156f3b8ab330 | refs/heads/master | 2023-05-22T21:44:31.283234 | 2021-06-13T10:29:03 | 2021-06-13T10:29:03 | 375,890,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from flask import Flask
from flask import render_template
from flask import request
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def hello_world():
return "<h1>HHHello World from intellipaat!!@@ from Kubernetes from github!</h1>"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
| [
"[email protected]"
] | |
548980782c09a047bbcc43b0e12a6dae822cdcc6 | ed1d841dbd836f5a02a8b2c22bcc92380f28d11b | /seed.py | 9b08aa64301e4ced1c79ad9d8a6e7a7e4658118c | [] | no_license | GraceDurham/ratings | b063389f368f0b3994f0771ca4cac46555a04a10 | 2e628c2a824ca5a10879a15282cd60e21695322b | refs/heads/master | 2020-05-23T07:59:29.310561 | 2017-02-03T02:00:36 | 2017-02-03T02:00:36 | 80,483,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | """Utility file to seed ratings database from MovieLens data in seed_data/"""
from sqlalchemy import func
from model import User
from model import Rating
from model import Movie
from datetime import datetime
from model import connect_to_db, db
from server import app
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
    # we won't be trying to add duplicate movies
Movie.query.delete()
    # Read u.item file and insert data
for row in open("seed_data/u.item"):
# striped the whitespace
row = row.rstrip()
# print "each row!", row
# we took the row and split it on the pipe
row_split = row.split("|")
# print "it's splitted!!", row_split
# sliced the giant list into only 0-4 index
first_five = row_split[:5]
# print "this is our short list", first_five
# unpacked the first five items from the u.item list
movie_id, title, released_at, empty, imdb_url = first_five
# print first_five
#Boolean if released at is not an empty string evaluates true
#set string to datetime object
# else make datetime equal none if no value is present in release at
if released_at:
released_at = datetime.strptime(released_at, "%d-%b-%Y")
else:
released_at = None
        title = title[:-7]  # strip the trailing " (YYYY)" (7 characters) from the title
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
    # we won't be trying to add duplicate ratings
Rating.query.delete()
    # Read u.data file and insert data
for row in open("seed_data/u.data"):
row = row.strip().split()
user_id, movie_id, score, time_stamp = row
# print row
rating = Rating(
user_id=int(user_id),
movie_id=int(movie_id),
score=int(score))
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
def set_val_user_id():
"""Set value for the next user_id after seeding database"""
# Get the Max user_id in the database
result = db.session.query(func.max(User.user_id)).one()
max_id = int(result[0])
# Set the value for the next user_id to be max_id + 1
query = "SELECT setval('users_user_id_seq', :new_id)"
db.session.execute(query, {'new_id': max_id + 1})
db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
set_val_user_id()
| [
"[email protected]"
] | |
ce74869dfba1ac197d3fa552de5a0282de7b1254 | 53f8d420057b7f7d403b29e7f1f827df9d422b6b | /Solver.py | df2fdc813406b58432dad9f76aec07f90861e100 | [] | no_license | won2930015/PyCharm_test2 | 3fc403fef278d5a5e8168cba5cdc8c32f0788f2e | 11ec6f8ef103d6be63e7090f9003f58778616024 | refs/heads/master | 2020-04-09T22:29:36.792077 | 2018-12-19T03:33:36 | 2018-12-19T03:33:36 | 160,629,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
class Solver(object):
# <editor-fold desc="Description">
def demo(self, a, b, c):
# a = int(input('a:'))
# b = int(input('b:'))
# c = int(input('c:'))
d = b ** 2 - 4 * a * c
if d >= 0:
            disc = math.sqrt(d)
            root1 = (-b + disc) / (2 * a)
            root2 = (-b - disc) / (2 * a)
            print(root1, root2)
else:
            raise ValueError("negative discriminant: no real roots")
print('ok')
Solver().demo(2, 1, 0)  # comment
# # Solver.calculate123()
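# for demo(2, 1, 0): d = 1, so the call prints the roots "0.0 -0.5" and then "ok"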
| [
"[email protected]"
] | |
f613a28a68ddb2b2e13f560c935d205ae1a933bc | e124468b63af504f0a52d8c23a7b4d5441104c21 | /ecommerce/store/views.py | c1765c582c3640559d8662a9e7797f49fff52cae | [] | no_license | arpitgupta30/Ecommerce-Website | 568d1fe6c7a089686b5796e4b84a5bbf8f2d147b | 39777cf843dab375765e5a646e44afea2af349a7 | refs/heads/master | 2022-11-05T02:04:15.249516 | 2020-06-18T10:27:17 | 2020-06-18T10:27:17 | 269,936,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | from django.shortcuts import render
from django.http import JsonResponse
from .models import *
import json
import datetime
from . utils import cookieCart, cartData, guestOrder
# Create your views here.
def store(request):
products = Product.objects.all()
data = cartData(request)
items = data['items']
order = data['order']
cart_total = data['cart_total']
context = {'products': products,
'cart_total': cart_total}
return render(request, 'store/store.html', context)
def cart(request):
data = cartData(request)
items = data['items']
order = data['order']
cart_total = data['cart_total']
context = {'items': items,
'order': order,
'cart_total':cart_total
}
return render(request, 'store/cart.html', context)
def checkout(request):
data = cartData(request)
items = data['items']
order = data['order']
cart_total = data['cart_total']
context = {'items': items,
'order': order,
'cart_total': cart_total,
}
return render(request, 'store/checkout.html', context)
def updateItem(request):
data = json.loads(request.body)
productId = data['productId']
action = data['action']
customer = request.user.customer
product = Product.objects.get(id = productId)
order, created = Order.objects.get_or_create(customer = customer, complete = False)
orderItem, created = OrderItem.objects.get_or_create(order = order, product = product)
if action == 'add':
orderItem.quantity = orderItem.quantity+1
elif action == 'remove':
orderItem.quantity = orderItem.quantity-1
orderItem.save()
if orderItem.quantity<=0:
orderItem.delete()
return JsonResponse("Data was added", safe=False)
def processOrder(request):
transaction_id = datetime.datetime.now().timestamp()
data = json.loads(request.body)
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer = customer, complete = False)
else:
customer, order = guestOrder(request, data)
total = float(data['form']['total'])
order.transaction_id = transaction_id
    if total == order.get_total_price:
order.complete = True
order.save()
if order.shipping == True:
ShippingAddress.objects.create(
customer= customer,
order= order,
address= data['shipping']['address'],
city= data['shipping']['city'],
state= data['shipping']['state'],
zipcode = data['shipping']['zipcode']
)
return JsonResponse("Payment was completed", safe = False) | [
"[email protected]"
] | |
6ceb6080895456a03be17937d08324d6464f3e4f | f15f4e4ae7861e8a854c6bc10a47c4c0aaebe712 | /scripts/dist-git/pkgdb_gen_gitolite_conf.py | 255ad05146ffd01bf524d2d10c2af8fd8161deee | [] | no_license | asamalik/dist-git | 825c5e225e714ec89737ac4cacbb574fecfd95c5 | 71fea916ab829d19f6c11fcfe49a8cd31f813ccb | refs/heads/master | 2020-12-25T19:15:14.620146 | 2015-04-27T18:48:29 | 2015-04-27T18:48:29 | 32,317,420 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | #!/usr/bin/python -t
#
# Create an /etc/gitolog/conf/getolog.conf file with acls for dist-git
#
# Takes no arguments!
#
import grp
import sys
import requests
from ConfigParser import ConfigParser
def _get_conf(cp, section, option, default):
if cp.has_section(section) and cp.has_option(section, option):
return cp.get(section, option)
return default
if __name__ == '__main__':
config = ConfigParser()
config.read("/etc/dist-git/dist-git.conf")
user_groups = _get_conf(config, "acls", "user_groups", "").split(",")
admin_groups = _get_conf(config, "acls", "admin_groups", "").split(",")
ACTIVE = _get_conf(config, "acls", "active_branches", "").split(",")
RESERVED = _get_conf(config, "acls", "reserved_branches", "").split(",")
pkgdb_acls_url = _get_conf(config, "acls", "pkgdb_acls_url", "")
pkgdb_groups_url = _get_conf(config, "acls", "pkgdb_groups_url", "")
# Read the ACL information from the packageDB
data = requests.get(pkgdb_acls_url).json()
# Get a list of all the packages
acls = data['packageAcls']
pkglist = data['packageAcls'].keys()
pkglist.sort()
# sanity check
#if len(pkglist) < 2500:
# sys.exit(1)
# get the list of all groups
pkgdb_groups = requests.get(pkgdb_groups_url).json()
# print out our user groups
for group in user_groups + pkgdb_groups["groups"]:
print "@{0} = {1}".format(group, " ".join(grp.getgrnam(group)[3]))
# Give a little space before moving onto the permissions
print ''
# print our default permissions
print 'repo @all'
print ' - VREF/update-block-push-origin = @all'
if admin_groups:
print ' RWC = @{}'.format(" @".join(admin_groups))
print ' R = @all'
#print ' RW private- = @all'
    # don't enable the above until we prevent building for real from private-
for pkg in pkglist:
branchAcls = {} # Check whether we need to set separate per branch acls
buffer = [] # Buffer the output per package
masters = [] # Folks that have commit to master
writers = [] # Anybody that has write access
# Examine each branch in the package
branches = acls[pkg].keys()
branches.sort()
for branch in branches:
if not branch in ACTIVE:
continue
if 'packager' in acls[pkg][branch]['commit']['groups']:
# If the packager group is defined, everyone has access
buffer.append(' RWC %s = @all' % (branch))
branchAcls.setdefault('@all', []).append((pkg, branch))
if branch == 'master':
masters.append('@all')
if '@all' not in writers:
writers.append('@all')
else:
# Extract the owners
committers = []
owners = acls[pkg][branch]['commit']['people']
owners.sort()
for owner in owners:
committers.append(owner)
for group in acls[pkg][branch]['commit']['groups']:
committers.append('@%s' % group)
if branch == 'master':
masters.extend(committers)
# add all the committers to the top writers list
for committer in committers:
if not committer in writers:
writers.append(committer)
# Print the committers to the acl for this package-branch
committers = ' '.join(committers)
buffer.append(' RWC %s = %s' %
(branch, committers))
branchAcls.setdefault(committers, []).append((pkg, branch))
print
print 'repo %s' % pkg
#if len(branchAcls.keys()) == 1:
# acl = branchAcls.keys()[0]
# print ' RW = %s' % acl
#else:
print '\n'.join(buffer)
for reserved in RESERVED:
print ' - %s = @all' % reserved
print ' RWC refs/tags/ = %s' % ' '.join(writers)
if masters:
print ' RWC = %s' % ' '.join(masters)
sys.exit(0)
| [
"[email protected]"
] | |
b36df7b0192a1496fa338fe4bd31bcf8c9822f46 | d60e23026d717942466dd363e4822da7b00f6320 | /web/tests/apps/partner/test_views.py | 52cd9892c46eb5eeb0d9cd68330792801dddae95 | [
"BSD-3-Clause"
] | permissive | sidneijp/zedev | 190f59d9b5551137eabe948d0bf816c32fcb7782 | 75d6a83d08febb795f862627811925ea18f89fca | refs/heads/master | 2023-01-01T20:59:41.942026 | 2020-10-23T23:01:12 | 2020-10-23T23:01:12 | 303,247,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,777 | py | import json
from django.contrib.gis.geos import Point, GEOSGeometry
from django.urls import reverse
import pytest
from apps.partner import factories, views, models
class TestViews:
def setup(self):
self.point = Point(15.0, 10.0, srid=4326)
self.coverageArea = GEOSGeometry(json.dumps({
"type": "MultiPolygon",
"coordinates": [
[
[
[30.0, 20.0],
[45.0, 40.0],
[10.0, 40.0],
[30.0, 20.0]
]
],
[
[
[15.0, 5.0],
[40.0, 10.0],
[10.0, 20.0],
[5.0, 10.0],
[15.0, 5.0]
]
]
]
}))
self.another_coverageArea = GEOSGeometry(json.dumps({
"type": "MultiPolygon",
"coordinates": [
[
[
[-30.0, -20.0],
[-45.0, -40.0],
[-10.0, -40.0],
[-30.0, -20.0]
]
],
[
[
[-15.0, -5.0],
[-40.0, -10.0],
[-10.0, -20.0],
[-5.0, -10.0],
[-15.0, -5.0]
]
]
]
}))
self.view = views.PartnerViewSet()
self.view.kwargs = {'coordinates': f'{self.point.x},{self.point.y}'}
@pytest.mark.unittest
def test_get_point_from_lookup(self):
expected = self.point
point = self.view.get_point_from_lookup()
assert point == expected
@pytest.mark.integration
@pytest.mark.django_db
def test_get_nearest(self, client):
instance = factories.PartnerFactory.create(
address=self.point,
coverageArea=self.coverageArea
)
url = reverse('partner-nearest-detail', kwargs=self.view.kwargs)
response = client.get(url)
data = response.json()
assert data.get('id') == instance.pk
@pytest.mark.integration
@pytest.mark.django_db
def test_get_partner(self, client):
instance = factories.PartnerFactory.create(
address=self.point,
coverageArea=self.coverageArea
)
url = reverse('partner-detail', kwargs={'pk': instance.pk})
response = client.get(url)
data = response.json()
assert data.get('id') == instance.pk
@pytest.mark.integration
@pytest.mark.django_db
def test_list_partners(self, client):
expected = 5
factories.PartnerFactory.create_batch(
expected,
address=self.point,
coverageArea=self.coverageArea
)
url = reverse('partner-list')
response = client.get(url)
data = response.json()
assert len(data) == expected
@pytest.mark.integration
@pytest.mark.django_db
def test_list_nearest_partners(self, client):
expected = 5
factories.PartnerFactory.create_batch(
expected,
address=self.point,
coverageArea=self.coverageArea
)
factories.PartnerFactory.create_batch(
expected,
address=self.point,
coverageArea=self.another_coverageArea
)
url = reverse('partner-list')
url = url + '?address={ "type": "Point", "coordinates": [15, 10] }'
response = client.get(url)
data = response.json()
assert len(data) == expected
| [
"[email protected]"
] | |
38af83d170297d348201ba84ec024ff6782f1b88 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/tests/regressiontests/admin_custom_urls/urls.py | 12f440e54206905c1883af69161ca4715a9ff7be | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/admin_custom_urls/urls.py | [
"[email protected]"
] | |
ca016bd689fb246e19dc877a574e00c0cd0e1ec1 | 2b6e1b7bd7065229054b4cdecd40daa5e251c22d | /src/models/dqn.py | 4195fc5933776981002b4d8d68a69c1ac3b934bb | [] | no_license | raufer/deep-q-learning | b9be99c41829e8d62cd350cd279e5ddc135e7809 | c31b8803a45bcf1f22f1c4552daf48b9a284dd5c | refs/heads/main | 2023-06-19T06:01:49.867163 | 2021-07-20T13:35:30 | 2021-07-20T13:35:30 | 379,271,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.config import config
class DQN(nn.Module):
"""
Assumption: the environment is deterministic
so all equations presented here are also formulated deterministically for the sake of simplicity.
In the reinforcement learning literature, they would also contain expectations
over stochastic transitions in the environment.
    Our aim is to train a policy that tries to maximize the discounted, cumulative reward
        R = sum_{t=t0}^{inf} gamma^t * r_t
    The discount factor gamma should be a constant between 0 and 1 that ensures the sum converges.
It makes rewards from the uncertain, far future, less important for our agent
than the ones in the near future that it can be more confident about
The main idea behind Q-learning is:
If we had a function Q* :: (S, A) -> R (scalar) that could tell us the real return of
taking an action A at the state S, then we could easily construct an optimal policy:
policy*(s) = argmax {a} Q*(S, a)
This policy would always maximize our rewards
    However, we don't know everything about the world, so we do not have direct access to Q*.
    Nevertheless, we can use function approximation techniques to approximate Q*.
For the training update rule, we'll use the fact that every function Q for some policy
obeys the Bellman Equation:
Q_pi(s, a) = r + gamma * max {a'} Q_pi(s', a')
The difference between the two sides of the equality is known as the temporal
difference error
        delta = Q(s, a) - (r + gamma * max {a'} Q(s', a'))
    To minimize this error, we'll use the Huber loss:
* MSE when the error is small (< 1)
* MAE when the error is large (> 1)
(more robust to outliers)
This error is calculated over a batch of transitions B
sampled from the replay memory
L = 1 / |B| * sum {(s, a, s', r) in B} L(delta)
with L(delta) =
1/2 delta**2 for |delta| < 1
|delta| - 1/2 otherwise
Q-network
Our model is a convolutional neural network that takes as input
    the difference between the current and previous screen patches.
It has two outputs representing Q(s, left) and Q(s, right),
where s is the input to the network.
In effect, the network is trying to predict the quality/value of
taking each action given the current input
"""
def __init__(self, h, w, outputs):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size=5, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
| [
"[email protected]"
] | |
0c7ea9ab5c864e9b66250d7cbbd2983cbb8b51a7 | 6b9e271dd425396c9cbc617e47f5065de2d1fef1 | /tmplr/__init__.py | 60519e68d78e107bd2776ea0e455ec007229936f | [
"MIT"
] | permissive | joshleejosh/tmplr | 0cb236f185a82abad28380f0c5ddd0cc02fdb731 | 19209bb43ce40a307eecf2faf6dae8ccc8b0c9db | refs/heads/master | 2020-04-04T09:48:05.418311 | 2019-01-06T18:14:34 | 2019-01-06T18:14:34 | 42,019,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | # -*- coding: utf-8 -*-
"""
Yet another static site generator.
"""
| [
"[email protected]"
] | |
814dd18821eebcb7889c382aedbef546353c54a2 | b1ebf9c399477b6aa15627208a31b39cf673ffe4 | /testes/cadastro2.py | 8baf70ab154ea3236492fdd78d7462c31cd86f78 | [] | no_license | srclayton/Python-uDemy | 170a0ca88e4251abe2105fe4b2a797f0e33e353b | cff915da30edfc61647588431d52d32cf08c22bc | refs/heads/main | 2023-01-21T17:23:33.581235 | 2020-11-28T14:32:34 | 2020-11-28T14:32:34 | 308,778,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # -*- coding: utf-8 -*-
arquivo_original = open("cadastros.txt", "r")
arquivo_copia = open("cadastros2.txt", "a")
#for linha in arquivo_original:
# valores = linha.split()
# count=0
# while count < 11:
# arquivo_copia.write(valores[count] + " ")
# count+=1
# arquivo_copia.write("\n")
arquivo_2 = open("cadastros2.txt" , "r")
nome = input("Digite o nome:")
encontrado = False
for i in arquivo_2:
    valores = i.split()
    if valores[0] == nome:
        print(valores[0] + " ACHEI \n")
        print(valores[0])
        encontrado = True
if not encontrado:
    print("Nome não encontrado!")
chose= input("Deseja cadastrar um novo nome? y/n")
if chose == "y" or chose =="Y":
arquivo_copia.write(nome + " ")
arquivo_copia.close()
arquivo_original.close()
arquivo_2.close() | [
"[email protected]"
] | |
f7c2105bdadbd86b2600eff80773427f74aa8d3b | 525f39ec2fe53bcd65ff13b87c7a900358e30c1c | /Lab3B/1C.py | a539436e5493292aab72694f459c7520aa9e2d66 | [] | no_license | abdullah2808/ENGR_102 | fc2cbda672c407d8cf24c14238c2354516c8d7d7 | 6b3080e01c162e16b207df0099427368ab231af9 | refs/heads/master | 2022-12-14T23:49:56.090186 | 2020-09-13T21:31:12 | 2020-09-13T21:31:12 | 295,239,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # By submitting this assignment, I agree to the following:
# “Aggies do not lie, cheat, or steal, or tolerate those who do”
# “I have not given or received any unauthorized aid on this assignment”
#
# Name: ABDULLAH AHMAD
# Section: 518
# Assignment: LAB 3B - 1C
# Date: 13/9/18
Days = int(input("Please enter the amount of days of production: "))
Initial = float(input("Please enter the initial production of the well: "))
Decline = float(input("Please enter the decline rate of the well: "))  # decline rates are typically fractional
HyperCons = .8
Arps = (Initial/((1 + (HyperCons * Decline * Days ))**(1/HyperCons )))
print ("The production of a well after", Days, "days and with a hyperbolic constant of .8 is", Arps) # ARPS EQUATION
| [
"[email protected]"
] | |
bd22959ea01a76d8407594d4c290cda432b2a95e | e5b902979133b8bbc9abcec53d0b8486518a15cd | /test.py | 1ee5ef72813f59f202037bda85b34a129ab2b0e0 | [] | no_license | fly2016git/MyBot | 51838b37d0fcf81547b1e3a6a31b6bf8dbd107b1 | 66aad51dadda9d054159822ba58b57dded3eb272 | refs/heads/master | 2021-01-19T09:03:15.621776 | 2017-05-17T14:53:24 | 2017-05-17T14:53:24 | 87,713,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | '''
Created on April 9, 2017
@author: zpf_s
'''
ls = [1,2,3,4,5,6]
for _ in range(5):
print(_) | [
"zpf_s@DESKTOP-L2STAU4"
] | zpf_s@DESKTOP-L2STAU4 |
b4838aea4ebb660fe5f294da900b8d93a5ba1f09 | 2a671de9ff13e2d82de0abbffa1712000a5c53e6 | /selenium/Website/testcase/model/testutil.py | f1f3b0daca07106b384c17d22f00e0307a46e3fb | [] | no_license | asdf27901/auto_selenium | eddb28ed60cb8238bc5f742f66ddd972e0a518d0 | f669a222e102c1737966d8863e4a74650403e379 | refs/heads/main | 2023-03-27T01:08:07.695555 | 2021-03-14T18:31:40 | 2021-03-14T18:31:40 | 347,719,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import unittest
from driver.driver import *
from Website.config.data_config import *
class SetStartAndEnd(unittest.TestCase):
def setUp(self) -> None:
self.driver = get_driver()
self.driver.maximize_window()
self.driver.implicitly_wait(timeout)
def tearDown(self) -> None:
self.driver.quit()
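# Usage sketch (illustrative; the test class and URL are assumptions):
# class TestHomePage(SetStartAndEnd):
#     def test_open(self):
#         self.driver.get("https://example.com")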
| [
"[email protected]"
] | |
c885620223bab7b3b759d52fbf738145d6690444 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/rtctrl/setrtmetricdef.py | f5d55b1458f3e0a5d0f447271471db818060c777 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SetRtMetricDef(Mo):
"""
The set route metric definition.
"""
meta = ClassMeta("cobra.model.rtctrl.SetRtMetricDef")
meta.moClassName = "rtctrlSetRtMetricDef"
meta.rnFormat = "smetric"
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x1000001
meta.readAccessMask = 0x1000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.rtctrl.AttrDef")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.rtctrl.ASetRule")
meta.superClasses.add("cobra.model.fabric.L3ProtoComp")
meta.superClasses.add("cobra.model.fabric.ProtoComp")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.rtctrl.ASetRtMetric")
meta.rnPrefixes = [
('smetric', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "metric", "metric", 795, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
meta.props.add("metric", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 794, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 5
prop.defaultValueStr = "metric"
prop._addConstant("as-path", "as-path", 11)
prop._addConstant("community", "community", 1)
prop._addConstant("dampening-pol", "dampening-type", 10)
prop._addConstant("ip-nh", "ip-nexthop", 8)
prop._addConstant("local-pref", "local-preference", 4)
prop._addConstant("metric", "metric", 5)
prop._addConstant("metric-type", "metric-type", 9)
prop._addConstant("ospf-fwd-addr", "ospf-fowarding-address", 7)
prop._addConstant("ospf-nssa", "ospf-nssa-area", 6)
prop._addConstant("rt-tag", "route-tag", 2)
prop._addConstant("rt-weight", "route-weight", 3)
meta.props.add("type", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
4c2f407d9154816cb619874628717ae82b0805bf | 300cc8404005086b5a576f3e5572dbafd0cfe362 | /Dashboard/urls.py | aca2dafd68b55b94d38abbb071d9ef81158f3b7e | [] | no_license | TabaPesaMc/Blocks-Management-System | 4a92f64fbfaa0940146edf9ef5427c575583ea54 | 35d966fac0623b56c7674c6a1c837491276b5725 | refs/heads/main | 2023-04-29T21:15:19.106450 | 2021-05-12T06:22:45 | 2021-05-12T06:22:45 | 366,617,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | """STOCK_MANAGEMENT_PROJECT URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.dashboard_with_pivot, name='dashboard_with_pivot'),
path('data', views.pivot_data, name='pivot_data'),
] | [
"[email protected]"
] | |
5a7acb02c1f18590c52ef7d5b38a3033044bf2a1 | 366ad2700f4b5627fdd7ade2696a4901b3ec74bf | /A2/src/Load.py | 8d740e599aeda93b748eea6be541285be307acdf | [] | no_license | HaoyuanFu/qualifiedAssignments | f9f4f3dbaf72228ae3e30cdb7a5361e415a75bdc | 834715694956073e082c690be6ba3ef2e3f56b41 | refs/heads/master | 2020-04-24T04:53:14.390586 | 2019-02-24T00:49:22 | 2019-02-24T00:49:22 | 171,718,332 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | ## @file Load.py
# @author Harry Fu
# @brief Provides the Plotting methods
# @details Provides the Plotting methods with the assumption:
# The input file will match the given specification.
# @date 2/14/2018
from Data import Data
from CurveADT import CurveT
## @brief Reads data from file
## @param s The name of the file to read
def Load(s):
Data.init()
with open(s, 'r') as f:
row = next(f)
row = list(map(float, row.split(',')))
Z = row
n = len(Z)
row = next(f)
row = list(map(int, row.split(',')))
O = row
X_Y = [[] for _ in range(n + n)]
for row in f:
row = [e.strip() for e in row.split(',')]
for i, e in enumerate(row):
if e:
X_Y[i].append(float(e))
for i, o in enumerate(O):
Data.add(CurveT(X_Y[i + i], X_Y[i + i + 1], o), Z[i])
| [
"[email protected]"
] | |
2d886b361371b93476368c2a749027b3942e7f7d | 9dd5d19773720756e57b8f14bf92e848fca3f8ec | /TP5/makedatabase.py | 18a130683ffeaaa9ea726351a736ecced56e4774 | [] | no_license | JermanKell/TPs_Python | 12afd2c9817705d6d419d38285d3045a4197e3f4 | ed4bb958230d181146f08f6936586feffbd2bf15 | refs/heads/master | 2020-03-29T09:25:36.313313 | 2018-11-25T18:42:05 | 2018-11-25T18:42:05 | 149,757,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,979 | py | import sqlite3
def create_database(nom_db):
conn = sqlite3.connect(nom_db)
c = conn.cursor()
c.execute('''CREATE TABLE if not exists Communes
(code_dpt text, code_com text, nom_com text, pop_tot integer)''')
c.execute('''CREATE TABLE if not exists Departements
(code_dpt text, nom_dpt text, code_reg text)''')
c.execute('''CREATE TABLE if not exists Region
(code_reg text, nom_reg text)''')
conn.commit()
conn.close()
def fill_database(nom_db):
conn = sqlite3.connect(nom_db)
c = conn.cursor()
file_name = '''bdd/communes.csv'''
file = open(file_name, "rt")
file_content = file.read()
file_content = file_content.split("\n")
del file_content[0:8]
del file_content[len(file_content)-1]
for line in file_content:
if len(line) > 0:
data_array = line.split(";")
if len(data_array[0]) > 0:
c.execute("insert into Communes values (?, ?, ?, ?)", (data_array[2], data_array[5], data_array[6], int(data_array[9].replace(" ", ""))))
file.close()
file_name = '''bdd/departements.csv'''
file = open(file_name, "rt")
file_content = file.read()
file_content = file_content.split("\n")
del file_content[0:8]
del file_content[len(file_content)-1]
for line in file_content:
if len(line) > 0:
data_array = line.split(";")
if len(data_array[0]) > 0:
c.execute("insert into Departements values (?, ?, ?)", (data_array[2], data_array[3], data_array[1]))
file.close()
file_name = '''bdd/departements.csv'''
file = open(file_name, "rt")
file_content = file.read()
file_content = file_content.split("\n")
del file_content[0:8]
del file_content[len(file_content)-1]
for line in file_content:
if len(line) > 0:
data_array = line.split(";")
if len(data_array[0]) > 0:
c.execute("insert into Region values (?, ?)", (data_array[0], data_array[1]))
file.close()
conn.commit()
conn.close()
def sum_total_pop_department():
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.execute('SELECT code_dpt, SUM(pop_tot) FROM Communes GROUP BY code_dpt')
for row in c:
print(row)
c.close()
conn.close()
def sum_total_pop_region():
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.execute('SELECT code_reg, SUM(pop_tot) FROM Communes INNER JOIN Departements on Departements.code_dpt = Communes.code_dpt GROUP BY code_reg')
for row in c:
print(row)
c.close()
conn.close()
def list_department_com():
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.execute('SELECT nom_com, code_dpt FROM Communes WHERE nom_com IN (SELECT nom_com FROM Communes GROUP BY nom_com HAVING COUNT(*) > 1) ORDER BY nom_com ASC')
for row in c:
print(row)
c.close()
conn.close()
def modify_database(nom_db):
conn = sqlite3.connect(nom_db)
c = conn.cursor()
c.execute('''CREATE TABLE if not exists NouvellesRegions
(code_nouv_reg integer, lib_geo text)''')
file_name = '''bdd/zones-2016.csv'''
file = open(file_name, "rt")
file_content = file.read()
file_content = file_content.split("\n")
    # keep only the REG rows from the file
for line in file_content:
if len(line) > 0:
data_array = line.split(";")
if len(data_array[0]) > 0:
                if data_array[0] == 'REG':
print(data_array[1], data_array[2])
c.execute("insert into NouvellesRegions values (?, ?)", (data_array[1], data_array[2]))
file.close()
c.execute('ALTER TABLE Departements ADD COLUMN code_nouv_reg interger')
file_name = '''bdd/communes-2016.csv'''
file = open(file_name, "rt")
file_content = file.read()
file_content = file_content.split("\n")
del file_content[0:7]
departements_updated = []
for line in file_content:
if len(line) > 0:
data_array = line.split(";")
if len(data_array[0]) > 0:
if not data_array[2] in departements_updated:
c.execute('update Departements set code_nouv_reg = ' + data_array[3] + ' where code_dpt = \'' + data_array[2] + '\'')
departements_updated.append(data_array[2])
file.close()
conn.commit()
conn.close()
def new_sum_pop():
conn = sqlite3.connect('database2.db')
c = conn.cursor()
c.execute('SELECT NouvellesRegions.code_nouv_reg, lib_geo, SUM(pop_tot) FROM NouvellesRegions '
'INNER JOIN Departements on Departements.code_nouv_reg = NouvellesRegions.code_nouv_reg '
'INNER JOIN Communes on Communes.code_dpt = Departements.code_dpt GROUP BY NouvellesRegions.code_nouv_reg')
for row in c:
print(row)
c.close()
conn.close()
| [
"[email protected]"
] | |
22cc35da3867ed7a1af79ff958254f07c2be73d2 | 8a4e89fb0c84b2303b2835c1bd9cd258ea587278 | /model.py | 559b16ed3aa304efbfb4d5633b96e0c47e7c5141 | [] | no_license | zhangyipin/bi-lstm-crf-ner-tf2.0 | 8b1135c1b02ec419ec1f69884e29b80dee3321d5 | 375dd59c9883a7cdcdf1d9cdd70877d709b3c95c | refs/heads/master | 2020-12-10T06:53:28.896139 | 2019-12-05T13:17:03 | 2019-12-05T13:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2019/12/3 7:15 PM
# @Author: wuchenglong
import tensorflow as tf
import tensorflow_addons as tf_ad
class NerModel(tf.keras.Model):
def __init__(self, hidden_num, vocab_size, label_size, embedding_size):
super(NerModel, self).__init__()
self.num_hidden = hidden_num
self.vocab_size = vocab_size
self.label_size = label_size
self.transition_params = None
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_size)
self.biLSTM = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(hidden_num, return_sequences=True))
self.dense = tf.keras.layers.Dense(label_size)
self.transition_params = tf.Variable(tf.random.uniform(shape=(label_size, label_size)),
trainable=False)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, text,labels=None,training=None):
        # sequence lengths: count positions whose token id is not the padding id (-1)
        text_lens = tf.math.reduce_sum(tf.cast(tf.math.not_equal(text, -1), dtype=tf.int32), axis=-1)
        inputs = self.embedding(text)
        inputs = self.dropout(inputs, training=training)
logits = self.dense(self.biLSTM(inputs))
if labels is not None:
label_sequences = tf.convert_to_tensor(labels, dtype=tf.int32)
log_likelihood, self.transition_params = tf_ad.text.crf_log_likelihood(logits, label_sequences, text_lens)
self.transition_params = tf.Variable(self.transition_params, trainable=False)
return logits, text_lens, log_likelihood
else:
return logits, text_lens
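# Minimal usage sketch (illustrative; the sizes, batch, and tags are assumptions):
# model = NerModel(hidden_num=512, vocab_size=3000, label_size=10, embedding_size=300)
# logits, text_lens = model(padded_batch)                   # inference
# logits, text_lens, ll = model(padded_batch, labels=tags)  # training: maximize ll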
| [
"[email protected]"
] | |
ad308c53f59ee98a390a36b7b4555d40025649d2 | f15aa508749bc7c7f89221f40eee4fba9a7e22cb | /tests/test/test_server.py | 3f23339e62a4c551bb22db5196a76a0ee9535a83 | [
"WTFPL"
] | permissive | kamilion/StarryPy3k | 275064b82f2ad617915b6a6c69330ab43429b37d | 0440fa86886618035345628bb4f2315a0b0e3e6c | refs/heads/master | 2021-01-18T10:32:02.592891 | 2015-02-05T07:35:25 | 2015-02-05T07:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import asyncio
from server import ServerFactory
def start_server():
serverf = ServerFactory()
yield from asyncio.start_server(serverf, '127.0.0.1', 21025)
class TestServer:
def setUp(self):
self.loop = asyncio.get_event_loop()
def tearDown(self):
self.loop.stop()
def testTest(self):
asyncio.Task(self.beep())
@asyncio.coroutine
def beep(self):
x = yield from (lambda _: True)("")
return x | [
"[email protected]"
] | |
5f77a760ce54a8b9ecb7cd4edda181f8739a0f38 | 608ff19dabe0cac3eb13839fc3ec094766ccfed2 | /domainmodel/author.py | bbac8ff433a5b28d665f17592dc43d8a6fb4c19c | [] | no_license | cko780/235_A1 | e2fb35ee113ec803f483a26f997632c84f30afec | 639ee42cd3d7d56ac015da11185a6bc60f959183 | refs/heads/master | 2023-07-06T09:02:14.035983 | 2021-08-16T21:33:51 | 2021-08-16T21:33:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | class Author:
def __init__(self, author_id: int, author_name: str):
if not isinstance(author_id, int) or author_id < 0 or not isinstance(author_name, str) or author_name.strip() == "":
raise ValueError
self.__author_id = author_id
self.__author_name = author_name.strip()
self.__coauthors = []
@property
def full_name(self):
return self.__author_name
@full_name.setter
def full_name(self, author_name: str):
if not isinstance(author_name, str) or author_name.strip() == "":
raise ValueError
self.__author_name = author_name.strip()
@property
def unique_id(self):
return self.__author_id
def __repr__(self):
return f'<Author {self.__author_name}, author id = {self.__author_id}>'
def __eq__(self, other):
if not isinstance(other, Author):
return False
return self.__author_id == other.unique_id
def __lt__ (self, other):
if not isinstance(other, Author):
return False
return self.__author_id < other.unique_id
def __hash__(self):
return hash(self.__author_id)
def add_coauthor(self, coauthor):
if not isinstance(coauthor, Author):
return False
self.__coauthors.append(coauthor)
def check_if_this_author_coauthored_with(self, author):
return author in self.__coauthors
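# Usage sketch (illustrative names):
# a = Author(1, "Ursula K. Le Guin")
# b = Author(2, "J. R. R. Tolkien")
# a.add_coauthor(b)
# a.check_if_this_author_coauthored_with(b)  # True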
| [
"[email protected]"
] | |
326c83debde341e94a5a0e096d92b9a3d2265ca5 | aa47269c81411b60778fcec4b541f794d34bf8fe | /python_scripts/extract_by_attributes_and_mask.py | 8b0af96c414d7dc825554d26ce42613a2540f99a | [] | no_license | arhwiegman/SpatialTools | a1c525d97689874d0f10dcbd02087c737c8172f8 | 9693de55c3e9813b4c84856fbb9350af4c5fc2dd | refs/heads/master | 2022-06-15T00:02:59.854343 | 2022-06-02T16:43:13 | 2022-06-02T16:43:13 | 118,836,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,181 | py | # This script selects raster pixels by attribute value and then extracts another raster by the selected values as a mask
# Requirements: Spatial Analyst Extension, Numpy, os, glob
def main(interactive=False):
'''
# SETUP ---
# Loading modules and setting up directories
'''
dirs = setup_workspace(interactive)
wd = dirs[0] # working directory
'''
# MAIN PROGRAM ----
# 1. EXTRACT RASTER BY ATTRIBUTE BASED ON LAND USE HISTORY
# - Get areas that were forested or wetland in 1992
# - Get areas that were farms as of 2001
'''
#S1_extract_land_use_classes()
'''
# 2. EXTRACT DEPTH RASTER BASED ON PERCENTILES
# - analyze depth percentiles
# - get new rasters from depth selected percentiles for otter creek
'''
#S2_extract_depth_percentiles()
'''
# 3. EXTRACT RASTER BY MASK (LOCATION)
# get raster of conservation easements that were restored prior to 2017
# (2012 is the year that the LiDAR data for Addison county was flown in VTRANS HEC-RAS model)
'''
S3_extract_all_by_inundation_boundary()
    # START HERE: use gdal_merge.py to make a raster stack, then analyze the data in ArcGIS
'''
arcpy_ExtractByMask()
# 4. extract DEPTH data by mask for areas of different land use
arcpy_ExtractByMask()
# 3. ANALYSE RASTER DATA
'''
'''
arcpy_ResampleFromTemplate(inRast,
inRast=u'Z:\GeoData\Temp\Agriculture2001_0p7.tif',
outRast=u'Z:\GeoData\Temp\Agriculture2001.tif',
method='NEAREST')
outfile=u'Z:\GeoData\Temp\depth_agriculture2001.tif'
infile = u'Z:\GeoData\Trueheart\Depth (Max).bathymetry029.tif'
mask = u'Z:\GeoData\Temp\Agriculture2001.tif'
outfile=u'Z:\GeoData\Temp\depth_forestsWetlands1992.tif'
arcpy_ExtractByMask(infile,mask,outfile,deleteAfter=False)
infile = u'Z:\GeoData\Trueheart\Depth (Max).bathymetry029.tif'
mask = u'Z:\GeoData\Temp\Agriculture2001.tif'
outfile=u'Z:\GeoData\Temp\depth_agriculture2001.tif'
arcpy_ExtractByMask(infile,mask,outfile,deleteAfter=False)
'''
def S4_resample_landuse_to_0p7m():
# re-sample landuse
input = 'masked_Agriculture2001.tif'
arcpy_ResampleFromTemplate(inRast=input,
template='Z:/GeoData/Trueheart/Depth (Max).bathymetry029.tif',
outRast="resampled_"+input)
input = 'masked_ForestWaterWetland1992.tif'
arcpy_ResampleFromTemplate(inRast=input,
template='Z:/GeoData/Trueheart/Depth (Max).bathymetry029.tif',
outRast="resampled_"+input)
def S3_extract_all_by_inundation_boundary():
# reduce file size by masking by inundation boundary
    infiles = [
        #'Z:/GeoData/Trueheart/Depth (Max).bathymetry029.tif',
        'Z:/GeoData/Temp/Agriculture2001.tif',
        'Z:/GeoData/Temp/ForestWaterWetland1992.tif',
        #'Z:/GeoData/LCB/NAIP/outputs/mosaic_ndvi_2016_0p6m.tif',
        ]
    outfiles = [
        #'masked_depth.tif',
        'masked_Agriculture2001.tif',
        'masked_ForestWaterWetland1992.tif',
        #'masked_mosaic_ndvi_2016_0p6m.tif',
        ]
mask = 'Z:/GeoData/Trueheart/Inundation Boundary (Max Value_0).shp'
for i in range(len(infiles)):
arcpy_ExtractByMask(infiles[i],mask,outfiles[i],deleteAfter=False,clip=True,resample=True)
def S2_extract_depth_percentiles():
# set input file and destination paths
infile = u'Z:\GeoData\Trueheart\Depth (Max).bathymetry029.tif'
dst = u'Z:\GeoData\Trueheart' # destination path
# calculate percentiles
pct = arcpy_CalculatePercentileFromRaster(inRaster=infile,
nbins=10,
omitValue=-9999.0,
trimMinMax=True,
min=0,
max=2.25)
'''
depth percentiles
{'0.0%': 0.0009994507,
'10.0%': 0.27291107,
'20.0%': 0.46861267,
'30.0%': 0.62002563,
'40.0%': 0.7600937,
'50.0%': 0.899498,
'60.0%': 1.0399399,
'70.0%': 1.2348328,
'80.0%': 1.4948654,
'90.0%': 1.8216858,
'100.0%': 2.25}
'''
# select dictionary keys for lower and higher bounds of depth
lower = ['0.0%','20.0%','40.0%','60.0%','80.0%']
higher = ['20.0%','40.0%','60.0%','80.0%','100.0%']
# for each range
for i in range(len(lower)):
low = pct[lower[i]]
high = pct[higher[i]]
outfile = dst+'/depth_pct_{0:4.2f}-{1:4.2f}m.tif'.format(low,high)
SQL = '{0} > {1} AND {0} < {2}'.format('VALUE',low,high)
        print('extracting file: {}'.format(outfile))
arcpy_ExtractByAttributes(infile,SQL,outfile)
def S1_extract_land_use_classes():
    # A. get raster of areas that were forest, water, or wetland in 1992
    # 1992 and 2001 land use codes:
agriculture = 2
brush = 3
forest = 4
water = 5
wetland = 6
    # set SQL conditions with a format string; a cell holds only one code,
    # so the classes must be combined with OR (AND would always select nothing)
    conditions = " {0} = {1} OR {0} = {2} OR {0} = {3} "
    SQL = conditions.format('VALUE',forest,water,wetland)
i=u"Z:\GeoData\LCB\LCLULCB92\lclulcb92"
o="ForestWaterWetland1992.tif"
arcpy_ExtractByAttributes(inRaster=i,inSQL=SQL,outRaster=o)
    # B. get raster of areas that were farms in 2001 (<> means not-equal in SQL;
    # here we want the cells that ARE agriculture, so use = )
    conditions = " {0} = {1}"
    SQL = conditions.format('VALUE',agriculture)
i=u"Z:\GeoData\LCB\LCLULCB01\lclulcb01"
o="Agriculture2001.tif"
arcpy_ExtractByAttributes(inRaster=i,inSQL=SQL,outRaster=o)
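    # For reference, the format strings above expand to SQL like:
    #   " VALUE = 4 OR VALUE = 5 OR VALUE = 6 "   (forest / water / wetland)
    #   " VALUE = 2"                              (agriculture)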
def arcpy_ResampleFromTemplate(inRast,template,outRast,method='NEAREST'):
    # match the cell size of `template`; GetRasterProperties returns a Result
    # object whose str() is the numeric value, so the format() call below works
    Y = arcpy.GetRasterProperties_management(template,'CELLSIZEY')
    X = arcpy.GetRasterProperties_management(template,'CELLSIZEX')
    arcpy.Resample_management(inRast, outRast, "{} {}".format(X,Y), method)
    # return the output path so callers can use the resampled raster
    return(outRast)
def arcpy_SelectFeatureByAttribute(inFeature,inSQL,outFeature):
arcpy.MakeFeatureLayer_management(inFeature, 'lyr')
    # select the features matching the SQL expression and write them to a new feature class
arcpy.SelectLayerByAttribute_management('lyr', 'NEW_SELECTION', inSQL)
arcpy.CopyFeatures_management('lyr', outFeature)
def arcpy_ExtractByAttributes(inRaster,inSQL="VALUE > 1000",outRaster="extracted",deleteAfter=True):
# Description: Extracts the cells of a raster based on a logical query.
# Requirements: Spatial Analyst Extension
# Check out the ArcGIS Spatial Analyst extension license
# Execute ExtractByAttributes
print("extracting ",inSQL," from ",inRaster, "\n...")
attExtract = arcpy.sa.ExtractByAttributes(inRaster, inSQL)
# Save the output
print("success! Saving file: ",outRaster)
attExtract.save(outRaster)
if deleteAfter:
del attExtract
return()
def arcpy_CalculatePercentileFromRaster(inRaster, nbins=10, omitValue=None, trimMinMax=False, min=None, max=None):
# requires arcpy and numpy
array = arcpy.RasterToNumPyArray(inRaster)
    #remove no data values, returning a flattened array
    print ('removing no data values and flattening array.....' )
    if omitValue is not None:
        flatArray = array[array != omitValue]
    else:
        flatArray = array.flatten()
    if trimMinMax:  # was `is not None`, which is True even for the default False
        print('trimming min and max values...')
        flatArray = numpy.clip(flatArray,min,max)
    print ('sorting array....' )
    flatArray = numpy.sort(flatArray)
    #report some summary stats
    print('n = ', numpy.size(flatArray) )
print('min = ', numpy.min(flatArray) )
print('median = ', flatArray[int(numpy.size(flatArray) * 0.50)] )
print('max = ', numpy.max(flatArray) )
percentiles = [None]*nbins
percentiles[0] = numpy.min(flatArray)
# add percentile values in steps to the list of percentiles
print('populating list with percentile values...')
for i in range(1,nbins):
percentiles[i] = flatArray[int(numpy.size(flatArray)*i/nbins)]
percentiles.append(numpy.max(flatArray))
pkeys = [str(k/nbins*100)+'%' for k in range(nbins+1)]
pdict = dict(zip(pkeys,percentiles))
print(pdict)
return(pdict)
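# A minimal pure-NumPy sketch of the same computation (hypothetical helper, not
# called by this script): numpy.percentile yields the same class breaks that the
# loop above builds by indexing into the sorted array.
def numpy_percentile_breaks(flatArray, nbins=10):
    qs = [100.0*k/nbins for k in range(nbins+1)]
    vals = numpy.percentile(flatArray, qs)
    return dict(zip([str(q)+'%' for q in qs], vals))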
def arcpy_NormalizeRasterValues(inRaster,outRaster,maxValue=1,deleteAfter=False):
# note that zonal statistics already calculates this so it may be faster to
# use http://help.arcgis.com/En/Arcgisdesktop/10.0/Help/index.html#//0017000000m7000000
# load data, convert to array
# requires arcpy and numpy
    orig_raster = arcpy.Raster(inRaster)
    array = arcpy.RasterToNumPyArray(inRaster)
    # rescale values to the range [0, maxValue]
    normalized_array = (array - array.min()) / (array.max() - array.min()) * maxValue
    # back to a raster; take the spatial properties from the Raster object,
    # not from the input path string (which has no .extent attribute)
    normalized_raster = arcpy.NumPyArrayToRaster(
        in_array=normalized_array,
        lower_left_corner=orig_raster.extent.lowerLeft,
        x_cell_size=orig_raster.meanCellWidth,
        y_cell_size=orig_raster.meanCellHeight)
# and scene
normalized_raster.save(outRaster)
if deleteAfter:
del normalized_raster
return()
def arcpy_ExtractByMask(inRaster,inMask,outRaster,deleteAfter=False,clip=True,resample=True):
#takes either a raster or a shapefile and extracts data from inRaster inside mask
# requires spatial analyst extension
    if clip:
        try:
            e = arcpy.Describe(inMask).extent # create object of extent description
            extent = ("{} {} {} {}").format(e.XMin,e.YMin,e.XMax,e.YMax)
            print('clipping ',inRaster,'to extent:\n',extent)
            # Clip_management requires an output raster argument
            inRaster = arcpy.Clip_management(inRaster, extent, 'clipped_tmp.tif')  # scratch output name
        except Exception:
            print("oops something went wrong")
            traceback.print_exc()
    if resample:
        # only raster masks can be resampled; shapefile masks are used as-is
        if arcpy.Describe(inMask).dataType == 'RasterDataset':
            inMask = arcpy_ResampleFromTemplate(inMask, inRaster, 'resampled_mask_tmp.tif')  # scratch output name
    print("extracting by mask...")
    masked = arcpy.sa.ExtractByMask(inRaster,inMask)
masked.save(outRaster)
if deleteAfter:
del masked
def load_modules():
    # NOTE: these imports are local to this function, so they do not make the
    # modules available at module scope; the top-of-file imports handle that.
    # Loads modules and checks out the Spatial Analyst license
import os
import glob
import numpy
import arcpy
from arcpy import env
arcpy.CheckOutExtension("Spatial")
return()
def print_dir_contents(path=None):
    # print the directory name and its contents (defaults to the current directory)
    path = path or os.getcwd()
    print("Directory:\n", path)
    print("-"*15,"contents","-"*15,"\n",os.listdir(path))
    return()
def setup_workspace(interactive):
# set local variables
import os
import glob
import numpy
import arcpy
from arcpy import env
arcpy.CheckOutExtension("Spatial")
    print("Current environment workspace:\n", os.getcwd())
if interactive:
wd = env.workspace = input("copy environment workspace path below\n(e.g. ' Z:/awiegman/GeoData ')\n>>>")
dirs = [wd]
more = input("(A) press enter continue\n(B) press any other key then enter to link more directories)")
while len(more)>0:
            dirs.append(input("copy environment workspace path below\n(e.g. ' Z:/awiegman/GeoData ')\n>>>"))
            print_dir_contents()
more = input("(A) press enter continue\n(B) press any other key then enter to link more directories)")
return(dirs)
else:
wd = env.workspace = 'Z:/awiegman/GeoData/Temp'
print("New environment workspace: ",wd)
return([wd])
if __name__ == '__main__':
main(interactive=False) | [
"[email protected]"
] | |
b012ab34e45cf798f99a340e8c758fb3f8756f97 | d7e3d78e5eee5454e06e0eab3798a7ce94128267 | /Pfundamental Lab/Lab 5/Task 2.py | 02670ec88701aa661dfecfd6eb067d6f3c90c69a | [] | no_license | AbdulMoizChishti/python-practice | f1f001cdeb63112025703650cb72f8471733de38 | 379d117f66abee5e8b619e38a59533138aaaa1e7 | refs/heads/master | 2023-02-04T08:32:37.162350 | 2020-12-19T15:29:03 | 2020-12-19T15:29:03 | 322,878,014 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | num = 18
for i in range(1, 11):
print(num, "x", i,"=",num*i) | [
"[email protected]"
] | |
a14be52f4c558465de5d6ef552c0bc3dfa7a71eb | c96f48986c2bb993458f9081860326756ddfe7d9 | /django_db2charts/urls.py | 4a4cd0d8a9354ced049c6eed30ef15622c1e5daf | [
"MIT"
] | permissive | Alfredx/django-db2charts | 86aabd5ea32cad74e056fdf9eca00292d68ed0a9 | 436f9bf54cc022eda821a9679b3431c2ea16eb6f | refs/heads/master | 2021-01-20T20:53:44.501410 | 2016-06-28T07:52:47 | 2016-06-28T07:52:47 | 60,136,697 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """django_db2charts URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from db2charts import urls as db2charts_urls
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^db2charts/', include(db2charts_urls)),
]
| [
"[email protected]"
] | |
1bd7a906e3ae3f165ff81b9124b97e9187f5bcc5 | e5202e0f36c15b8898920a461a866168fa059947 | /clirad/n2o_3.2e-07/band_3/atmpro_mls/cliradlw_1013f91/param.py | 33924bb8f70d78d9230fd85569d18e1186ba5363 | [] | no_license | qAp/analysis_-_new_kdist_param | 653c9873751646f6fa9481544e98ed6065a16155 | 272dc3667030cdb18664108d0bd78fee03736144 | refs/heads/master | 2021-06-11T04:21:35.105924 | 2019-08-04T13:13:07 | 2019-08-04T13:13:07 | 136,108,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | PARAM = {'commitnumber': '1013f91', 'band': [3], 'molecule': {'n2o': 3.2e-07}, 'atmpro': 'mls', 'tsfc': 294}
PARAM_LBLNEW = {'atmpro': 'mls', 'band': '3a', 'commitnumber': '5014a19', 'conc': 3.2e-07, 'dv': 0.001, 'klin': 2.22e-20, 'molecule': 'n2o', 'ng_adju': [0, 0], 'ng_refs': [1, 2], 'nv': 1000, 'option_compute_btable': 0, 'option_compute_ktable': 0, 'option_wgt_flux': 1, 'option_wgt_k': 1, 'ref_pts': [[1, 250], [500, 250]], 'tsfc': 294, 'vmax': 620, 'vmin': 540, 'w_diffuse': [[1.8], [1.66, 1.8]], 'wgt': [[0.9], [0.5, 0.95]]} | [
"[email protected]"
] | |
682a746c86f697e0d7d40584dfe744687ff6f502 | 934475e24c0262013a000dc0bc86fbec5e82bec2 | /jemm/transcript.py | 42be4605fbe756cc827030788351af85a5ae9b99 | [] | no_license | zhanglab-aim/JEMM | 05e2cf93b6aab4cca94d4caa23bacd5445b21ccd | 9d1d00bfe71fdb61b1f610b72a9561933d32d303 | refs/heads/main | 2023-07-25T07:56:51.173370 | 2023-07-05T18:59:31 | 2023-07-05T18:59:31 | 572,681,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,645 | py | """
This module handles reading and writing related tasks for transcript-based exon measure
"""
# Author : zzjfrank
# Date : Aug 24, 2020
import os
import pandas as pd
import numpy as np
from collections import OrderedDict
import pickle
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
from .data_table import DataTable, Measure
from .suppa_helper import read_suppa_index, convert_tpm_to_psi, get_min_sanity_checker
from . import kallisto_helper
from .utils import logit
class TranscriptMeasure(Measure):
def __init__(self, event_id, sample_id, est_psi, bs_psi=None, var_psi=None, var_logit_psi=None, *args, **kwargs):
super().__init__(event_id, sample_id, est_psi, var_logit_psi, *args, **kwargs)
self.event_id = event_id
self.sample_id = sample_id
self.__psi = est_psi
self.__logit_psi = logit(self.__psi)
if np.isnan(self.__psi):
self.__var_psi = np.nan
self.__var_logit_psi = np.nan
else:
if bs_psi is not None:
valid_bs = np.where(~np.isnan(bs_psi))[0]
if len(valid_bs) <= 3:
self.__var_psi = np.nan
self.__var_logit_psi = np.nan
else:
self.__var_psi = np.var(bs_psi[valid_bs])
self.__var_logit_psi = np.var(logit(bs_psi[valid_bs]))
else:
self.__var_psi = var_psi
self.__var_logit_psi = var_logit_psi
@property
def psi(self):
return self.__psi
@property
def logit_psi(self):
return self.__logit_psi
@property
def var_logit_psi(self):
return self.__var_logit_psi
def to_plaintext(self):
s = "%.3f;%.3f,%.3f" % (self.psi, self.var_psi, self.var_logit_psi)
return s
@staticmethod
def from_plaintext(s):
# first assume the presence of variance terms
try:
psi, vares = s.split(";")
var, logit_var = vares.split(",")
return TranscriptMeasure(event_id=None, sample_id=None, est_psi=float(psi),
var_psi=float(var), var_logit_psi=float(logit_var))
# otherwise, just load PSI
except (ValueError, AttributeError):
psi = s
return float(psi)
#return TranscriptMeasure(event_id=None, sample_id=None, est_psi=float(psi),
# var_psi=None, var_logit_psi=None)
@staticmethod
def from_rsem(s):
psi = float(s)
return TranscriptMeasure(event_id=None, sample_id=None, est_psi=psi,
var_psi=0, var_logit_psi=0)
class TranscriptMeasureTable(DataTable):
def __init__(self, wd=None, index_file=None, input_type=None, event_type="SE", lazy_init=False, plaintext=False):
"""This class provides easy access to TranscriptMeasure across large datasets
Parameters
----------
wd : str
index_file : str
input_type : str
event_type : str
lazy_init : bool
Attributes
----------
plaintext : bool
data : pandas.DataFrame
Examples
--------
Initialize a TranscriptMeasureTable from Kallisto bootstrapping results::
>>> from jemm.transcript import TranscriptMeasureTable
>>> tmt = TranscriptMeasureTable(wd="./data/jemm/kallisto/",
>>> index_file="./data/jemm/suppa_index/suppa_gencodev34_SE_strict.ioe", input_type="kallisto")
>>> tmt.data.head()
>>> tmt.save("./data/jemm/tmt.pkl")
"""
self._wd = wd
self._input_type = input_type
self._index_file = index_file
self._event_type = event_type
self.data = None
self.plaintext = plaintext
if lazy_init is False:
assert os.path.isdir(self._wd)
assert os.path.isfile(self._index_file)
if self._input_type == "kallisto":
self.from_kallisto_hdf5(wd=self._wd, event_type=self._event_type, index_file=self._index_file, tmt=self)
elif self._input_type == "plaintext":
self.from_plaintext(filepath=wd, tmt=self)
elif self._input_type == "rsem":
self.from_rsem_table(filepath=wd, tmt=self)
else:
raise ValueError("Input type not understood: %s" % self._input_type)
def save(self, fp, mode="auto"):
if mode == "auto":
if fp.endswith(".pkl"):
mode = "pickle"
elif fp.endswith(".h5"):
mode = "hdf5"
elif fp.endswith(".txt") or fp.endswith(".gz"):
mode = "txt"
else:
raise ValueError("Cannot determine mode for given filepath")
if mode == "pickle":
pickle.dump(self, open(fp, "wb"))
elif mode == "hdf5":
self.data.to_hdf(fp, key="data", mode="w")
elif mode == "txt":
self.data.to_csv(fp, sep="\t")
@staticmethod
def _worker_reader(args):
"""worker reader for kallisto hdf5 to enable multiprocessing
"""
sn, fp, exon_index= args
event_ids = [e for e in exon_index[0]]
if type(fp) is str:
res = kallisto_helper.read_kallisto_h5(fname=fp)
else:
res = fp
est_psi = convert_tpm_to_psi(target_id=res['target_id'], tpm=res['tpm'], exon_index=exon_index).flatten()
bs_psi = convert_tpm_to_psi(target_id=res['target_id'], tpm=res['bs_samples'], exon_index=exon_index)
sample_tm = [
TranscriptMeasure(sample_id=sn, event_id=event_ids[j], est_psi=est_psi[j], bs_psi=bs_psi[j])
for j in range(len(est_psi))
]
return sn, sample_tm
@staticmethod
def from_plaintext(filepath, tmt=None):
data = pd.read_table(filepath, index_col=0)
data = data.applymap(lambda x: TranscriptMeasure.from_plaintext(x))
if tmt is None:
tmt = TranscriptMeasureTable(input_type="plaintext", lazy_init=True)
tmt.data = data
return tmt
@staticmethod
def from_rsem_table(filepath, tmt=None):
data = pd.read_table(filepath, index_col=0)
data = data.applymap(lambda x: TranscriptMeasure.from_rsem(x))
if tmt is None:
tmt = TranscriptMeasureTable(input_type="rsem", lazy_init=True)
tmt.data = data
return tmt
@staticmethod
def from_kallisto_hdf5(wd, index_file, event_type, sample_name_getter=None, tmt=None, nthreads=None, strip_tx_version=False, minimum_sanity_checker=None):
"""read kallisto transcript estimates
Parameters
----------
wd : str or dict
if is string, expects the filepath to a folder of kallisto results; if dict, expects a mapping from sample name to hdf5 estimates
index_file : str
event_type : str
sample_name_getter : callable, or None
tmt : jemm.TranscriptMeasureTable
nthreads : int or None
"""
if nthreads is None:
nthreads = min(32, cpu_count())
print("Using n=%i threads automatically.."%nthreads)
exon_index = read_suppa_index(fp=index_file, event_type=event_type, convert_id_to_darts=True, strip_tx_version=strip_tx_version)
event_ids = [e for e in exon_index[0]]
if type(wd) is str:
fp_list = [os.path.join(wd, x, "abundance.h5")
for x in os.listdir(wd) if os.path.isfile(os.path.join(wd, x, "abundance.h5"))]
if sample_name_getter is None:
sample_name_getter = lambda x: x.split("/")[-2]
else:
assert callable(sample_name_getter) is True
sample_names = [sample_name_getter(x) for x in fp_list]
elif type(wd) is dict:
sample_names = list(wd.keys())
fp_list = [wd[s] for s in sample_names]
else:
raise TypeError('Non-supported wd type: %s' % type(wd))
measure_dict = {}
if nthreads == 1:
for sn, fp in tqdm(zip(sample_names, fp_list), total=len(fp_list)):
_, sample_tm = TranscriptMeasureTable._worker_reader((sn, fp, exon_index))
measure_dict[sn] = sample_tm
else:
with Pool(nthreads) as pool:
pool_args = [(sn, fp, exon_index) for sn, fp in zip(sample_names, fp_list)]
pbar = tqdm(total=len(fp_list))
def pbar_update(*args):
pbar.update()
holders = [pool.apply_async(TranscriptMeasureTable._worker_reader, args=(pool_args[i],), callback=pbar_update) for i in range(pbar.total)]
res_list = [res.get() for res in holders]
for res in res_list:
sn, sample_tm = res
measure_dict[sn] = sample_tm
measure_df = pd.DataFrame.from_dict(measure_dict, orient="columns")
measure_df.index = np.array(event_ids, dtype="str")
if minimum_sanity_checker is not None:
eids = []
for eid in measure_df.index:
if minimum_sanity_checker(measure_df.loc[eid]):
eids.append(eid)
measure_df = measure_df.loc[eids]
if tmt is None:
tmt = TranscriptMeasureTable(wd=wd, index_file=index_file, input_type="kallisto", lazy_init=True)
else:
if tmt.plaintext is True:
measure_df = measure_df.applymap(lambda x: x.to_plaintext())
tmt.data = measure_df
return tmt
| [
"[email protected]"
] | |
07b9d9814ac9cfa9eebb4569b73e71272a56cdc7 | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | /.history/AC_Well_U18_20210720193949.py | d1bd37fe1285363db73ceec0bfe193a13e06ef95 | [] | no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 11,089 | py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
import pandas as pd
import math
import lasio
from scipy import interpolate
import matplotlib.pyplot as plt # GRAPHS
import glob
from matplotlib import rcParams
# %%
las1= lasio.read('./LAS/U18/U18_GR.las')
df1= las1.df()
df1.reset_index(inplace=True)
df1 = df1[['GR_EDTC', 'TDEP']]
las2 = lasio.read('./LAS/U18/U18_AT90_NPHI.las')
df2 = las2.df()
df2.reset_index(inplace=True)
df2 = df2[['AT90','NPHI','TDEP',]]
las3 = lasio.read('./LAS/U18/U18_DTCO.las')
df3= las3.df()
df3.reset_index(inplace=True)
df3 = df3[['DTCO', 'TDEP']]
U18_xl =pd.read_excel('./Excel_Files/U18_test.xls',sheet_name = 'U18_data')
df4=U18_xl[['DEPTH','RHOZ']]
#MERGE> to combine the LAS files into 1 df
result = pd.merge(df1,df2, on= 'TDEP',how='left')
result.set_index('TDEP', inplace=True)
df5=pd.merge(result,df3,on= 'TDEP',how='left')
df5.set_index('TDEP', inplace=True)
# %%
# array con nueva tabla para TDEP (prof) con paso de 0.5
dep= np.arange(200,1350,0.5)
f = interpolate.interp1d(df4['DEPTH'], df4['RHOZ'])
RHOZ_new = f(dep)
plt.plot(df4['DEPTH'], df4['RHOZ'], 'o', dep, RHOZ_new, '-')
plt.show()
df6= pd.DataFrame(RHOZ_new,dep, columns=['RHOZ'])
df=pd.DataFrame(df5.join(df6,how='inner',on='TDEP'))
# %%
TDEP= df.index
top=650
bottom=1200
temp=((0.0198*TDEP)+ 26.921)
v= 400000
b=0.88
tsup = 25 #F
WS=18000
RWs= (v/tsup/WS)**b
tf=temp
Kt1=6.77
df['RW']=(RWs*(tsup+Kt1))/(temp+Kt1)
df['Vsh'] = (df.GR_EDTC - 10) / (156 - 10)
df['Vclay']=((0.65)*df.Vsh)
mud_density=1.13835 #en g/cc
rhoss=2.70 # g/cc
rhosh=2.75
df['grain_density']=((df.Vsh*rhosh)+(1-df.Vsh)*rhoss)
df['porosity']=(df.grain_density-(df.RHOZ))/(df.grain_density-mud_density)
# %%
CORE =pd.read_excel('./CORE/CORE.xlsx',sheet_name='XRD')
mask = CORE.Well.isin(['U18'])
U18_Core = CORE[mask]
prof=U18_Core['Depth']
clays=U18_Core['Clays']
xls1 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Saturation')
mask = xls1.Well.isin(['U18'])
U18_sat = xls1[mask]
long=U18_sat ['Depth']
poro=U18_sat ['PHIT']
grain=U18_sat ['RHOG']
sw_core=U18_sat ['Sw']
klinkenberg =U18_sat ['K']
minimo=grain.min()
maximo=grain.max()
c=2.65
d=2.75
norm=(((grain-minimo)*(d-c)/(maximo-minimo))+c)
xls2 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Gamma')
mask = xls2.Well.isin(['U18'])
U18_GR = xls2[mask]
h=U18_GR['Depth']
cg1=U18_GR['GR_Scaled']
plt.hist(clays,bins=50,facecolor='y',alpha=0.75,ec='black', label="Vclay")
plt.title('Histogram-Vclay')
plt.xlabel('%Vclay')
plt.ylabel('Frequency')
plt.legend()
# %%
dt = 200
bt= 1350
plt.figure(figsize=(15,9))
plt.subplot(171)
plt.plot(df.GR_EDTC,TDEP,'g',lw=0.5)
plt.title('$GR$')
plt.axis([20, 130, dt,bt])
plt.xlabel('Gamma Ray ')
plt.gca().invert_yaxis()
plt.grid(True)
plt.subplot(172)
plt.plot(df.AT90,TDEP,lw=0.5)
plt.axis([10, 800, dt,bt])
plt.title('$AT90$')
plt.xlabel('Resistivity')
plt.gca().invert_yaxis()
plt.xscale('log')
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(173)
plt.plot(df.RHOZ,TDEP,'red',lw=0.5)
plt.axis([2.25, 2.65, dt,bt])
plt.title('$RHOZ$')
plt.xlabel('Standard \n Resolution \n Formation \n Density')
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(174)
plt.plot(df.NPHI,TDEP,'purple',lw=0.5)
plt.axis([0.6, 0.1, dt,bt])
plt.title('$NPHI$')
plt.xlabel('Thermal \n Neutron \n Porosity',fontsize=8)
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(175)
plt.plot(df.DTCO,TDEP,'r',lw=0.5)
plt.title('$DTCO$')
plt.xlabel('Delta-T \n Compressional ')
plt.axis([60,125, dt,bt])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(176)
plt.plot(temp,TDEP,'c')
plt.axis([20, 65, dt,bt])
plt.gca().invert_yaxis()
plt.title('$TEMP$')
plt.xlabel('Temperature')
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(177)
plt.plot(df.RW,TDEP,'blue',lw=0.5)
plt.title('$RW$')
plt.axis([0.4, 0.85, dt,bt])
plt.xlabel('Water \n Resistivity')
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.suptitle('U18_WELL LOGS_')
plt.show()
# %%
## SW_Archie
## SW=((a*Rw)/(Rt*(Por)^m))^(1/n)
a=1
m=2
n=2
Rw=df.RW
Rt=df.AT90
Phi=df.porosity
F = (a / (Phi**m))
df['Sw_a'] = (F *Rw/Rt)**(1/n)
df['Sw_a1']= df['Sw_a'].apply(lambda x: 1 if x>1 else x)
df['Sw_a1'] = df['Sw_a1'].replace(np.nan, 1)
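# Quick sanity check of the Archie relation: with Rw = 0.5, Rt = 20 and Phi = 0.2,
# F = 1/0.2**2 = 25 and Sw = (25*0.5/20)**0.5 ≈ 0.79.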
dfSh = df[df['Vsh']>0.5]
Rsh = 50
#Sw_Poupon
# TERM1= 1/RT - VSH/RSH
term1=(1/df.AT90)-(df.Vsh/Rsh)
## TERM2 = F*RW
term2=(F*df.RW)
## TERM3 = (1-vsh)
term3=(1-df.Vsh)
## SW_POUPON = ((TERM1*TERM2)/TERM3))^(1/N)
df['Sw_p']=((term1*term2)/term3)**(1/n)
df['Sw_p1']= df['Sw_p'].apply(lambda x: 1 if x >1 else x)
df['Sw_p1'] = df['Sw_p1'].replace(np.nan, 1)
# %%
# WAXMAN-SMITS CEC method (does not require VCL) but requires core measurements of CEC
TempC = (temp-32)/1.8
df['SwWS'] = df['Sw_p1']
CEC_av = 5
# ===== Waxman Smits Iterations. Reference: Well Logging for Earth Scientists, Page 663-667
for i in range(len(Rt)):
error = 1000
count1 = 0
phit = Phi.iloc[i]
if math.isnan(phit):
df['SwWS'][i] = 1
else:
Qv = rhosh*(1-phit)*CEC_av/phit/100 # Old Method
Bcond = 3.83*(1-0.83*np.exp(-0.5/Rw.iloc[i])) # Waxman and Thomas, 1974
BQv = Qv*Bcond
E = (phit**m)/a
Ct = 1/Rt.iloc[i]
Cw = 1/Rw.iloc[i]
x0 = df.iloc[i]['Sw_a1']
Swguess = x0
while count1 <= 100 and error > 0.0001:
count1 = count1+1
g = E*Cw*(Swguess**n) + E*BQv*(Swguess**(n-1)) - Ct
error = g
gp = n*E*Cw*(Swguess**(n-1)) + (n-1)*E*BQv*(Swguess**(n-2))
# print(df_1['SwWS'][i-1])
df['SwWS'].iloc[i] = Swguess-g/gp
Swguess = df['SwWS'].iloc[i]
# %%
# SIMANDOUX (1963) for shaly-sandy formations, used with saline fm waters Equation solved for n=2
# Input parameters:
#Rw - water resistivity
#Rt - true resistivity
#Phi - porosity
#Rsh - shale resistivity
# a - tortuosity factor
# m - cementation exponent
# n - saturation exponent
# Vsh - Volume of shale
df['Swsim']=((a*Rw)/(2*(Phi**m)))*(((df.Vsh/Rsh)**2+((4*Phi**m)/(a*Rw*Rt)))**(1/2)-(df.Vsh/Rsh))
df['Swsim1'] = df['Swsim'].replace(np.nan, 1)
df.head(2000)
# %%
plt.figure(figsize=(15,9))
plt.subplot(191)
plt.plot (df.GR_EDTC,TDEP,'g',cg1,h,'c.',lw=0.5)
plt.title('$GR/ Core.GR $')
plt.axis([20, 130, top,bottom])
plt.xlabel('Gamma Ray ')
plt.gca().invert_yaxis()
plt.grid(True)
plt.subplot(192)
plt.title('Vsh')
plt.plot (df.Vsh,TDEP,'black',lw=0.5)
plt.axis([0,1, top,bottom])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(193)
plt.title('$Vclay/Vclay Core$')
plt.plot (df.Vclay,TDEP,'m',clays,prof,'ro',lw=0.5)
plt.axis([0,1, top,bottom])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(194)
plt.title('Porosity \n Core Por.')
plt.plot (df.porosity,TDEP,'m',poro,long,'c*',lw=0.5)
plt.axis([0, 0.3, top,bottom])
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.subplot(195)
plt.title('Grain density \n Core GD')
plt.plot (df.grain_density,TDEP,'y',norm,long,'g>',lw=0.5)
plt.axis([2.64, 2.76, top,bottom])
plt.gca().invert_yaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Basic Archie
plt.subplot(196)
plt.plot (df.Sw_a1,TDEP,'c',sw_core,long,'m.',lw=0.5)
plt.title('$SW_A$')
plt.axis([0,1.1,top,bottom])
plt.xlabel('Water \n Saturation_A')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
#Poupon Laminated Model
plt.subplot(197)
plt.plot (df.Sw_p1,TDEP,'r',sw_core,long,'m.',lw=0.5)
plt.title('$SW_P$')
plt.axis([0,1.5,top,bottom])
plt.xlabel('Water \n Saturation_P')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
#Waxman-Smits
plt.subplot(198)
plt.plot (df.SwWS,TDEP,'g',sw_core,long,'m.',lw=0.5)
plt.title('$SW_W$')
plt.axis([0,1,top,bottom])
plt.xlabel('Water \n Saturation_Waxman')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
#Simandoux
plt.subplot(199)
plt.plot (df.Swsim1,TDEP,'y',sw_core,long,'m.',lw=0.5)
plt.title('$SW_S$')
plt.axis([0,1,top,bottom])
plt.xlabel('Water \n Saturation_Sim')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(1, 0)
plt.show()
# %%
corte=0.5
df['PAY_archie']=df.Sw_a1.apply(lambda x: 1 if x<corte else 0)
df['PAY_poupon']=df.Sw_p1.apply(lambda x: 1 if x<corte else 0)
df['PAY_waxman']=df.SwWS.apply(lambda x: 1 if x<corte else 0)
df['PAY_simandoux']=df.Swsim1.apply(lambda x: 1 if x<corte else 0)
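# Each PAY_* column flags net pay: 1 where the corresponding water saturation is
# below the 0.5 cutoff (likely hydrocarbon-bearing), 0 otherwise.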
plt.figure(figsize=(15,9))
plt.subplot(191)
#Basic Archie
plt.plot (df.Sw_a1,TDEP,'c',lw=0.5)
plt.title('$SW_A$')
plt.axis([0,1,top,bottom])
plt.xlabel('Sw_Archie')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.xlim(1, 0)
plt.subplot(192)
plt.plot (df.PAY_archie,TDEP,'c',lw=0.5)
plt.title('$PAY_A$')
plt.fill_between(df.PAY_archie,TDEP, color='c', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Poupon Laminated Model
plt.subplot(193)
plt.plot (df.Sw_p1,TDEP,'r',lw=0.5)
plt.title('$SW_P$')
plt.axis([0,1.5,top,bottom])
plt.xlabel('Sw_Poupon')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.xlim(1, 0)
plt.subplot(194)
plt.plot (df.PAY_poupon,TDEP,'r',lw=0.5)
plt.title('$PAY_P$')
plt.fill_between(df.PAY_poupon,TDEP, color='r', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Waxman-Smits
plt.subplot(195)
plt.plot (df.SwWS,TDEP,'g',lw=0.5)
plt.title('$SW_W$')
plt.axis([0,5,top,bottom])
plt.xlabel('Sw_Waxman')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.xlim(1, 0)
plt.subplot(196)
plt.plot (df.PAY_waxman,TDEP,'g',lw=0.5)
plt.title('$PAY_W$')
plt.fill_between(df.PAY_waxman,TDEP, color='g', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
#Simandoux
plt.subplot(197)
plt.plot (df.Swsim1,TDEP,'y',lw=0.5)
plt.title('$SW_S$')
plt.axis([0,2,top,bottom])
plt.xlabel('Sw_Simandoux')
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.xlim(1, 0)
plt.subplot(198)
plt.plot (df.PAY_simandoux,TDEP,'y',lw=0.5)
plt.title('$PAY_S$')
plt.fill_between(df.PAY_simandoux,TDEP, color='y', alpha=0.8)
plt.axis([0,0.001,top,bottom])
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.show()
# %%
df.insert(21, "WELL", 'U18')
df.head()
with pd.ExcelWriter('U18.xlsx') as writer:
df.to_excel(writer, sheet_name='U18_data')
| [
"[email protected]"
] | |
d4b8f5989466780e1f4819d54a8447935d821ed3 | b368f0dd09a4eed97a350ca01ac170bb44347f8d | /python/oneflow/framework/tensor_str.py | 10ecfbbc8c30e7196b4c34f79684cca7d94e273f | [
"Apache-2.0"
] | permissive | opencici2006/oneflow | bb67d3475e5b85d88f7f627733af75859e431759 | 7c3b42fa5ae95823d195c077565f0c190d98e7ad | refs/heads/master | 2023-08-01T14:22:33.745620 | 2021-09-09T02:52:54 | 2021-09-09T02:52:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,715 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This file is mostly referenced from PyTorch v1.8.1 torch/_tensor_str.py
"""
import numpy as np
import math
from typing import Optional
import oneflow as flow
class __PrinterOptions(object):
precision: int = 4
threshold: float = 1000
edgeitems: int = 3
linewidth: int = 80
sci_mode: Optional[bool] = None
PRINT_OPTS = __PrinterOptions()
def _try_convert_to_local_tensor(tensor):
if tensor.is_consistent:
tensor = tensor.to_consistent(
placement=tensor.placement, sbp=flow.sbp.broadcast
).to_local()
return tensor
class _Formatter(object):
def __init__(self, tensor):
self.floating_dtype = tensor.dtype.is_floating_point
self.int_mode = True
self.sci_mode = False
self.max_width = 1
self.random_sample_num = 50
tensor = _try_convert_to_local_tensor(tensor)
with flow.no_grad():
tensor_view = tensor.reshape(-1)
if not self.floating_dtype:
for value in tensor_view:
value_str = "{}".format(value)
self.max_width = max(self.max_width, len(value_str))
else:
nonzero_finite_vals = flow.masked_select(tensor_view, tensor_view.ne(0))
if nonzero_finite_vals.numel() == 0:
# no valid number, do nothing
return
nonzero_finite_abs = nonzero_finite_vals.abs()
nonzero_finite_min = nonzero_finite_abs.min().numpy().astype(np.float64)
nonzero_finite_max = nonzero_finite_abs.max().numpy().astype(np.float64)
for value in nonzero_finite_abs.numpy():
if value != np.ceil(value):
self.int_mode = False
break
if self.int_mode:
# Check if scientific representation should be used.
if (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
):
self.sci_mode = True
for value in nonzero_finite_vals:
value_str = (
("{{:.{}e}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = ("{:.0f}").format(value)
self.max_width = max(self.max_width, len(value_str) + 1)
else:
if (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
or nonzero_finite_min < 1.0e-4
):
self.sci_mode = True
for value in nonzero_finite_vals:
value_str = (
("{{:.{}e}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = (
("{{:.{}f}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
if PRINT_OPTS.sci_mode is not None:
self.sci_mode = PRINT_OPTS.sci_mode
def width(self):
return self.max_width
def format(self, value):
if self.floating_dtype:
if self.sci_mode:
ret = (
("{{:{}.{}e}}")
.format(self.max_width, PRINT_OPTS.precision)
.format(value)
)
elif self.int_mode:
ret = "{:.0f}".format(value)
if not (math.isinf(value) or math.isnan(value)):
ret += "."
else:
ret = ("{{:.{}f}}").format(PRINT_OPTS.precision).format(value)
else:
ret = "{}".format(value)
return (self.max_width - len(ret)) * " " + ret
def _scalar_str(self, formatter1):
return formatter1.format(_try_convert_to_local_tensor(self).tolist())
def _vector_str(self, indent, summarize, formatter1):
# length includes spaces and comma between elements
element_length = formatter1.width() + 2
elements_per_line = max(
1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
)
char_per_line = element_length * elements_per_line
def _val_formatter(val, formatter1=formatter1):
return formatter1.format(val)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
left_values = _try_convert_to_local_tensor(
self[: PRINT_OPTS.edgeitems]
).tolist()
right_values = _try_convert_to_local_tensor(
self[-PRINT_OPTS.edgeitems :]
).tolist()
data = (
[_val_formatter(val) for val in left_values]
+ [" ..."]
+ [_val_formatter(val) for val in right_values]
)
else:
values = _try_convert_to_local_tensor(self).tolist()
data = [_val_formatter(val) for val in values]
data_lines = [
data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
]
lines = [", ".join(line) for line in data_lines]
return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
def _tensor_str_with_formatter(self, indent, summarize, formatter1):
dim = self.dim()
if dim == 0:
return _scalar_str(self, formatter1)
if dim == 1:
return _vector_str(self, indent, summarize, formatter1)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
slices = (
[
_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1)
for i in range(0, PRINT_OPTS.edgeitems)
]
+ ["..."]
+ [
_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1)
for i in range(self.shape[0] - PRINT_OPTS.edgeitems, self.shape[0])
]
)
else:
slices = [
_tensor_str_with_formatter(self[i], indent + 1, summarize, formatter1)
for i in range(0, self.size(0))
]
tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
return "[" + tensor_str + "]"
def _tensor_str(self, indent):
summarize = self.numel() > PRINT_OPTS.threshold
if self.dtype is flow.float16:
self = self.float()
# TODO: not support flow.sbp.split(x) but flow.sbp.split(0).
def _cannot_print(sbp):
return (
sbp != flow.sbp.partial_sum
and sbp != flow.sbp.broadcast
and sbp != flow.sbp.split(0)
)
# TODO: delete it when boxing on "CPU" and s1->b on "GPU" are ready
if self.is_consistent:
self = self.to("cuda")
if all(_cannot_print(sbp) for sbp in self.sbp):
return "[...]"
with flow.no_grad():
formatter = _Formatter(get_summarized_data(self) if summarize else self)
return _tensor_str_with_formatter(self, indent, summarize, formatter)
def _add_suffixes(tensor_str, suffixes, indent):
tensor_strs = [tensor_str]
last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
for suffix in suffixes:
suffix_len = len(suffix)
if last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
tensor_strs.append(",\n" + " " * indent + suffix)
last_line_len = indent + suffix_len
else:
tensor_strs.append(", " + suffix)
last_line_len += suffix_len + 2
tensor_strs.append(")")
return "".join(tensor_strs)
def get_summarized_data(self):
dim = self.dim()
if dim == 0:
return self
if dim == 1:
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
return flow.cat(
(self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
)
else:
return self
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
end = [
self[i] for i in range(self.shape[0] - PRINT_OPTS.edgeitems, self.shape[0])
]
return flow.stack([get_summarized_data(x) for x in (start + end)])
else:
return flow.stack([get_summarized_data(x) for x in self])
def _gen_tensor_str_template(tensor, is_meta):
is_meta = is_meta or tensor.is_lazy
prefix = "tensor("
indent = len(prefix)
suffixes = []
# tensor is local or consistent
if tensor.is_consistent:
suffixes.append(f"placement={str(tensor.placement)}")
suffixes.append(f"sbp={str(tensor.sbp)}")
elif tensor.device.type == "cuda" or tensor.device.type == "gpu":
suffixes.append("device='" + str(tensor.device) + "'")
elif tensor.device.type != "cpu":
        raise RuntimeError("unknown device type")
if tensor.is_lazy:
suffixes.append("is_lazy='True'")
# tensor is empty, meta or normal
if tensor.numel() == 0:
# Explicitly print the shape if it is not (0,), to match NumPy behavior
if tensor.dim() != 1:
suffixes.append("size=" + str(tuple(tensor.shape)))
tensor_str = "[]"
elif is_meta:
tensor_str = "..."
suffixes.append("size=" + str(tuple(tensor.shape)))
else:
tensor_str = _tensor_str(tensor, indent)
suffixes.append("dtype=" + str(tensor.dtype))
if tensor.grad_fn is not None:
name = tensor.grad_fn.name()
suffixes.append("grad_fn=<{}>".format(name))
elif tensor.requires_grad:
suffixes.append("requires_grad=True")
return _add_suffixes(prefix + tensor_str, suffixes, indent)
def _gen_tensor_str(tensor):
return _gen_tensor_str_template(tensor, False)
def _gen_tensor_meta_str(tensor):
# meta
return _gen_tensor_str_template(tensor, True)
| [
"[email protected]"
] | |
e52168c2053ab542fa265c652cd8e6681ca39760 | 3061e43d2d90bae124e6ee32d41b9db4f9275805 | /task_based_analysis/fmri_fsl_noSmooth.py | 376726d29f8e6745cca096f41393ff7c91450997 | [] | no_license | orduek/kpe_task | cfc1a904944c79270d3a41aa9c78e0c616e7bccf | 5c84ff8963980ea7940d99cec7e45c63e1b18483 | refs/heads/master | 2021-06-22T06:33:31.669078 | 2021-02-18T09:23:05 | 2021-02-18T09:23:05 | 188,063,421 | 1 | 1 | null | 2020-12-06T10:04:46 | 2019-05-22T15:13:36 | Jupyter Notebook | UTF-8 | Python | false | false | 8,728 | py | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Created on Wed Dec 4 14:29:06 2019
@author: Or Duek
1st level analysis using FSL output
In this one we smooth using SUSAN, which takes longer.
"""
#%%
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
#import nipype.algorithms.rapidart as ra # artifact detection
from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth
from nipype.interfaces.utility import Function
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Setting up workflows
--------------------
In this tutorial we will be setting up a hierarchical workflow for fsl
analysis. This will demonstrate how pre-defined workflows can be setup and
shared across users, projects and labs.
"""
#%%
data_dir = '/home/oad4/scratch60/kpe'
output_dir = '/home/oad4/scratch60/work/fsl_analysis_ses2_Nosmooth'
removeTR = 4
fwhm = 0
tr = 1
session = '2' # choose session
#%% Methods
def _bids2nipypeinfo(in_file, events_file, regressors_file,
regressors_names=None,
motion_columns=None,
decimals=3, amplitude=1.0, removeTR=4):
from pathlib import Path
import numpy as np
import pandas as pd
from nipype.interfaces.base.support import Bunch
# Process the events file
events = pd.read_csv(events_file, sep=r'\s+')
bunch_fields = ['onsets', 'durations', 'amplitudes']
if not motion_columns:
from itertools import product
motion_columns = ['_'.join(v) for v in product(('trans', 'rot'), 'xyz')]
out_motion = Path('motion.par').resolve()
regress_data = pd.read_csv(regressors_file, sep=r'\s+')
np.savetxt(out_motion, regress_data[motion_columns].values[removeTR:,], '%g')
if regressors_names is None:
regressors_names = sorted(set(regress_data.columns) - set(motion_columns))
if regressors_names:
bunch_fields += ['regressor_names']
bunch_fields += ['regressors']
runinfo = Bunch(
scans=in_file,
conditions=list(set(events.trial_type_30.values)),
**{k: [] for k in bunch_fields})
for condition in runinfo.conditions:
event = events[events.trial_type_30.str.match(condition)]
runinfo.onsets.append(np.round(event.onset.values-removeTR, 3).tolist()) # added -removeTR to align to the onsets after removing X number of TRs from the scan
runinfo.durations.append(np.round(event.duration.values, 3).tolist())
if 'amplitudes' in events.columns:
runinfo.amplitudes.append(np.round(event.amplitudes.values, 3).tolist())
else:
runinfo.amplitudes.append([amplitude] * len(event))
if 'regressor_names' in bunch_fields:
runinfo.regressor_names = regressors_names
runinfo.regressors = regress_data[regressors_names].fillna(0.0).values[removeTR:,].T.tolist() # adding removeTR to cut the first rows
return [runinfo], str(out_motion)
#%%
subject_list = ['1468']#['1351'] #['008', '1223','1253','1263','1293','1307','1315','1322','1339','1343','1351','1356','1364','1369','1387','1390','1403'
#,'1464','1468' '1480','1499', '1561']
# Map field names to individual subject runs.
infosource = pe.Node(util.IdentityInterface(fields=['subject_id'
],
),
name="infosource")
infosource.iterables = [('subject_id', subject_list)]
# SelectFiles - to grab the data (alternative to DataGrabber)
templates = {'func': data_dir + '/fmriprep/sub-{subject_id}/ses-' + session + '/func/sub-{subject_id}_ses-' + session + '_task-Memory_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz',
'mask': data_dir + '/fmriprep/sub-{subject_id}/ses-' + session + '/func/sub-{subject_id}_ses-' + session + '_task-Memory_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz',
'regressors': data_dir + '/fmriprep/sub-{subject_id}/ses-' + session + '/func/sub-{subject_id}_ses-' + session + '_task-Memory_desc-confounds_regressors.tsv',
'events': data_dir + '/condition_files/withNumbers/sub-{subject_id}_ses-' + session + '_30sec_window' + '.csv'}
selectfiles = pe.Node(nio.SelectFiles(templates,
),
name="selectfiles")
#%%
# Extract motion parameters from regressors file
runinfo = pe.Node(util.Function(
input_names=['in_file', 'events_file', 'regressors_file', 'regressors_names', 'removeTR'],
function=_bids2nipypeinfo, output_names=['info', 'realign_file']),
name='runinfo')
runinfo.inputs.removeTR = removeTR
# Set the column names to be used from the confounds file
runinfo.inputs.regressors_names = ['dvars', 'framewise_displacement'] + \
['a_comp_cor_%02d' % i for i in range(6)] + ['cosine%02d' % i for i in range(4)]
#%%
skip = pe.Node(interface=fsl.ExtractROI(), name = 'skip')
skip.inputs.t_min = removeTR
skip.inputs.t_size = -1
#%%
# susan = pe.Node(interface=fsl.SUSAN(), name = 'susan') #create_susan_smooth()
# susan.inputs.fwhm = fwhm
# susan.inputs.brightness_threshold = 1000.0
#%%
modelfit = pe.Workflow(name='modelfit_ses_' + session, base_dir= output_dir)
"""
Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information.
"""
modelspec = pe.Node(interface=model.SpecifyModel(),
name="modelspec")
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = tr
modelspec.inputs.high_pass_filter_cutoff= 120
"""
Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf
file for analysis
"""
## Building contrasts
level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
cont1 = ['Trauma1_0>Sad1_0', 'T', ['trauma1_0', 'sad1_0'], [1, -1]]
cont2 = ['Trauma1_0>Relax1_0', 'T', ['trauma1_0', 'relax1_0'], [1, -1]]
cont3 = ['Sad1_0>Relax1_0', 'T', ['sad1_0', 'relax1_0'], [1, -1]]
cont4 = ['trauma1_0 > trauma2_0', 'T', ['trauma1_0', 'trauma2_0'], [1, -1]]
cont5 = ['Trauma1_0>Trauma1_2_3', 'T', ['trauma1_0', 'trauma1_2','trauma1_3'], [1, -0.5, -0.5]]
cont6 = ['Trauma1 > Trauma2', 'T', ['trauma1_0', 'trauma1_1', 'trauma1_2', 'trauma1_3', 'trauma2_0', 'trauma2_1', 'trauma2_2', 'trauma2_3'], [0.25, 0.25, 0.25, 0.25, -0.25, -0.25, -0.25, -0.25 ]]
contrasts = [cont1, cont2, cont3, cont4, cont5, cont6]
level1design.inputs.interscan_interval = tr
level1design.inputs.bases = {'dgamma': {'derivs': False}}
level1design.inputs.contrasts = contrasts
level1design.inputs.model_serial_correlations = True
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""
modelgen = pe.MapNode(
interface=fsl.FEATModel(),
name='modelgen',
iterfield=['fsf_file', 'ev_files'])
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""
mask = pe.Node(interface= fsl.maths.ApplyMask(), name = 'mask')
modelestimate = pe.MapNode(
interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000),
name='modelestimate',
iterfield=['design_file', 'in_file', 'tcon_file'])
"""
Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates
"""
#%%
modelfit.connect([
(infosource, selectfiles, [('subject_id', 'subject_id')]),
(selectfiles, runinfo, [('events','events_file'),('regressors','regressors_file')]),
(selectfiles, skip,[('func','in_file')]),
(skip,runinfo,[('roi_file','in_file')]),
# (selectfiles, susan, [('mask','mask_file')]),
# (susan, runinfo, [('smoothed_file', 'in_file')]),
(skip, modelspec, [('roi_file', 'functional_runs')]),
(runinfo, modelspec, [('info', 'subject_info'), ('realign_file', 'realignment_parameters')]),
(modelspec, level1design, [('session_info', 'session_info')]),
(level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files',
'ev_files')]),
(skip, mask, [('roi_file', 'in_file')]),
(selectfiles, mask, [('mask', 'mask_file')]),
(mask, modelestimate, [('out_file','in_file')]),
(modelgen, modelestimate, [('design_file', 'design_file'),('con_file', 'tcon_file'),('fcon_file','fcon_file')]),
])
#%%
modelfit.run('MultiProc', plugin_args={'n_procs': 5})
# %%
| [
"[email protected]"
] | |
31cd84d71b99be36816d823a9e04178b834ca8b0 | a13153cb5a915c760586e8a45db338a08901028b | /shangchengxitong.py | 810d4e891c718a9535ebb69c4de6150fa5511e01 | [] | no_license | 18730325640/store | 092627e5e5e94276e54033004ae95da1c5f7d2aa | 086c847f4a91fd67eedaa221e36ef4a9b3874842 | refs/heads/master | 2023-06-01T07:19:00.504648 | 2021-06-25T08:13:28 | 2021-06-25T08:13:28 | 378,825,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | print("hello world!")
print("-日期 服装名称 价格/件 库存数量 销售量/每日-")
print("-1号 羽绒服 253.6 500 10 -")
print("-2号 牛仔裤 86.3 600 60 -")
print("-3号 风衣 96.8 335 43 -")
print("-4号 皮草 135.9 855 63 -")
print("-5号 T恤 65.8 632 63 -")
print("-6号 衬衫 49.3 562 120 -")
print("-7号 牛仔裤 86.3 600 72 -")
print("-8号 羽绒服 253.6 500 69 -")
print("-9号 牛仔裤 86.3 600 35 -")
print("-10号 羽绒服 253.6 500 140 -")
print("-11号 牛仔裤 86.3 600 90 -")
print("-12号 皮草 135.9 855 24 -")
print("-13号 T恤 65.8 632 45 -")
print("-14号 风衣 96.8 335 25 -")
print("-15号 牛仔裤 86.3 600 60 -")
print("-16号 T恤 65.8 632 129 -")
print("-17号 羽绒服 253.6 500 10 -")
print("-18号 风衣 96.8 335 43 -")
print("-19号 T恤 65.8 632 63 -")
print("-20号 牛仔裤 86.3 600 60 -")
print("-21号 皮草 135.9 855 63 -")
print("-22号 风衣 96.8 335 60 -")
print("-23号 T恤 65.8 632 58 -")
print("-24号 牛仔裤 86.3 600 140 -")
print("-25号 T恤 65.8 632 48 -")
print("-26号 风衣 96.8 335 43 -")
print("-27号 皮草 135.9 855 57 -")
print("-28号 羽绒服 253.6 500 10 -")
print("-29号 T恤 65.8 632 63 -")
print("-30号 风衣 96.8 335 78 -")
print("总金额:¥",(253.6*10+86.3*60+96.8*43+135.9*63+65.8*63+49.3*120+86.3*72+253.6*69+86.3*35+253.6*140+86.3*90+135.9*24+65.8*45+96.8*25+86.3*60+65.8*129+253.6*10+96.8*43+65.8*63+86.3*60+135.9*63+96.8*60+65.8*58+86.3*140+65.8*48+96.8*43+135.9*57+96.8*78+253.6*10+65.8*63))
print("羽绒服销售额占比:%.2f%%"%(253.6*(10+69+140+10+10)/198400.60))
print("牛仔裤销售额占比:%.2f%%"%(86.3*(60+72+35+90+60+60+140)/198400.60))
print("风衣销售额占比:%.2f%%"%(96.8*(43+25+43+60+43+78)/198400.60))
print("皮草销售额占比:%.2f%%"%(135.9*(63+24+63+57)/198400.60))
print("T恤销售额占比:%.2f%%"%(65.8*(63+45+129+63+58+48+63)/198400.60))
print("衬衫销售额占比:%.2f%%"%(49.3*120/198400.60))
| [
"[email protected]"
] | |
0248493f86a691fd2dad48155858dec716040d67 | 2018b20688fcedc6a4f274ca77d71e03b54da609 | /user/models.py | 48875f6448257414a52afcea22a8aaf4c666514d | [] | no_license | harshithpabbati/django-graphql | 968ef28c9045e6db87a38383c00d0427ba336723 | 3b98528f805339035e841a178d56c48cdee0442d | refs/heads/master | 2022-06-16T01:59:12.207097 | 2021-07-21T13:42:01 | 2021-07-21T13:42:01 | 229,286,190 | 3 | 0 | null | 2022-05-25T02:56:43 | 2019-12-20T15:09:25 | Python | UTF-8 | Python | false | false | 825 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from framework.utils.cornflakes.decorators import model_config
@model_config()
class User(AbstractUser):
# id of the user, generated by cornflakes
id = models.AutoField(primary_key=True, null=False)
# email to communicate with the user
email = models.EmailField(unique=True, null=False, blank=False)
# boolean, whether email address is verified
isEmailVerified = models.BooleanField(default=False)
# varchar(255), stores user's first name
first_name = models.CharField(max_length=255, default='', blank=True, verbose_name='First Name')
# varchar(255), stores user's last name
last_name = models.CharField(max_length=255, default='', blank=True, verbose_name='Last Name')
__all__ = [
'User',
]
| [
"[email protected]"
] | |
d6421366ead0444f243530ea7171288c4fd74f01 | 3f85a2b5ebaf040d295bd5d98c49b59e9ea82643 | /extract_delf.py | d8911a4ee35ad704974c37e18e3ef631c5868f09 | [
"Apache-2.0"
] | permissive | vcg-uvic/image-matching-benchmark-baselines | 6b69d0db384c4af90b431f421077aa0f8e1ec04f | 01510c4d2c07cad89727013241a359bb22689a1b | refs/heads/master | 2021-01-04T00:35:04.375020 | 2020-10-01T17:19:54 | 2020-10-01T17:19:54 | 292,169,250 | 19 | 1 | Apache-2.0 | 2020-10-01T17:19:56 | 2020-09-02T03:29:45 | null | UTF-8 | Python | false | false | 9,403 | py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Forked from:
# https://github.com/tensorflow/models/blob/master/research/delf/delf/python/examples/extract_features.py
"""Extracts DELF features from a list of images, saving them to file.
The images must be in JPG format. The program checks if descriptors already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import json
import numpy as np
import h5py
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.platform import app
from delf import delf_config_pb2
from delf import feature_extractor
from delf import feature_io
cmd_args = None
# Extension of feature files.
_DELF_EXT = '.h5'
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def MakeExtractor(sess, config, import_scope=None):
"""Creates a function to extract features from an image.
Args:
sess: TensorFlow session to use.
config: DelfConfig proto containing the model configuration.
import_scope: Optional scope to use for model.
Returns:
Function that receives an image and returns features.
"""
tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING],
config.model_path,
import_scope=import_scope)
import_scope_prefix = import_scope + '/' if import_scope is not None else ''
input_image = sess.graph.get_tensor_by_name('%sinput_image:0' %
import_scope_prefix)
input_score_threshold = sess.graph.get_tensor_by_name(
'%sinput_abs_thres:0' % import_scope_prefix)
input_image_scales = sess.graph.get_tensor_by_name('%sinput_scales:0' %
import_scope_prefix)
input_max_feature_num = sess.graph.get_tensor_by_name(
'%sinput_max_feature_num:0' % import_scope_prefix)
boxes = sess.graph.get_tensor_by_name('%sboxes:0' % import_scope_prefix)
raw_descriptors = sess.graph.get_tensor_by_name('%sfeatures:0' %
import_scope_prefix)
feature_scales = sess.graph.get_tensor_by_name('%sscales:0' %
import_scope_prefix)
attention_with_extra_dim = sess.graph.get_tensor_by_name(
'%sscores:0' % import_scope_prefix)
attention = tf.reshape(attention_with_extra_dim,
[tf.shape(attention_with_extra_dim)[0]])
locations, descriptors = feature_extractor.DelfFeaturePostProcessing(
boxes, raw_descriptors, config)
def ExtractorFn(image):
"""Receives an image and returns DELF features.
Args:
image: Uint8 array with shape (height, width 3) containing the RGB image.
Returns:
Tuple (locations, descriptors, feature_scales, attention)
"""
return sess.run([locations, descriptors, feature_scales, attention],
feed_dict={
input_image: image,
input_score_threshold:
config.delf_local_config.score_threshold,
input_image_scales: list(config.image_scales),
input_max_feature_num:
config.delf_local_config.max_feature_num
})
return ExtractorFn
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Read list of images.
tf.logging.info('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
tf.logging.info('done! Found %d images', num_images)
# Parse DelfConfig proto.
config = delf_config_pb2.DelfConfig()
with tf.gfile.FastGFile(cmd_args.config_path, 'r') as f:
text_format.Merge(f.read(), config)
# Create output directory if necessary.
if not os.path.exists(cmd_args.output_dir):
os.makedirs(cmd_args.output_dir)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Reading list of images.
filename_queue = tf.train.string_input_producer(
image_paths, shuffle=False)
reader = tf.WholeFileReader()
_, value = reader.read(filename_queue)
image_tf = tf.image.decode_jpeg(value, channels=3)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
extractor_fn = MakeExtractor(sess, config)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
start = time.clock()
with h5py.File(os.path.join(cmd_args.output_dir, 'keypoints.h5'), 'w') as h5_kp, \
h5py.File(os.path.join(cmd_args.output_dir, 'descriptors.h5'), 'w') as h5_desc, \
h5py.File(os.path.join(cmd_args.output_dir, 'scores.h5'), 'w') as h5_score, \
h5py.File(os.path.join(cmd_args.output_dir, 'scales.h5'), 'w') as h5_scale:
for i in range(num_images):
key = os.path.splitext(os.path.basename(image_paths[i]))[0]
print('Processing "{}"'.format(key))
# Write to log-info once in a while.
if i == 0:
tf.logging.info(
'Starting to extract DELF features from images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.clock() - start)
tf.logging.info(
'Processing image %d out of %d, last %d '
'images took %f seconds', i, num_images,
_STATUS_CHECK_ITERATIONS, elapsed)
start = time.clock()
# # Get next image.
im = sess.run(image_tf)
# If descriptor already exists, skip its computation.
# out_desc_filename = os.path.splitext(os.path.basename(
# image_paths[i]))[0] + _DELF_EXT
# out_desc_fullpath = os.path.join(cmd_args.output_dir, out_desc_filename)
# if tf.gfile.Exists(out_desc_fullpath):
# tf.logging.info('Skipping %s', image_paths[i])
# continue
# Extract and save features.
(locations_out, descriptors_out, feature_scales_out,
attention_out) = extractor_fn(im)
# np.savez('{}.npz'.format(config.delf_local_config.max_feature_num), keypoints=locations_out)
# feature_io.WriteToFile(out_desc_fullpath, locations_out,
# feature_scales_out, descriptors_out,
# attention_out)
h5_kp[key] = locations_out[:, ::-1]
h5_desc[key] = descriptors_out
h5_scale[key] = feature_scales_out
h5_score[key] = attention_out
# Finalize enqueue threads.
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--config_path',
type=str,
default='misc/delf/delf_config_example.pbtxt',
help="""
Path to DelfConfig proto text file with configuration to be used for DELF
extraction.
""")
parser.add_argument(
'--list_images_path',
type=str,
help="""
Path to list of images whose DELF features will be extracted.
""")
parser.add_argument(
'--output_dir',
type=str,
default='../benchmark-features/delf',
help="""
Directory where DELF features will be written to. Each image's features
will be written to a file with same name, and extension replaced by .delf.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
] | |
98eb6a81f0078231ee88127e1e12dcc23b2cec74 | c17b2e12ce50446b3be07a218781475c8cc79825 | /constants.py | db7d619f0a0c6136f4173936ac737bc33b01d820 | [] | no_license | miscott/3G-AMP | 33e94bd5e020d608deb186014f39819a847a7b0f | 97a1b3277d32a4e0973476b1b0b688c3ca6018f3 | refs/heads/master | 2020-06-13T18:20:15.968666 | 2019-06-17T04:55:42 | 2019-06-24T04:55:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py |
# Some constants used for reading layers and layer properties from CFG file
LAYER_CONVOLUTIONAL = 'convolutional'
LAYER_UPSAMPLE = 'upsample'
LAYER_ROUTE = 'route'
LAYER_SHORTCUT = 'shortcut'
LAYER_MAX_POOL = 'maxpool'
LAYER_YOLO = 'yolo'
LAYER_NET = 'net'
LAYER_LEAKY = 'leaky'
LAYER_RELU = 'relu'
LAYER_LINEAR = 'linear'
LPROP_ACTIVATION = 'activation'
LPROP_STRIDE = 'stride'
LPROP_TYPE = 'type'
LPROP_BATCH_NORMALIZE = 'batch_normalize'
LPROP_FILTERS = 'filters'
LPROP_PAD = 'pad'
LPROP_LAYERS = 'layers'
LPROP_SIZE = 'size'
LPROP_ANCHORS = 'anchors'
LPROP_FROM = 'from'
LPROP_MASK = 'mask'
LPROP_HEIGHT = 'height'
LPROP_CLASSES = 'classes'
# some constants for reading the data config file
CONF_CLASSES = 'classes'
CONF_NAMES = 'names'
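# Illustrative use of these keys on one parsed cfg section (parse_cfg() is a
# hypothetical helper that returns one dict per [section] of the file):
# block = {LPROP_TYPE: LAYER_CONVOLUTIONAL, LPROP_FILTERS: '32'}
# if block[LPROP_TYPE] == LAYER_CONVOLUTIONAL:
#     filters = int(block[LPROP_FILTERS])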
| [
"[email protected]"
] | |
c0066fdbc9fdf711682287e1178d71f95464a27c | 68ebfad0c7968a27fc22b0fa1ec493dd7090cf7d | /ats_log_parser/__init__.py | a4a96f7b3aa4426f223570f26fc1fd9b03a733d2 | [
"Apache-2.0"
] | permissive | hong142101/ats_log_parser | 1697c398046c61fc0846732a1386c1d303a0b754 | bb91138fb670698568cd9dd28c4977e8cc0ba09c | refs/heads/master | 2021-01-25T06:49:20.518355 | 2017-07-13T08:39:11 | 2017-07-13T08:39:11 | 95,871,760 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,448 | py | import datetime
import logging
from .log_analyst.settle_process import SettleInformation
from .log_analyst.settle_process import __get_position_csv_name
from .log_analyst.settle_process import push_positions_and_trades_into_database
from .log_analyst.settle_process import summary_daily_trades
from .check_and_settle.check_position import check_position
from .check_and_settle.holding_profit_qxy import holding_position_mysql_profit
from .check_and_settle.record_account_balance import record_account_balance
from .profit_data_daily.get_profit_data import get_profit_data
DEFAULT_TAR_DATE = datetime.date.today()
GUIDE_DESCRIPTION00 = '''
input settle date, eg. 2016-04-20
default: %s
settle date:
''' % str(DEFAULT_TAR_DATE)
GUIDE_DESCRIPTION01 = '''
input closed date, eg. 2016-04-20
default: %s
closed date:
''' % str(DEFAULT_TAR_DATE)
GUIDE_DESCRIPTION1 = '''
settle daytime trades or nighttime?
options: daytime / nighttime / none
input:
'''
GUIDE_DESCRIPTION2 = '''
===============================================
##### trading settlement process ######
<task types>
parse :: append ats log into positions
settle :: update positions&trades into database
===============================================
select working:'''
def print_settle_info(settle_info, accounts):
info_str = ('>>>>>>'
'settle settings\n'
'account : %s\n'
'position date : %s\n'
'settle date : %s\n'
'closed date : %s\n'
'settle daytime : %s\n'
'--\n'
'datebase : %s\n'
'db_user : %s\n'
'--\n'
'log path : %s\n'
'position path : %s\n'
'>>>>>>'
% (str(settle_info.account_name),
str(settle_info.position_date),
str(settle_info.settle_date),
str(settle_info.closed_date),
str(settle_info.is_daytime),
str(settle_info.database_name),
str(settle_info.dbase_acc),
str(settle_info.log_file_folder),
str(settle_info.position_csv_folder))
)
print(info_str)
print('\naccounts: ', str(accounts))
return
# # Build per-strategy positions from the strategy config file and compare them
# # with the positions from the trade-log analysis file
# def check_position(settle_info):
# position_csv_path = __get_position_csv_name(settle_info)
# strategy_positions = parse_strategy_config(settle_info)
# csv_positions = parse_position_csv(position_csv_path)
#
# if csv_positions == strategy_positions:
# logging.info("mysql_positions == strategy_positions")
# return True
# else:
# logging.info('strategy_positions')
# for i in strategy_positions:
# logging.info(str(i))
# logging.info('holding_csv_position')
# for i in csv_positions:
# logging.info(str(i))
# logging.info("mysql_positions != strategy_positions")
# return False
def _run_settle_process_on_account(accounts, settle_info):
    # settlement date
s_date = input(GUIDE_DESCRIPTION00)
if not s_date:
settle_info.settle_date = DEFAULT_TAR_DATE
else:
settle_info.settle_date = datetime.datetime.strptime(s_date, '%Y-%m-%d').date()
    # closing-price date
s_date = input(GUIDE_DESCRIPTION01)
if not s_date:
settle_info.closed_date = DEFAULT_TAR_DATE
else:
settle_info.closed_date = datetime.datetime.strptime(s_date, '%Y-%m-%d').date()
    # settlement session (daytime / nighttime / whole day)
s_section = input(GUIDE_DESCRIPTION1)
if not s_section or s_section.lower() == 'none':
settle_info.is_daytime = None
elif s_section.lower() == 'daytime':
settle_info.is_daytime = True
elif s_section.lower() == 'nighttime':
settle_info.is_daytime = False
else:
raise Exception('unrecognized intraday section parameter')
# print info setting details
print_settle_info(settle_info, accounts)
print('\n')
for acc in accounts:
logging.info('\n#--------------------#\naccount: %s\n' % acc)
settle_info.account_name = acc
        # parse the trade logs
if settle_info.parse_log is True:
if summary_daily_trades(settle_info, write_unfilled_signals=False) is True:
logging.info('\nsummary_daily_trades SUCCESSFULLY!')
else:
logging.info('\nsummary_daily_trades Failed!')
        # check the .strat strategy positions against the positions not yet loaded into the database
if settle_info.check_position is True:
result = check_position(settle_info)
else:
result = True
        input()  # pause for operator confirmation before continuing
if result is False:
continue
if settle_info.push_into_database is True:
if push_positions_and_trades_into_database(settle_info) is True:
logging.info('\npush_positions_and_trades SUCCESSFULLY!')
else:
logging.info('\npush_positions_and_trades Failed!')
        # generate the holding-position profit file
if settle_info.holding_position_profit is True:
if holding_position_mysql_profit(settle_info):
logging.info('\nholding_position_mysql_profit SUCCESSFULLY!')
else:
logging.info('\nholding_position_mysql_profit Failed!')
        # generate the account's theoretical equity for each day
if settle_info.profit_data is True:
if get_profit_data(settle_info):
logging.info('\nget_profit_data SUCCESSFULLY!')
else:
logging.info('\nget_profit_data Failed!')
        # record the account's actual equity for the current period
if settle_info.record_account_balance is True:
record_account_balance(settle_info)
return
def run_settlement(accounts, settle_info):
# sinfo = SettleInformation()
# sinfo.settle_date = None
# sinfo.account_name = 'ly_jinqu_1'
# sinfo.database_name = 'trade_records'
# sinfo.dbase_ip = '172.18.93.153'
# sinfo.dbase_acc = 'qxy'
# sinfo.dbase_pw = ''
# sinfo.log_file_folder = r'.\trade_logs'
# sinfo.position_csv_folder = r'.\positions'
try:
_run_settle_process_on_account(accounts, settle_info)
except Exception as e:
logging.fatal(str(e))
input('press enter to quit ...')
| [
"[email protected]"
] | |
cda6fe910843971d4f9e396af4d64eb51229db8a | 9dc245909c30483263f68ad21e32ff0ab558bbcb | /api/serializers/students.py | 1de45c0ffffc2280f4e26e5083b0d92e88d9b827 | [] | no_license | katsos/ems-pgp | 36c8f521957d1b809c717be90eff80f8b5fc24ec | 687017f60f68133e677252efaab31d847587ea69 | refs/heads/master | 2023-02-09T22:34:29.358937 | 2020-09-04T15:45:22 | 2020-09-04T15:45:22 | 138,284,866 | 0 | 0 | null | 2023-01-26T01:18:54 | 2018-06-22T09:40:14 | JavaScript | UTF-8 | Python | false | false | 499 | py | from rest_framework.serializers import ModelSerializer
from api.models import Student
from .payments_shallow import PaymentSerializerShallow
from .circles_shallow import CirclesSerializerShallow
class StudentsSerializer(ModelSerializer):
circle = CirclesSerializerShallow(read_only=True)
payments = PaymentSerializerShallow(read_only=True, many=True)
class Meta:
model = Student
fields = ('id', 'id_university', 'name', 'surname', 'full_time', 'circle', 'payments')
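# Illustrative usage (assumes at least one saved Student; keys follow `fields`):
# serializer = StudentsSerializer(Student.objects.first())
# serializer.data  # -> {'id': ..., 'name': ..., 'circle': {...}, 'payments': [...]}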
| [
"[email protected]"
] | |
135f69897b740742d615a59e60256e99b761d86d | 1346ea1f255d3586442c8fc1afc0405794206e26 | /알고리즘/day24/babygincompare.py | 0506c4570d0fcac76a84ab75a16604fe95dd74ec | [] | no_license | Yun-Jongwon/TIL | 737b634b6e75723ac0043cda9c4f9acbc2a24686 | a3fc624ec340643cdbf98974bf6e6144eb06a42f | refs/heads/master | 2020-04-12T00:41:03.985080 | 2019-05-01T07:55:25 | 2019-05-01T07:55:25 | 162,208,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | def player1babygin():
for i in range(len(player1_data)-2):
for j in range(i+1,len(player1_data)-1):
for k in range(j+1,len(player1_data)):
candi=sorted([player1_data[i],player1_data[j],player1_data[k]])
if (candi[1]-1==candi[0] and candi[1]+1== candi[2]) or (candi[0]==candi[1] and candi[1]==candi[2]):
# print(candi)
return 1
return 0
def player2babygin():
for i in range(len(player2_data)-2):
for j in range(i+1,len(player2_data)-1):
for k in range(j+1,len(player2_data)):
candi=sorted([player2_data[i],player2_data[j],player2_data[k]])
if (candi[1]-1==candi[0] and candi[1]+1== candi[2]) or (candi[0]==candi[1] and candi[1]==candi[2]):
return 2
return 0
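# Baby-gin rule, for reference: a 3-card hand wins with a "run" (three
# consecutive values, e.g. 3-4-5) or a "triplet" (three equal values,
# e.g. 7-7-7); the two helpers above test every 3-card combination.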
T=int(input())
for t in range(T):
data=list(map(int,input().split()))
player1_data=[]
player2_data=[]
player1=0
player2=0
result=0
for d in range(len(data)):
if d%2==0:
player1_data.append(data[d])
# print(player1_data)
else:
player2_data.append(data[d])
# print(player2_data)
if d>=4:
if len(player2_data)>=3:
player1=player1babygin()
player2=player2babygin()
            else:
                player1 = player1babygin()
if player1==1 and (player2==0 or player2==2):
result=1
break
elif player1==0 and player2==2:
result=2
break
print('#{} {}'.format(t+1,result))
| [
"[email protected]"
] | |
df113094854ba04a033632a46969612a2810a824 | aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4 | /Q03__/04_Range_Sum_Query_2D_Immutable/Solution.py | 5a36350496b38c5b518c880e49d6cd71aaf91e13 | [
"Apache-2.0"
] | permissive | hsclinical/leetcode | e9d0e522e249a24b28ab00ddf8d514ec855110d7 | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | refs/heads/main | 2023-06-14T11:28:59.458901 | 2021-07-09T18:57:44 | 2021-07-09T18:57:44 | 319,078,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from typing import List
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
self.matrix = matrix
self.n = len(matrix)
if self.n != 0:
self.m = len(matrix[0])
else:
self.n = 0
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
if self.n != 0:
middleList = []
for i in range(row1, row2+1):
middleList.append(sum(self.matrix[i][col1:(col2+1)]))
return(sum(middleList))
else:
return(0)
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
| [
"[email protected]"
] | |
53fc8b0f4b4edd08561c3807270f46137ef31875 | fdedcfc5242a375bb08e0ec7e206d5560ce36f65 | /mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py | a56818c48f189c242a32d90f566f12115d14a425 | [
"Apache-2.0"
] | permissive | DriesAllaerts/mmctools | 2069fe02e0c7417cfbf6762d2db6646deb43123c | b5f88556c1df3935d1d36260c59e375423df6f1d | refs/heads/master | 2022-12-01T13:56:32.192494 | 2022-09-14T03:10:21 | 2022-09-14T03:10:21 | 187,232,582 | 0 | 0 | Apache-2.0 | 2019-05-29T20:19:52 | 2019-05-17T14:40:10 | null | UTF-8 | Python | false | false | 12,448 | py | # Copyright 2020 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Class for reading in `set` type of OpenFOAM sampling 'probes'
written by Regis Thedin ([email protected])
"""
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from .reader import Reader
class ProbeSets(Reader):
"""Stores a time array (t), and field arrays as attributes. The
fields have shape:
(Nt, N[, Nd])
where N is the number of probes and Nt is the number of samples.
Vectors have an additional dimension to denote vector components.
Symmetric tensors have an additional dimension to denote tensor components (xx, xy, xz, yy, yz, zz).
    The `set` type of probe is used when large numbers of data points need to be saved.
    Therefore, this class differs from `Probe` and is tailored for the specification of
    many sets and looping through the files with ease. The inputs of this class were created
    to make it easy to accommodate very large datasets, or to read only a subset of the saved data.
    If the need to use `set` arises, chances are the naming of the probes will be complex and likely
    include a sweep of a variable in its name. Because of that, the user can specify the name of the probes
split into prefix, suffix, variable sweep, and variables to save. It is also possible to specify a
sub-domain in which data is needed. It is assumed that all sets have the same points.
Sample usage:
from windtools.SOWFA6.postProcessing.probeSets import ProbeSets
        # read all times, all variables
        probeData = ProbeSets('path/to/case/postProcessing/probeName')
        # read specified fields
        probeData = ProbeSets('path/to/case/postProcessing/probeName', varList=['U','T'])
        # read specified sub-domain
        probeData = ProbeSets('path/to/case/postProcessing/probeName', xi=-2500, xf=2500, yi=-2500, yf=2500)
        # read all and account for added perturbation on the sampling points
        probeData = ProbeSets('path/to/case/postProcessing/probeName', posPert=-0.01)
        # read specified time dirs
        probeData = ProbeSets('path/to/case/postProcessing/probeName', tstart=30000, tend=30100)
# read certain files following complex naming convention
# e.g. if the probes are specified as
```
probeName
{
type sets;
name pointcloud;
// other settings...
fields ( U T );
sets
(
vmasts_h10
{
type points;
// ...
}
vmasts_h20
{
// ...
}
// ...
)
}
```
# and the user wishes to read to vmasts_h{10,50}_{T,U}.xy, then:
probeData = ProbeSet('path/to/case/postProcessing/probeName',
fprefix='vmasts_h', fparam=['10','50'], varList=['T','U'], fsuffix='.xy')
Notes:
- If `varList` is not specified, then all the probes are read, ignoring prefix, sufix, and parameters
- Pandas/dataframe is used internally even though the final object is of `Reader` type.
"""
def __init__(self, dpath=None, tstart=None, tend=None, varList='all', posPert=0.0,
xi=None, xf=None, yi=None, yf=None,
fprefix=None, fparam=None, fsuffix=None,
**kwargs):
self.xi = xi
self.xf = xf
self.yi = yi
self.yf = yf
self.fprefix = fprefix
self.fparam = fparam
self.fsuffix = fsuffix
self.posPert = posPert
self.tstart = tstart
self.tend = tend
self.varList = varList
self._allVars = {'U','UMean','T','TMean','TPrimeUPrimeMean','UPrime2Mean','p_rgh'}
super().__init__(dpath,includeDt=True,**kwargs)
def _trimtimes(self,tdirList, tstart=None,tend=None):
if (tstart is not None) or (tend is not None):
if tstart is None: tstart = 0.0
if tend is None: tend = 9e9
selected = [ (t >= tstart) & (t <= tend) for t in self.times ]
self.filelist = [tdirList[i] for i,b in enumerate(selected) if b ]
self.times = [self.times[i] for i,b in enumerate(selected) if b ]
self.Ntimes = len(self.times)
try:
tdirList = [tdirList[i] for i,b in enumerate(selected) if b ]
except AttributeError:
pass
return tdirList
def _processdirs(self, tdirList, trimOverlap=False, **kwargs):
print('Probe data saved:',len(self.simStartTimes), 'time steps, from', \
self.simStartTimes[0],'s to',self.simStartTimes[-1],'s')
# make varList iterable if not already a list
varList = [self.varList] if not isinstance(self.varList, (list)) else self.varList
# Create a list of all the probe files that will be processed
if varList[0].lower()=='all':
print('No varList given. Reading all probes.')
outputs = [ fname for fname in os.listdir(tdirList[0])
if os.path.isfile(tdirList[0]+os.sep+fname) ]
else:
# Make values iterable if not specified list
fprefix = [self.fprefix] if not isinstance(self.fprefix, (list)) else self.fprefix
fparam = [self.fparam] if not isinstance(self.fparam, (list)) else self.fparam
fsuffix = [self.fsuffix] if not isinstance(self.fsuffix, (list)) else self.fsuffix
# create a varList that contains all the files names
fileList = []
for var in varList:
for prefix in fprefix:
for param in fparam:
for suffix in fsuffix:
fileList.append( prefix + param + '_' + var + suffix )
outputs = fileList
# Get list of times and trim the data
self.times = [float(os.path.basename(p)) for p in self.simTimeDirs]
tdirList = self._trimtimes(tdirList,self.tstart,self.tend)
try:
print('Probe data requested:',len(tdirList), 'time steps, from', \
float(os.path.basename(tdirList[0])),'s to', \
float(os.path.basename(tdirList[-1])),'s')
except IndexError:
raise ValueError('End time needs to be greater than the start time')
# Raise an error if list is empty
if not tdirList:
raise ValueError('No time directories found')
# Process all data
for field in outputs:
arrays = [ self._read_data( tdir,field ) for tdir in tdirList ]
# combine into a single array and trim end of time series
arrays = np.concatenate(arrays)[:self.imax,:]
# parse the name to create the right variable
param, var = self._parseProbeName(field)
# add the zagl to the array
arrays = np.hstack((arrays[:,:4], \
np.full((arrays.shape[0],1),param), \
arrays[:,4:]))
# append to (or create) a variable attribute
try:
setattr(self,var,np.concatenate((getattr(self,var),arrays)))
except AttributeError:
setattr( self, var, arrays )
if not var in self._processed:
self._processed.append(var)
print(' read',field)
self.t = np.unique(arrays[:,0])
self.Nt = len(self.t)
        # sort each processed field by timestamp (first column)
        for var in self._allVars:
            try:
                arr = getattr(self, var)
                setattr(self, var, arr[np.argsort(arr[:, 0])])
            except AttributeError:
                pass
def _parseProbeName(self, field):
# Example: get 'vmasts_50mGrid_h30_T.xy' and return param=30, var='T'
# Remove the prefix from the full field name
f = field.replace(self.fprefix,'')
# Substitude the first underscore with a dot and split array
f = f.replace('_','.',1).split('.')
for i in set(f).intersection(self._allVars):
var = i
param = int(f[-3])
return param, var
def _read_data(self, dpath, fname):
fpath = dpath + os.sep + fname
currentTime = float(os.path.basename(dpath))
with open(fpath) as f:
try:
# read the actual data from probes
array = self._read_probe_posAndData(f)
# add current time step info to first column
array = np.c_[np.full(array.shape[0],currentTime), array]
except IOError:
print('unable to read '+ fpath)
return array
def _read_probe_posAndData(self,f):
out = []
# Pandas is a LOT faster than reading the file line by line
out = pd.read_csv(f.name,header=None,comment='#',sep='\t')
# Add position perturbation to x, y, zabs
out[[0,1,2]] = out[[0,1,2]].add(self.posPert)
# clip spatial data
out = self._trimpositions(out, self.xi, self.xf, self.yi, self.yf)
out = out.to_numpy(dtype=float)
self.N = len(out)
return out
def _trimpositions(self, df, xi=None,xf=None, yi=None, yf=None):
if (xi is not None) and (xf is not None):
df = df.loc[ (df[0]>=xi) & (df[0]<=xf) ]
elif xi is not None:
df = df.loc[ df[0]>=xi ]
elif xf is not None:
df = df.loc[ df[0]<=xf ]
if (yi is not None) and (yf is not None):
df = df.loc[ (df[1]>=yi) & (df[1]<=yf) ]
elif yi is not None:
df = df.loc[ df[1]>=yi ]
elif yf is not None:
df = df.loc[ df[1]<=yf ]
return df
#============================================================================
#
# DATA I/O
#
#============================================================================
def to_pandas(self,itime=None,fields=None,dtype=None):
#output all vars
if fields is None:
fields = self._processed
# select time range
if itime is None:
tindices = range(len(self.t))
else:
try:
iter(itime)
except TypeError:
# specified single time index
tindices = [itime]
else:
# specified list of indices
tindices = itime
# create dataframes for each field
print('Creating dataframe ...')
data = {}
for var in fields:
print('processing', var)
F = getattr(self,var)
# Fill in data
data['time'] = F[:,0]
data['x'] = F[:,1]
data['y'] = F[:,2]
data['zabs'] = F[:,3]
data['zagl'] = F[:,4]
if F.shape[1]==6:
# scalar
data[var] = F[:,5:].flatten()
elif F.shape[1]==8:
# vector
for j,name in enumerate(['x','y','z']):
data[var+name] = F[:,5+j].flatten()
elif F.shape[1]==11:
# symmetric tensor
for j,name in enumerate(['xx','xy','xz','yy','yz','zz']):
data[var+name] = F[:,5+j].flatten()
df = pd.DataFrame(data=data,dtype=dtype)
return df.sort_values(['time','x','y','zabs','zagl']).set_index(['time','x','y','zagl'])
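    # Illustrative conversion once probes have been read (field name assumed):
    #   df = probeData.to_pandas(fields=['U'])
    #   df.head()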
def to_netcdf(self,fname,fieldDescriptions={},fieldUnits={}):
raise NotImplementedError('Not available for ProbeSet class.')
| [
"[email protected]"
] | |
92d365a4e83436e037e955767a5d9e4ff75eb574 | 8de1d184bfe93977d394531c17e7ad6828e4fec6 | /pythoncode/continue.py | 0c343caf173a97f6f3348e325ce898cf4da76be5 | [] | no_license | sabeethaB/python | ae3d88fec4fb93e948e9a8d975bd30d33ca2768a | e9f1c6929e116031510ecffa7743308b70b37f59 | refs/heads/master | 2020-03-29T01:35:00.038536 | 2018-09-19T05:21:38 | 2018-09-19T05:21:38 | 149,395,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | i=1
while i < 6:
    i += 1
    if i == 3:
        continue
    print(i)
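# prints 2, 4, 5 and 6: the iteration where i == 3 is skipped by `continue`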
| [
"[email protected]"
] | |
35db51cf348a96de0146b863044a28316676b0a6 | d972aa228eaea8c2ec938f6402c8a4238f846e5d | /rl/utils/general.py | 274b3d31177240a2b75bfc6aacd980e31c6329a8 | [] | no_license | austospumanto/ateam-project | 33243df80d045fde2895d2917b123f204d04214c | 2a1e13f462e904750da09dd54fc1e3b3248cd52e | refs/heads/master | 2020-12-30T13:27:50.127193 | 2017-06-10T05:13:57 | 2017-06-10T05:13:57 | 91,215,463 | 5 | 2 | null | 2017-06-01T01:36:13 | 2017-05-14T02:42:32 | Python | UTF-8 | Python | false | false | 5,476 | py | import time
import sys
import logging
import numpy as np
from collections import deque
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def export_plot(ys, ylabel, filename):
"""
Export a plot in filename
Args:
ys: (list) of float / int to plot
filename: (string) directory
"""
plt.figure()
plt.plot(range(len(ys)), ys)
plt.xlabel("Epoch")
plt.ylabel(ylabel)
plt.savefig(filename)
plt.close()
def get_logger(filename, logger_name='logger'):
"""
Return a logger instance to a file
"""
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger
class Progbar(object):
"""Progbar class copied from keras (https://github.com/fchollet/keras/)
Displays a progress bar.
Small edit : added strict arg to update
# Arguments
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, discount=0.9):
self.width = width
self.target = target
self.sum_values = {}
self.exp_avg = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
self.discount = discount
def update(self, current, values=[], exact=[], strict=[], exp_avg=[]):
"""
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for k, v in exact:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1]
for k, v in strict:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = v
for k, v in exp_avg:
if k not in self.exp_avg:
self.exp_avg[k] = v
else:
self.exp_avg[k] *= self.discount
self.exp_avg[k] += (1-self.discount)*v
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current)/self.target
prog_width = int(self.width*prog)
if prog_width > 0:
bar += ('='*(prog_width-1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.'*(self.width-prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit*(self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if type(self.sum_values[k]) is list:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
            for k, v in self.exp_avg.items():  # items() works on Python 2 and 3
info += ' - %s: %.4f' % (k, v)
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width-self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + "\n")
def add(self, n, values=[]):
self.update(self.seen_so_far+n, values)
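# Illustrative usage (target and values are hypothetical):
# bar = Progbar(target=1000)
# for step in range(1000):
#     bar.update(step + 1, values=[('loss', 0.25)])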
| [
"[email protected]"
] | |
a9e9db890f03852b409988c69a8c137775b9b5b9 | b164b758ef009926a9c864259111476a98f3d289 | /utils.py | ce49c7684349c44515166e4b0ba065272eaf920f | [] | no_license | FireBrother/ai_challenger_caption | 297e4cb176ebe737ac790d230c352d302558c019 | 459a3047f04b67a8d052377491655a453a22109f | refs/heads/master | 2021-06-26T22:22:36.735266 | 2017-09-17T09:06:25 | 2017-09-17T09:06:25 | 103,229,515 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | import time
import numpy as np
import redis
global_redis = redis.Redis(host='162.105.86.208', port=6379, db=1)
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print '%r %2.2f sec' % \
(method.__name__, te - ts)
return result
return timed
@timeit
def load_challenger_ai_data(path='./', split='train'):
valid_split = ['train']
if split not in valid_split:
raise ValueError('split should be in %r, but %r got.' % (valid_split, split))
def get_feture_by_id(image_id):
    # defined at module level so the __main__ block below can call it;
    # restores a numpy array serialized into redis as raw bytes plus a
    # dtype/shape info string of the form "dtype|dim0|dim1|..."
    value = global_redis.get('image_feature:{}:value'.format(image_id))
    info = global_redis.get('image_feature:{}:info'.format(image_id))
    d_type = info.split('|')[0]
    size = [int(x) for x in info.split('|')[1:]]
    return np.fromstring(value, d_type).reshape(size)
if __name__ == '__main__':
# load_challenger_ai_data(split='train')
print get_feture_by_id('8f00f3d0f1008e085ab660e70dffced16a8259f6.jpg')
| [
"[email protected]"
] | |
1c25fdc3c71bd1c13e880d528341cc4b0e788efd | f54d702c1289b2b78f423850d7fedba6c9378126 | /Mathematics/Fundamentals/handshake.py | b505905cd0327f05e06069e006057674fa76dc6a | [
"MIT"
] | permissive | ekant1999/HackerRank | 81e6ac5bec8307bca2bd1debb169f2acdf239b66 | 084d4550b4eaf130837ab26a4efdbcaf8b667cdc | refs/heads/master | 2020-05-02T09:19:10.102144 | 2016-10-27T04:10:28 | 2016-10-27T04:10:28 | 177,868,424 | 0 | 0 | MIT | 2019-03-26T21:04:17 | 2019-03-26T21:04:17 | null | UTF-8 | Python | false | false | 243 | py | # Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
t = int(raw_input())
for i in range(t):
n = int(raw_input())
handshakes = n*(n-1)/2 # Note this is nC2 i.e. n "choose" 2
print handshakes | [
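# Worked check of the formula: n = 4 people gives 4*3/2 = 6 handshakes.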
"[email protected]"
] | |
0ffe61f0c5fc6dd5c9c0e340692739b892566dc0 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/Juniper-TSM-CONF.py | 71fad16c3643ce9206c5564ee369544ce182b392 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 3,237 | py | #
# PySNMP MIB module Juniper-TSM-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-TSM-CONF
# Produced by pysmi-0.3.4 at Wed May 1 14:04:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, MibIdentifier, TimeTicks, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, Integer32, Gauge32, Unsigned32, Counter64, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "MibIdentifier", "TimeTicks", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "Integer32", "Gauge32", "Unsigned32", "Counter64", "Bits", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
juniTsmAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 67))
juniTsmAgent.setRevisions(('2003-10-27 22:50',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: juniTsmAgent.setRevisionsDescriptions(('The initial release of this management information module.',))
if mibBuilder.loadTexts: juniTsmAgent.setLastUpdated('200310272250Z')
if mibBuilder.loadTexts: juniTsmAgent.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: juniTsmAgent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: [email protected]')
if mibBuilder.loadTexts: juniTsmAgent.setDescription('The agent capabilities definitions for the Terminal Server Management (TSM) component of the SNMP agent in the Juniper E-series family of products.')
juniTsmAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 67, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniTsmAgentV1 = juniTsmAgentV1.setProductRelease('Version 1 of the Terminal Server Management (TSM) component of the\n JUNOSe SNMP agent. This version of the TSM component is supported in\n JUNOSe 5.3 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniTsmAgentV1 = juniTsmAgentV1.setStatus('current')
if mibBuilder.loadTexts: juniTsmAgentV1.setDescription('The MIB supported by the JUNOSe SNMP agent for the TSM application.')
mibBuilder.exportSymbols("Juniper-TSM-CONF", PYSNMP_MODULE_ID=juniTsmAgent, juniTsmAgent=juniTsmAgent, juniTsmAgentV1=juniTsmAgentV1)
| [
"[email protected]"
] | |
64ec13558b432a040fa4e609ba98721aafdcbb3f | 5baa6fd1d751efdd80eaaf1675e17fd15ae674a1 | /demo_dot_matrix_phat2.py | 2cbefd08cfc8882c5eeadf68dd43d6cc031d1be2 | [
"MIT"
] | permissive | gexpander/demo-py-examples | 2bd7d9aa98b050eeef1977f4c248d5babdd530af | 9f6f2eb198c2329cba5a957e51dc50294555c17c | refs/heads/master | 2020-03-20T20:56:54.592936 | 2018-06-18T05:57:09 | 2018-06-18T05:57:09 | 137,715,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,799 | py | #!/bin/env python3
import random
import gex
import time
# This is an adaptation of the micro dot phat library
# - the only change needed was replacing the smbus class with the GEX unit driver
ADDR = 0x61
MODE = 0b00011000
OPTS = 0b00001110 # 1110 = 35mA, 0000 = 40mA
CMD_BRIGHTNESS = 0x19
CMD_MODE = 0x00
CMD_UPDATE = 0x0C
CMD_OPTIONS = 0x0D
CMD_MATRIX_1 = 0x01
CMD_MATRIX_2 = 0x0E
MATRIX_1 = 0
MATRIX_2 = 1
class NanoMatrix:
'''
_BUF_MATRIX_1 = [ # Green
#Col 1 2 3 4 5
0b00000000, # Row 1
0b00000000, # Row 2
0b00000000, # Row 3
0b00000000, # Row 4
0b00000000, # Row 5
0b00000000, # Row 6
0b10000000, # Row 7, bit 8 = decimal place
0b00000000
]
_BUF_MATRIX_2 = [ # Red
#Row 8 7 6 5 4 3 2 1
0b01111111, # Col 1, bottom to top
0b01111111, # Col 2
0b01111111, # Col 3
0b01111111, # Col 4
0b01111111, # Col 5
0b00000000,
0b00000000,
0b01000000 # bit 7, decimal place
]
_BUF_MATRIX_1 = [0] * 8
_BUF_MATRIX_2 = [0] * 8
'''
def __init__(self, bus:gex.I2C, address=ADDR):
self.address = address
self._brightness = 127
self.bus = bus
self.bus.write_byte_data(self.address, CMD_MODE, MODE)
self.bus.write_byte_data(self.address, CMD_OPTIONS, OPTS)
self.bus.write_byte_data(self.address, CMD_BRIGHTNESS, self._brightness)
self._BUF_MATRIX_1 = [0] * 8
self._BUF_MATRIX_2 = [0] * 8
def set_brightness(self, brightness):
self._brightness = int(brightness * 127)
if self._brightness > 127: self._brightness = 127
self.bus.write_byte_data(self.address, CMD_BRIGHTNESS, self._brightness)
def set_decimal(self, m, c):
if m == MATRIX_1:
if c == 1:
self._BUF_MATRIX_1[6] |= 0b10000000
else:
self._BUF_MATRIX_1[6] &= 0b01111111
elif m == MATRIX_2:
if c == 1:
self._BUF_MATRIX_2[7] |= 0b01000000
else:
self._BUF_MATRIX_2[7] &= 0b10111111
#self.update()
def set(self, m, data):
for y in range(7):
self.set_row(m, y, data[y])
def set_row(self, m, r, data):
for x in range(5):
self.set_pixel(m, x, r, (data & (1 << (4-x))) > 0)
def set_col(self, m, c, data):
for y in range(7):
self.set_pixel(m, c, y, (data & (1 << y)) > 0)
def set_pixel(self, m, x, y, c):
if m == MATRIX_1:
if c == 1:
self._BUF_MATRIX_1[y] |= (0b1 << x)
else:
self._BUF_MATRIX_1[y] &= ~(0b1 << x)
elif m == MATRIX_2:
if c == 1:
self._BUF_MATRIX_2[x] |= (0b1 << y)
else:
self._BUF_MATRIX_2[x] &= ~(0b1 << y)
#self.update()
def clear(self, m):
if m == MATRIX_1:
self._BUF_MATRIX_1 = [0] * 8
elif m == MATRIX_2:
self._BUF_MATRIX_2 = [0] * 8
self.update()
def update(self):
for x in range(10):
try:
self.bus.write_i2c_block_data(self.address, CMD_MATRIX_1, self._BUF_MATRIX_1)
self.bus.write_i2c_block_data(self.address, CMD_MATRIX_2, self._BUF_MATRIX_2)
self.bus.write_byte_data(self.address, CMD_UPDATE, 0x01)
break
except IOError:
print("IO Error")
with gex.Client(gex.TrxRawUSB()) as client:
bus = gex.I2C(client, 'i2c')
n1 = NanoMatrix(bus, 0x61)
n2 = NanoMatrix(bus, 0x62)
n3 = NanoMatrix(bus, 0x63)
n1.set_pixel(0, 0, 0, 1)
n1.set_pixel(0, 4, 0, 1)
n1.set_pixel(0, 0, 6, 1)
n1.set_pixel(0, 4, 6, 1)
n1.set_pixel(1, 0, 0, 1)
n1.set_pixel(1, 4, 0, 1)
n1.set_pixel(1, 0, 3, 1)
n1.set_pixel(1, 4, 3, 1)
n2.set_pixel(0, 0, 2, 1)
n2.set_pixel(0, 4, 2, 1)
n2.set_pixel(0, 0, 5, 1)
n2.set_pixel(0, 4, 5, 1)
n2.set_pixel(1, 0, 0, 1)
n2.set_pixel(1, 4, 0, 1)
n2.set_pixel(1, 0, 6, 1)
n2.set_pixel(1, 4, 6, 1)
n3.set_pixel(0, 1, 0, 1)
n3.set_pixel(0, 3, 0, 1)
n3.set_pixel(0, 1, 6, 1)
n3.set_pixel(0, 3, 6, 1)
n3.set_pixel(1, 1, 1, 1)
n3.set_pixel(1, 3, 1, 1)
n3.set_pixel(1, 1, 5, 1)
n3.set_pixel(1, 3, 5, 1)
n1.update()
n2.update()
n3.update()
b1 = 64
b2 = 64
b3 = 64
while True:
b1 += random.randint(-20, 15)
b2 += random.randint(-20, 18)
b3 += random.randint(-15, 13)
if b1 < 0: b1 = 0
if b2 < 0: b2 = 0
if b3 < 0: b3 = 0
if b1 > 127: b1 = 127
if b2 > 127: b2 = 127
if b3 > 127: b3 = 127
n1.set_brightness(b1)
n2.set_brightness(b2)
n3.set_brightness(b3)
time.sleep(0.05)
| [
"[email protected]"
] | |
0454b62193300ff28a1bdd135773765f3d05fb2d | 85fe1f98bc12739ab87b8474c284d65ce96d32bd | /src/paquerette_0.py | 3b162a8b2dbcbf18328de5516352cab40e029771 | [] | no_license | nojhan/aapssfc | d7405e883d907c29ce32b3ff38a1515d58de5458 | dfd9c869d525d71bd2d38e461559cbfdf3c2d757 | refs/heads/master | 2016-09-06T01:32:05.657091 | 2015-03-03T14:56:44 | 2015-03-03T14:56:44 | 3,401,502 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,922 | py | #encoding: utf-8
import math
import Image
import ImageDraw
import ImageColor
# Scene properties
width = 500
perspective = width # Perspective distortion factor
cameraZ = -width # Camera pull-back distance
zBuffer = {} # "Layer" used to handle overlapping points (a z-buffer)
# Create an empty image object to draw on
im = Image.new("RGB", (width,width) )
draw = ImageDraw.Draw(im)
#####################
# Drawing functions #
#####################
# The coordinate system is chosen from the camera's point of view:
#   z
#  /
# +-- x
# |
# y
def sphere(a, b, radius):
"""Prend des coordonnées 2D ("a" et "b") entre 0 et 1 et les déforment
de manière à les placer dans un cercle de rayon "radius".
Ajoute de la profondeur le long de l'axe b et un gradient de couleur jaune."""
# Angle en radian (pi/2 = 180°)
angle = a * math.pi * 2
# Centre du cercle
x0 = radius*2
y0 = radius*2
return {"x":math.cos(angle) * radius * b + x0, # projection de a vers x
"y":math.sin(angle) * radius * b + y0 ,# projection de b vers y
"z": b * radius - radius / 2, # profondeur le long de b
"r": 50 + math.floor((1 - b**2) * 300),# gradient de couleur rouge
"g": 50 + math.floor((1 - b**2) * 200),# gradient de couleur verte
"b": 0, # pas de couleur bleue
}
def petal(a,b,radius):
"""Prends des coordonnées 2D dans [0,1], les déforment dans un cercle de rayon "radius"
et ne renvoie que les points compris dans une troncature de ce cercle.
Ajoute de la profondeur et un gradient de couleur blanche."""
# Projection de a et b dans x et y
x = a * radius*2
y = b * radius*2
# Centre du cercle
x0 = radius
y0 = radius
# Si la distance entre le centre rayon et le point dessiné est inférieure à la taille du rayon
if math.sqrt((x - x0) * (x - x0) + (y - y0) * (y - y0)) < radius:
return {"x": x,
"y": y * (1 + b) / 2, # y de plus en plus petit vers le bas
"z": b * radius - radius / 2, # Profondeur sphérique
"r": 100 + math.floor((1 - b) * 155),# Gradient blanc :
"g": 100 + math.floor((1 - b) * 155),# toutes les composantes…
"b": 100 + math.floor((1 - b) * 155) # … évoluent en fonction de b.
}
else:
# Sinon, on ne veut pas dessiner de point : on ne renvoie rien
return None
def cylinder( a,b, radius=100, length=400 ):
"""Déforme des coordonnées dans [0,1] en un cylindre de rayon "radius" et de longueur "length".
Ajoute une profondeur sur b et un gradient vert."""
angle = a * 2*math.pi
return {"x": math.cos(angle) * radius,
"y": math.sin(angle) * radius,
"z": b * length - length / 2, # le cylindre est centré
"r": 0,
"g": math.floor(b*255),
"b": 0 }
######################################
# Coordinate manipulation functions  #
######################################
# The "rotate_*" functions all move a point "d" by a rotation of angle "a"
# around a given axis.
# "Points" here are dictionaries with the keys "x", "y" and "z".
def rotate_x( d, a ):
"""Rotation du point d d'un angle a autour de l'axe x."""
# Si l'objet "d" existe (c'est à dire s'il n'est pas "None")
if d:
# Rotation
d["y"] = d["y"] * math.cos(a) - d["z"] * math.sin(a)
d["z"] = d["y"] * math.sin(a) + d["z"] * math.cos(a)
return d
else:
return None
def rotate_y( d, a ):
"""Rotation du point d d'un angle a autour de l'axe y."""
if d:
d["z"] = d["z"] * math.cos(a) - d["x"] * math.sin(a)
d["x"] = d["z"] * math.sin(a) + d["x"] * math.cos(a)
return d
else:
return None
def rotate_z( d, a ):
"""Rotation du point d d'un angle a autour de l'axe z."""
if d:
d["x"] = d["x"] * math.cos(a) - d["y"] * math.sin(a)
d["y"] = d["x"] * math.sin(a) + d["y"] * math.cos(a)
return d
else:
return None
def move( d, dx, dy, dz ):
"""Déplace un point "d" selon des distances données par "dx", "dy" et "dz"."""
if d:
# les "d*" peuvent être positifs ou négatifs
d["x"] = d["x"] + dx
d["y"] = d["y"] + dy
d["z"] = d["z"] + dz
return d
else:
return None
def draw_point( point ):
"""Projette un point donné en coordonnées 3D sur une image (2D, par définition)."""
# Si le point n'est pas en dehors de la forme (ce qui peut arriver si on dessine un pétale).
if point:
# Calcul le projetté de la coordonné "x" selon la perspective et le recul de la caméra.
# Notez que l'axe "z" est utilisé dans les deux calculs, au profit de "x" et "y".
pX = math.floor( (point["x"] * perspective) / (point["z"] - cameraZ) + width/2 )
pY = math.floor( (point["y"] * perspective) / (point["z"] - cameraZ) + width/2 )
# Coordonnées du pixel dans le calque de superposition.
zbi = (pY,pX)
# Si le pixel n'a jamais été dessiné OU si c'est le cas…
# … mais que sa coordonnée "z" est inférieur au pixel déjà dessiné
# (et est donc plus proche de la caméra).
if not zBuffer.has_key(zbi) or point["z"] < zBuffer[zbi]:
# On garde en mémoire le pixel dessiné dans le calque de superposition.
zBuffer[zbi] = point["z"]
# Dessine le pixel dans l'image.
fill = ( int(point["r"]), int(point["g"]), int(point["b"]) )
draw.point( (int(pX),int(pY)), fill )
import random
# Number of points to draw
for i in range(90000):
    # Values in [0,1)
a = random.random()
b = random.random()
    # Radii of the heart and of the petals
r_heart = 25
r_petal = 50
    # heart
draw_point( sphere( a, b, r_heart ) )
    # top petal
    # The translation values are arbitrary and depend on what you want to achieve.
draw_point( move( petal( a,b, r_petal ), 0, -70, 0 ) )
    # Likewise for the rotations.
    # bottom petal
draw_point( move( rotate_x( petal( a,b, r_petal ), 1.15*math.pi ), -2, 141, -10 ) )
    # left petal
draw_point( move( rotate_z( rotate_x( petal( a,b, r_petal ), -0.3*math.pi ), math.pi/6 ), -50, 10, 25 ) )
    # right petal
draw_point( move( rotate_z( rotate_x( petal( a,b, r_petal ), -0.3*math.pi ), -math.pi/6 ), 60, 55, 25 ) )
    # stem
draw_point( move( rotate_x( cylinder( a,b, r_heart/4, 400 ), math.pi/2 ), 55, 250, 250 ) )
# Write the image to a file in "Portable Network Graphics" format, losslessly compressed.
im.save("paquerette.png", "PNG")
| [
"[email protected]"
] | |
621037ff80da3805acd5ade5aed482567c5cf97d | ffa402b321ad11b79ba977149b563c73f5c141a9 | /examples/taint/spec.py | c9c8de368b1ada24b77fe905c6e055b93c12779f | [] | no_license | uw-unsat/nickel | 151bfa76ba0e75fbc710eed68cbed830f9dbad85 | 8439aedd75b48ebf9b5344eebfc2a0c70b4eb73d | refs/heads/master | 2021-07-23T11:08:50.663555 | 2020-03-16T23:53:14 | 2020-03-17T05:29:51 | 247,836,078 | 2 | 0 | null | 2021-04-20T19:26:43 | 2020-03-16T23:24:37 | C | UTF-8 | Python | false | false | 1,517 | py | import z3
import errno
import libirpy.util as util
import datatypes as dt
def sys_get(old):
cond = util.Cases(
default=old.procs[old.current].value
)
return cond, old
def sys_get_level(old):
cond = util.Cases(
default=util.If(old.procs[old.current].level == dt.TAINTED, util.i64(1), util.i64(0))
)
return cond, old
def sys_send(old, dst, value):
src = old.current
cond = util.Cases(
# a real system should validate dst
# removing this check will break NI
(z3.And(old.procs[src].level == dt.TAINTED,
old.procs[dst].level == dt.UNTAINTED), -errno.EINVAL),
default=util.i64(0),
)
new = old.copy()
new.procs[dst].value = value
return cond, util.If(cond.cond(), old, new)
def sys_send_floating(old, dst, value):
src = old.current
cond = util.Cases(
default=util.i64(0),
)
new = old.copy()
new.procs[dst].value = value
# bump dst level to tainted from untainted if current is tainted
new.procs[dst].level = util.If(
z3.And(
old.procs[src].level == dt.TAINTED,
old.procs[dst].level == dt.UNTAINTED
),
dt.TAINTED,
old.procs[dst].level
)
return cond, util.If(cond.cond(), old, new)
def sys_raise(old):
cond = util.Cases(
default=util.i64(0)
)
new = old.copy()
    # changing this to dt.UNTAINTED would break noninterference (NI)
new.procs[new.current].level = dt.TAINTED
return cond, new
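# Illustrative handler contract (state object `st` is hypothetical): each
# sys_* function returns a (Cases, new_state) pair, e.g.
#   cond, st2 = sys_send(st, dst, value)
# where cond.cond() encodes the failure condition and st2 is the post-state.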
| [
"[email protected]"
] | |
3fda8b094dee0a7b6f1190ab84ed0c644134fbe0 | f96f6b0a833699eecdfce07d28ae680532e49dc4 | /train.py | f98b6777dbab7c7b088137ea90061755f25d8c53 | [] | no_license | wangyan841331749/Cats_vs_Dogs | cdfb234605a4493a085b6bc251159e55f37b2b57 | 4d77a4f34d2667e2d678acfea1ab62c5ac12704e | refs/heads/master | 2022-01-17T11:36:19.965026 | 2019-07-18T07:08:51 | 2019-07-18T07:08:51 | 197,531,871 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | import time
from load_data import *
from model import *
# 训练模型
def training():
N_CLASSES = 2
IMG_SIZE = 208
BATCH_SIZE = 10
CAPACITY = 200
MAX_STEP = 10000
LEARNING_RATE = 1e-4
    # Load the training images
image_dir = 'data/train'
    logs_dir = 'logs_1'  # checkpoint save directory
sess = tf.Session()
train_list = get_all_files(image_dir, True)
image_train_batch, label_train_batch = get_batch(train_list, IMG_SIZE, BATCH_SIZE, CAPACITY, True)
train_logits = inference(image_train_batch, N_CLASSES)
train_loss = losses(train_logits, label_train_batch)
train_acc = evaluation(train_logits, label_train_batch)
train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(train_loss)
var_list = tf.trainable_variables()
paras_count = tf.reduce_sum([tf.reduce_prod(v.shape) for v in var_list])
    print('Number of parameters: %d' % sess.run(paras_count), end='\n\n')
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
s_t = time.time()
try:
for step in range(MAX_STEP):
if coord.should_stop():
break
_, loss, acc = sess.run([train_op, train_loss, train_acc])
            if step % 100 == 0:  # periodically log and display training progress
runtime = time.time() - s_t
print('Step: %6d, loss: %.8f, accuracy: %.2f%%, time:%.2fs, time left: %.2fhours'
% (step, loss, acc * 100, runtime, (MAX_STEP - step) * runtime / 360000))
s_t = time.time()
            if step % 1000 == 0 or step == MAX_STEP - 1:  # save a checkpoint
checkpoint_path = os.path.join(logs_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
except tf.errors.OutOfRangeError:
print('Done.')
finally:
coord.request_stop()
coord.join(threads=threads)
sess.close()
if __name__ == '__main__':
training()
| [
"[email protected]"
] | |
b7d52baf5895acc3d0840eb2a3c78d0501ff2381 | d328c348fa5c4d14272a4fc7be0ead9c74704d41 | /Petrinetze/excercisedolly.py | 4782d86a5a9ca10d06ae12baaa77f0dbf6918619 | [] | no_license | Patminton/Projektarbeit | 5ab00222442ea30592105754010f63cbaeca5f09 | 51c413cddd8c377132663cec6a251d6325a55fa2 | refs/heads/main | 2023-05-28T18:03:07.477089 | 2021-06-11T16:17:45 | 2021-06-11T16:17:45 | 332,723,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | import snakes.plugins
from snakes.nets import *
snakes.plugins.load("gv", "snakes.nets", "nets")
n = PetriNet("N")
n.add_place(Place("Bestellung", [3]))
n.add_place(Place("Kran", [1]))
n.add_place(Place("Bauteile", [2]))
n.add_place(Place("freie Dollys"))
n.add_place(Place("volle Dollys"))
n.add_place(Place("Dolly in Presse", [2]))
n.add_place(Place("Kapazität Presse", [1]))
bestuecken = Transition("bestuecken")
dolly_entladen = Transition("Dolly Entladen")
in_presse_fahren = Transition("In Presse fahren")
n.add_transition(bestuecken)
n.add_transition(dolly_entladen)
n.add_transition(in_presse_fahren)
n.add_input("Bestellung", "bestuecken", Variable("x"))
n.add_input("Bauteile", "bestuecken", Variable("t"))
n.add_input("freie Dollys", "bestuecken", Variable("d"))
n.add_input("Kran", "bestuecken", Variable("k"))
n.add_input("Kran", "Dolly Entladen", Variable("k"))
n.add_input("volle Dollys", "In Presse fahren", Variable("d"))
n.add_input("Dolly in Presse", "Dolly Entladen", Variable("d"))
n.add_input("Kapazität Presse", "In Presse fahren", Variable("kapazitat"))
n.add_output("Kran", "bestuecken", Expression("k"))
n.add_output("volle Dollys", "bestuecken", Expression("d"))
n.add_output("Kran", "Dolly Entladen", Expression("k"))
n.add_output("freie Dollys", "Dolly Entladen", Expression("d"))
n.add_output("Kapazität Presse", "In Presse fahren", Variable("kapazitat"))
n.add_output("Dolly in Presse", "In Presse fahren", Variable("d"))
for engine in ("neato", "dot", "circo", "twopi", "fdp"):
n.draw('_test-gv-%s.png' % engine, engine=engine)
s = StateGraph(n)
s.build()
s.draw('_test-gv-graph.png')
| [
"[email protected]"
] | |
0bc13510485468d6d5279a6f4ad60543485096c0 | 99ed9567e6308022389f882304ceda5f31e18b86 | /study-note/python相关学习记录/pickle-test.py | 2ecfad985f60435ad9810b516f79dabb344ac288 | [] | no_license | Lsn800/Paper | d700f8be5e1a4971440184bb76973dd94309dd23 | e564f3e1fbf216b6e73892b4021c1130dc1f7015 | refs/heads/master | 2023-03-21T00:14:53.504885 | 2020-03-17T10:48:12 | 2020-03-17T10:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
import pickle
def p():  # serialize a dict into a pickle file
sss = {'1':'a','2':'b','3':'c'}
    # use dump() to serialize the data into the file
fw = open('dataFile.txt','wb')
pickle.dump(sss,fw)
fw.close()
def ss():  # restore the data stored in the pickle file
fr = open('dataFile.txt','rb')
data = pickle.load(fr)
print(data)
if __name__ == "__main__":
    p()   # pickle the data
    ss()  # unpickle the data
"[email protected]"
] | |
b917646bb432dc0d2a45523b30c766eda00a592f | 84dde7692f4a7155c28d2517ea9ee3342263856d | /utils/dataset.py | c7079978217f004fa0b56d3735e78bb14cecd90d | [] | no_license | cyins/YOLOv5-SGBM | c4e26db1e66865909cd6a4f05857d24428518a52 | 8280136ca7357a37aa52be511cf1603050ea59f1 | refs/heads/master | 2023-08-22T02:39:32.767267 | 2021-07-26T01:38:39 | 2021-07-26T01:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,612 | py | '''
Author : Noah
Date : 20210408
function: Load data to input to the model
'''
import os,sys,logging,glob,time,queue
from pathlib import Path
from itertools import repeat
from multiprocessing.pool import ThreadPool
from threading import Thread
import threading
import numpy as np
from utils.general import confirm_dir,timethis,calib_type
import cv2
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
class DATASET_NAMES():
"""
    @description : predefined object class names for object detection
---------
@function : None
-------
"""
voc_names = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
coco_names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush']
masks_names = ['mask','nomask']
voc_split_names = ['bottle','chair','diningtable','person','pottedplant','sofa','tvmonitor']
coco_split_names = ['person','sports ball','bottle','cup','chair','potted plant','cell phone', 'book']
name_color = [[246, 252, 48],[38, 9, 129],[235, 204, 85],[51, 148, 36],[68, 154, 71],[77, 204, 64],[142, 183, 11],
[76, 224, 194],[62, 211, 108],[87, 182, 84],[217, 236, 51],[83, 160, 30],[136, 38, 28],[157, 71, 128],
[166, 144, 72],[142, 82, 203],[161, 110, 0],[179, 75, 107],[241, 31, 58],[188, 179, 151],[6, 141, 72],
[34, 65, 134],[248, 200, 119],[98, 14, 74],[108, 42, 45],[65, 253, 19],[41, 70, 255],[72, 54, 7],
[86, 8, 97],[106, 129, 218],[59, 147, 175],[234, 40, 195],[92, 42, 230],[236, 173, 62],[144, 190, 177],
[18, 181, 241],[247, 59, 100],[212, 181, 95],[143, 117, 204],[30, 46, 171],[86, 254, 78],[82, 124, 249],
[142, 236, 83],[193, 223, 226],[198, 202, 19],[101, 171, 24],[212, 147, 16],[55, 73, 49],[104, 91, 136],
[205, 89, 132],[42, 103, 28],[109, 60, 150],[250, 216, 158],[211, 132, 120],[188, 40, 169],[92, 12, 162],
[107, 64, 221],[149, 174, 193],[126, 54, 154],[88, 107, 46],[115, 128, 33],[73, 202, 252],[1, 224, 125],
[9, 55, 163],[66, 145, 204],[61, 248, 181],[220, 238, 17],[53, 26, 250],[162, 156, 200],[240, 117, 64],
[53, 65, 194],[17, 146, 93],[197, 199, 158],[64, 54, 35],[188, 183, 177],[206, 17, 174],[34, 155, 144],
[142, 123, 110],[211, 17, 89],[54, 38, 67]]
class pipeline:
"""
@description : a data pipeline shared by multiple threads
---------
@function : send images, timestamps and valid signal
-------
"""
def __init__(self,width=2560,height=960):
self.timestamp=0.
self.frame=0
self.valid = False
self.lock = threading.Lock()
def put(self,timestamp,img0,frame):
with self.lock:
self.timestamp = timestamp
self.frame = frame
self.image = img0
self.valid = True
def get(self):
with self.lock:
timestamp=self.timestamp
img0=self.image
frame = self.frame
valid = self.valid
self.valid = False
return timestamp,img0,frame,valid
class loadfiles:
"""
    @description : load image or video file(s) and create an iterator
---------
@function :
-------
"""
def __init__(self, path='', img_size=640, save_path=''):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
try:
files = sorted(glob.glob(os.path.join(p, '*.*')), key=lambda x: int(os.path.basename(x).split('.')[0])) # dir
except:
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist'%p) #cp3.5
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.vid_file_path = os.path.join(save_path,'video')
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.writer = None #debug function
        self.file_name = 'Origin'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'Supported formats are:\nimages: %s\nvideos: %s'%(img_formats,vid_formats) #cp3.5
# 'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' #cp3.6
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if isinstance(self.writer, cv2.VideoWriter):
self.writer.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
            self.file_name = path + '_' + str(self.frame)
self.frame += 1
# print('video %d/%d (%d/%d) %s: '%(self.count + 1,self.nf,self.frame,self.nframes,path), end='') #cp3.5
else:
# Read image
self.count += 1
self.mode = 'image'
img0 = cv2.imread(path) # BGR
self.file_name = os.path.split(path)[-1]
assert img0 is not None, 'Image Not Found ' + path
# print('========================new image========================')
# print('image %d/%d %s: '%(self.count, self.nf, path), end='\n') #cp3.5
# Padded resize
TimeStamp = str(time.time()).split('.')
if len(TimeStamp[1])<9:
for i in range(9-len(TimeStamp[1])):
TimeStamp[1] += '0'
h = img0.shape[0]
w = img0.shape[1]
w1 = round(w/2)
img0_left = img0[:,:w1,:]
img0_right = img0[:,w1:,:]
return path, img0_left, img0_right, (h,w1), TimeStamp, self.cap
def get_vid_dir(self,path):
self.vid_file_path = path
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
# self.file_path = '/home/bynav/AI_SGBM/runs/detect/exp/video'
if not os.path.isdir(self.vid_file_path):
os.mkdir(self.vid_file_path)
save_path = os.path.join(self.vid_file_path, str(path.split('/')[-1].split('.')[0])+'.avi')
fps = self.cap.get(cv2.CAP_PROP_FPS)
fourcc = 'mp4v' # output video codec
w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)/2)
h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class loadcam:
"""
    @description : load real-time webcam data and create an iterator
---------
@function :
---------
"""
# @timethis
def __init__(self, pipe='4', cam_freq=5, img_size=640, save_path='', debug=False, cam_mode=1):
self.img_size = img_size
if pipe.isnumeric():
            pipe = int(pipe)  # local camera index
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:[email protected]/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.debug = debug
self.pipe = pipe
self.time = 0
self.writer = None
self.cap = cv2.VideoCapture(pipe) # video capture object
if cam_mode == calib_type.AR0135_1280_960.value or cam_mode == calib_type.AR0135_416_416.value or cam_mode == calib_type.AR0135_640_640.value:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,2560)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,960) #AR0135
elif cam_mode == calib_type.AR0135_640_480.value:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,1280)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,480) #AR0135
else:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,2560)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,720) #OV9714
self.pipeline = pipeline(int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.cam_freq = cam_freq
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.size = (self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# self.queue = queue.LifoQueue(maxsize=self.fps)
bufsize = 1
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, bufsize) # set buffer size
print('Camera run under %s and %s fps'%(str(self.size),str(self.fps)))
self.vid_file_path = confirm_dir(save_path,'webcam')
self.img_file_path = confirm_dir(save_path,'webimg')
self.new_video('test.avi')
self.mode = 'webcam'
self.count = 0
self.frame = 0
self.real_frame = 0
self.valid = False
self.start = False
self.thread = Thread(target=self._update,args=[],daemon=True)
self.thread.start()
def _update(self):
while True:
self.real_frame += 1
            TimeStamp = time.time() - 0.044  # compensate for cv2 VideoCapture.read() latency (~44 ms assumed here)
# Read frame
if self.pipe in [0,1,2,3,4,5]: # local camera
ret_val, img0 = self.cap.read()
# self.valid = True
# img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
assert ret_val, 'Camera Error %d'%self.pipe #cp3.5
self.pipeline.put(TimeStamp,img0,self.real_frame)
self.start = True
def __iter__(self):
return self
# @timethis
def __next__(self):
runtime = time.time() - self.time
if runtime < 1/self.cam_freq:
time.sleep(round(1/self.cam_freq-runtime,3))
while True:
if self.start:
TimeStamp,img0,self.frame,self.valid = self.pipeline.get()
if self.valid:
break
# print('========================= webcam %d ======================='%self.frame,end='\r') #cp3.5
TimeStamp = str(TimeStamp).split('.')
if len(TimeStamp[1])<9:
for i in range(9-len(TimeStamp[1])):
TimeStamp[1] += '0'
w = img0.shape[1]
w1 = int(w/2)
if self.debug:
save_file = os.path.join(self.img_file_path,(str(self.frame)+'.bmp'))
cv2.imwrite(save_file,img0)
imgl = img0[:,:w1,:]
imgr = img0[:,w1:,:]
self.count += 1
img_path = 'webcam.jpg'
self.time = time.time()
return img_path, imgl, imgr, None, TimeStamp, None
def get_vid_dir(self,path):
self.vid_file_path = path
def new_video(self, path):
if isinstance(self.writer, cv2.VideoWriter):
self.writer.release()
fps = self.cap.get(cv2.CAP_PROP_FPS)
fourcc = 'mp4v' # output video codec
w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)/2)
h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
save_path = os.path.join(self.vid_file_path, path)
self.writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
def __len__(self):
return 0
| [
"[email protected]"
] | |
501ce999fd6452c28544240627deb50e62312876 | fce83f1b55b8894afab9eb58ae8b4ba2e26eb86b | /examples/GAN/DCGAN.py | e9df6b36319476aea07fd240e26005c998a75385 | [
"Apache-2.0"
] | permissive | PeisenZhao/tensorpack | b65d451f6d4a7fe1af1e183bdc921c912f087586 | 6ca57de47e4a76b57c8aa2f0dad87c1059c13ac0 | refs/heads/master | 2021-05-05T01:46:05.209522 | 2018-01-31T05:29:37 | 2018-01-31T05:29:37 | 119,641,372 | 1 | 0 | null | 2018-01-31T05:52:07 | 2018-01-31T05:52:06 | null | UTF-8 | Python | false | false | 5,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DCGAN.py
# Author: Yuxin Wu <[email protected]>
import glob
import numpy as np
import os
import argparse
from tensorpack import *
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.utils.globvars import globalns as opt
import tensorflow as tf
from GAN import GANTrainer, RandomZData, GANModelDesc
"""
1. Download the 'aligned&cropped' version of CelebA dataset
from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
2. Start training:
./DCGAN-CelebA.py --data /path/to/img_align_celeba/ --crop-size 140
Generated samples will be available through tensorboard
3. Visualize samples with an existing model:
./DCGAN-CelebA.py --load path/to/model --sample
You can also train on other images (just use any directory of jpg files in
`--data`). But you may need to change the preprocessing.
A pretrained model on CelebA is at http://models.tensorpack.com/GAN/
"""
# global vars
opt.SHAPE = 64
opt.BATCH = 128
opt.Z_DIM = 100
class Model(GANModelDesc):
def _get_inputs(self):
return [InputDesc(tf.float32, (None, opt.SHAPE, opt.SHAPE, 3), 'input')]
def generator(self, z):
""" return an image generated from z"""
nf = 64
l = FullyConnected('fc0', z, nf * 8 * 4 * 4, nl=tf.identity)
l = tf.reshape(l, [-1, 4, 4, nf * 8])
l = BNReLU(l)
with argscope(Deconv2D, nl=BNReLU, kernel_shape=4, stride=2):
l = Deconv2D('deconv1', l, nf * 4)
l = Deconv2D('deconv2', l, nf * 2)
l = Deconv2D('deconv3', l, nf)
l = Deconv2D('deconv4', l, 3, nl=tf.identity)
l = tf.tanh(l, name='gen')
return l
@auto_reuse_variable_scope
def discriminator(self, imgs):
""" return a (b, 1) logits"""
nf = 64
with argscope(Conv2D, nl=tf.identity, kernel_shape=4, stride=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', nf, nl=tf.nn.leaky_relu)
.Conv2D('conv1', nf * 2)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.Conv2D('conv2', nf * 4)
.BatchNorm('bn2')
.tf.nn.leaky_relu()
.Conv2D('conv3', nf * 8)
.BatchNorm('bn3')
.tf.nn.leaky_relu()
.FullyConnected('fct', 1, nl=tf.identity)())
return l
def _build_graph(self, inputs):
image_pos = inputs[0]
image_pos = image_pos / 128.0 - 1
z = tf.random_uniform([opt.BATCH, opt.Z_DIM], -1, 1, name='z_train')
z = tf.placeholder_with_default(z, [None, opt.Z_DIM], name='z')
with argscope([Conv2D, Deconv2D, FullyConnected],
W_init=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
image_gen = self.generator(z)
tf.summary.image('generated-samples', image_gen, max_outputs=30)
with tf.variable_scope('discrim'):
vecpos = self.discriminator(image_pos)
vecneg = self.discriminator(image_gen)
self.build_losses(vecpos, vecneg)
self.collect_variables()
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
def get_augmentors():
augs = []
if opt.load_size:
augs.append(imgaug.Resize(opt.load_size))
if opt.crop_size:
augs.append(imgaug.CenterCrop(opt.crop_size))
augs.append(imgaug.Resize(opt.SHAPE))
return augs
def get_data(datadir):
imgs = glob.glob(datadir + '/*.jpg')
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = AugmentImageComponent(ds, get_augmentors())
ds = BatchData(ds, opt.BATCH)
ds = PrefetchDataZMQ(ds, 5)
return ds
def sample(model, model_path, output_name='gen/gen'):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=model,
input_names=['z'],
output_names=[output_name, 'z'])
pred = SimpleDatasetPredictor(pred, RandomZData((100, opt.Z_DIM)))
for o in pred.get_result():
o = o[0] + 1
o = o * 128.0
o = np.clip(o, 0, 255)
o = o[:, :, :, ::-1]
stack_patches(o, nr_row=10, nr_col=10, viz=True)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='view generated examples')
parser.add_argument('--data', help='a jpeg directory')
parser.add_argument('--load-size', help='size to load the original images', type=int)
parser.add_argument('--crop-size', help='crop the original images', type=int)
args = parser.parse_args()
opt.use_argument(args)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
return args
if __name__ == '__main__':
args = get_args()
if args.sample:
sample(Model(), args.load)
else:
assert args.data
logger.auto_set_dir()
GANTrainer(
input=QueueInput(get_data(args.data)),
model=Model()).train_with_defaults(
callbacks=[ModelSaver()],
steps_per_epoch=300,
max_epoch=200,
session_init=SaverRestore(args.load) if args.load else None
)
| [
"[email protected]"
] | |
cbba52bd311c263ff7f9db112294be548e2b79a3 | 5a5067ef89e0d0e618fa00cbf037eda29df64135 | /env/bin/python-config | 3f06b82c5f1dff2454094cb77d79598e225c80c9 | [] | no_license | leandrovaladao/TddTraining | 8bee73480000305afd21053d0cde633d557b6183 | 268a69495430642a676ec12f58596e14ca9f0004 | refs/heads/master | 2020-03-20T06:40:04.839541 | 2018-06-13T18:38:04 | 2018-06-13T18:38:04 | 137,256,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | #!/home/ezvoice/TddTraining/env/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
483db4217063a3ca57b15543b683ac4c80e5c4ee | d47ed9550468b360a54a4fe8e3cfe63bf15c1ddb | /ProgrammingPython/Preview/initdata.py | c53957abef9ca1ef665ef5ac0b238760cdf7d094 | [] | no_license | yuzuqiang/learnPython | 880bb1e5d40a7a4d928068ae892e9b85f84ec355 | 490c8ccf3a61b967641ec47e2f549574fe9a2fa0 | refs/heads/master | 2021-01-13T03:47:36.611964 | 2017-01-01T18:24:23 | 2017-01-01T18:24:23 | 77,197,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | #Record
bob = {'name': 'Bob Smith', 'age': 42, 'pay': 30000, 'job': 'dev'}
sue = {'name': 'Sue Jones', 'age': 45, 'pay': 40000, 'job': 'hdw'}
tom = {'name': 'Tom', 'age': 50, 'pay': 0, 'job': None}
#Database
db = {}
db['bob'] = bob
db['sue'] = sue
db['tom'] = tom
#As shell run
if __name__=='__main__':
for key in db:
print(key, '=>\n', db[key])
| [
"[email protected]"
] | |
c11fb072314ba1347f7220d63fc2c4cb07bfa3ed | 6e808ac510567907cb1069bd85bbebdcb2ed3c05 | /old/breed.py | 19c979abfe71a1cf3e13c9643deb4c50cebeee34 | [
"MIT"
] | permissive | lesterwilliam/neural | ba0c30fde27ed9800f37f71f32611990919a349c | a9fd081fee0c969359476d447015712a60529187 | refs/heads/master | 2020-04-11T13:25:28.537273 | 2019-01-23T13:11:34 | 2019-01-23T13:11:34 | 161,815,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # prototype
import random
import numpy as np
# test data
#parentA = [0,0,9,0,0,0,0,0,1,5,8,7,9,5]
#parentB = [1,1,1,1,1,1,1,1,5,9,4,8,5,5]
# Mixes genes from parents and returns created child
def breedChild(parentA, parentB):
if len(parentA) != len(parentB):
return 0
child = np.zeros((len(parentA),1))
for item in range(len(parentA)):
if (int(100 * random.random()) < 50):
child[item] = parentA[item]
else:
child[item] = parentB[item]
return child
def breedChildren(parents, number_of_child):
    # Completion sketch (assumption): each child is bred from a random parent pair.
    return [breedChild(*random.sample(parents, 2)) for _ in range(number_of_child)]

#print (breedChild(parentA, parentB))
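# Hedged example (hypothetical data): breed 4 children from two sample parents.
# print(breedChildren([[0, 0, 9, 0, 1], [1, 1, 1, 1, 5]], 4))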
"[email protected]"
] | |
0949cfa4f0046cc18f1408130d1843880b0d6b73 | 47bc9db111566405c916683c9de9dd81ab905dc6 | /app/forms.py | a2834252f84c7d50cb183d7427d6e5369ea101c7 | [] | no_license | fjb930822/blogs | 18786578ca160a3fb415c53984cad40360ec4caf | 198b1e0c0814a74f65036398e57a2489e2bb2439 | refs/heads/master | 2023-02-18T14:54:18.468594 | 2021-01-22T07:57:32 | 2021-01-22T07:57:32 | 327,336,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # coding: utf-8
# Author: [email protected]
# Created Time: 2021/1/6 17:54
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])
    remember_me = BooleanField("Remember Me")
submit = SubmitField("Sign in")
| [
"[email protected]"
] | |
86c2d4bce79ae370a56b5625bbf3d57ab84635f1 | e508cf8db6bc3626ee412f8bbbac20aee4103def | /mysite/settings.py | ccecb5befde17aa93032b49dd00c92678f3d53a7 | [] | no_license | dennis-zhong/my-first-blog | 53829fb04927ff766154541af21b0eaf1fc69975 | 80f62359b7c75b3841883059affc345b6073de57 | refs/heads/master | 2023-06-16T17:35:51.072793 | 2021-07-14T00:48:59 | 2021-07-14T00:48:59 | 383,627,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.24.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6)b-=*!9py2*l1k3izs(r4rpprwwwgg$#g)o@$4e+ciyakl!!l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
9e6d61a3743d70fc652f40ee1dce7897a9019284 | 0000c8f4a481000676463f81d55c2ea21862cbd5 | /not-yet-done-examples.py | d2d8e2a9c2d8f5293eea6153628712f8ddbc0858 | [] | no_license | robertej19/813 | b5ca9b51504e002189861bc0e1230bd43c5f6005 | f1417f05e9d08d5693d6ecd8363d1dd7552d2e12 | refs/heads/master | 2022-12-18T14:36:26.644424 | 2020-09-21T13:58:06 | 2020-09-21T13:58:06 | 292,097,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | Here is my code for doing the fit and plotting:
8:51
popt, pcov = curve_fit(gauss, xval, yval, sigma=yerror,p0 = [100, 3300, 140],absolute_sigma=False)
xx = np.arange(xmin,xmax)
plt.plot(xx, gauss(xx, *popt), label='fit')
One line method to load a CSV data file into python with numpy
import numpy as np
data=[*zip(*np.genfromtxt('cubeData.csv',delimiter=','))] | [
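
# Self-contained sketch of the fit above (assumptions: a three-parameter
# Gaussian model and synthetic data standing in for the lab CSV).
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def gauss(x, a, mu, sigma):
    # Gaussian peak: amplitude a, centre mu, width sigma
    return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

xval = np.linspace(3000, 3600, 60)
yval = gauss(xval, 100, 3300, 140) + np.random.normal(0, 2, xval.size)
yerror = np.full_like(yval, 2.0)

popt, pcov = curve_fit(gauss, xval, yval, sigma=yerror,
                       p0=[100, 3300, 140], absolute_sigma=False)
xx = np.arange(xval.min(), xval.max())
plt.errorbar(xval, yval, yerr=yerror, fmt='.', label='data')
plt.plot(xx, gauss(xx, *popt), label='fit')
plt.legend()
plt.show()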
"[email protected]"
] | |
8fe491ed92ae2422c3f08061149a7dfc244cc882 | 8848d3b74511e918a5d0c49480f500d4084197f6 | /Pizzer/modulo_autenticacao/models.py | 13f27ee077120b99a3f5ab52ee756aa60d8bf147 | [] | no_license | giulyflash/mozzarella | 6574bf776e602f36942cc7c430e4609835adb25c | 7ab077895c32e0fa87c758489749e00eea36dacc | refs/heads/master | 2021-01-10T11:21:31.780188 | 2010-06-09T01:49:45 | 2010-06-09T01:49:45 | 53,130,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User, Group
from django import forms
from django.forms.widgets import PasswordInput
class UserCreateForm(forms.ModelForm):
password = forms.CharField(widget=PasswordInput)
password_r = forms.CharField(widget=PasswordInput)
class Meta:
model = User
fields = ('username', 'email')
def clean(self):
cleaned_data = self.cleaned_data
username= cleaned_data.get('username')
password = cleaned_data.get('password')
password_r = cleaned_data.get('password_r')
if password and password_r:
if password != password_r:
                raise forms.ValidationError('The password and the repeated password do not match.')
if username:
if User.objects.filter(username=username):
                raise forms.ValidationError('The chosen username is not available.')
return cleaned_data
class UserEditForm(forms.ModelForm):
nova_senha = forms.CharField(max_length=15)
class Meta:
model = User
fields = ('username', 'email', 'password')
exclude = ['password']
class UserChangePassForm(forms.Form):
password = forms.CharField(widget=PasswordInput)
password_r = forms.CharField(widget=PasswordInput)
def clean(self):
cleaned_data = self.cleaned_data
username= cleaned_data.get('username')
password = cleaned_data.get('password')
password_r = cleaned_data.get('password_r')
if password and password_r:
if password != password_r:
raise forms.ValidationError('A senha e a senha repetida estão diferentes.')
if username:
if User.objects.filter(username=username):
raise forms.ValidationError('O nome de usuário escolhido não está disponível.')
return cleaned_data | [
"dmail07@8169190b-fedb-79aa-6a5a-a987b9c8a9c4"
] | dmail07@8169190b-fedb-79aa-6a5a-a987b9c8a9c4 |
18a1ef9adc1cffb62a94ab625de750a18568e630 | ea544b339809095d2c383b542248f530990c31d5 | /env/lib/python3.6/site-packages/pip/_vendor/html5lib/treewalkers/base.py | ba04ae2bb9cec5cf9fc1e3ea2a220624ca47aea1 | [
"BSD-3-Clause"
] | permissive | 724686158/NosqlEXP3 | 5fab1a9e131c6936b5b61e0f1c86eea2c889294a | e29f2807f075831377456b47cf8c9ce0c8d65c30 | refs/heads/master | 2020-04-09T01:40:54.370782 | 2019-01-25T13:04:04 | 2019-01-25T13:04:04 | 159,912,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,476 | py | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
"""Walks a tree yielding tokens
Tokens are dicts that all have a ``type`` field specifying the type of the
token.
"""
def __init__(self, tree):
"""Creates a TreeWalker
:arg tree: the tree to walk
"""
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
"""Generates an error token with the given message
:arg msg: the error message
:returns: SerializeError token
"""
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
"""Generates an EmptyTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:arg attrs: the attributes of the element as a dict
:arg hasChildren: whether or not to yield a SerializationError because
this tag shouldn't have children
:returns: EmptyTag token
"""
yield {"type": "EmptyTag", "name": name,
"namespace": namespace,
"data": attrs}
if hasChildren:
yield self.error("Void element has children")
def startTag(self, namespace, name, attrs):
"""Generates a StartTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:arg attrs: the attributes of the element as a dict
:returns: StartTag token
"""
return {"type": "StartTag",
"name": name,
"namespace": namespace,
"data": attrs}
def endTag(self, namespace, name):
"""Generates an EndTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:returns: EndTag token
"""
return {"type": "EndTag",
"name": name,
"namespace": namespace}
def text(self, data):
"""Generates SpaceCharacters and Characters tokens
Depending on what's in the data, this generates one or more
``SpaceCharacters`` and ``Characters`` tokens.
        For example:
>>> from html5lib.treewalkers.base import TreeWalker
>>> # Give it an empty tree just so it instantiates
>>> walker = TreeWalker([])
>>> list(walker.text(''))
[]
>>> list(walker.text(' '))
[{u'data': ' ', u'type': u'SpaceCharacters'}]
>>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE
[{u'data': ' ', u'type': u'SpaceCharacters'},
{u'data': u'abc', u'type': u'Characters'},
{u'data': u' ', u'type': u'SpaceCharacters'}]
:arg data: the text data
:returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
"""
data = data
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
"""Generates a Comment token
:arg data: the comment
:returns: Comment token
"""
return {"type": "Comment", "data": data}
def doctype(self, name, publicId=None, systemId=None):
"""Generates a Doctype token
:arg name:
:arg publicId:
:arg systemId:
:returns: the Doctype token
"""
return {"type": "Doctype",
"name": name,
"publicId": publicId,
"systemId": systemId}
def entity(self, name):
"""Generates an Entity token
:arg name: the entity name
:returns: an Entity token
"""
return {"type": "Entity", "name": name}
def unknown(self, nodeType):
"""Handles unknown node types"""
return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (namespace and namespace != namespaces["html"]) or name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
| [
"[email protected]"
] | |
f619edee72bba9795ba459de6a8842ce400d1686 | 0e3dc8881b8412e2abc0ffa2b949c0890d9d77ef | /leetcode-notes/easy/DP/70_combining_stairs.py | 0e6f7419bc97e84483717925826cf5a361ee7d9c | [] | no_license | kevinsu628/study-note | 8d8ec55ef43eabc7221ebebb6684f4d42bc961be | 674d8afde62a1cf690a6172dad677b284ba076df | refs/heads/master | 2021-10-06T11:21:18.196879 | 2021-10-05T01:34:59 | 2021-10-05T01:34:59 | 207,233,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | '''
70. Climbing Stairs
Easy
2217
81
Favorite
Share
You are climbing a stair case. It takes n steps to reach to the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Note: Given n will be a positive integer.
Example 1:
Input: 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
'''
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
if n == 1:
return 1
dp = [1, 2] # 1 start has 1 comb, 2 has 2 comb
for i in range(2, n+1):
dp.append(dp[i-1] + dp[i-2])
return dp[n-1] | [
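
if __name__ == "__main__":
    # quick check (not part of the LeetCode submission): 3 steps -> 3 ways
    print(Solution().climbStairs(3))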
"[email protected]"
] | |
19ab53b9fb8e8ae04ef27d28f4226bc9349430e4 | 3633d36e8fad6c661ad2db46da81f3bcc76c544f | /conandgen/convertors/forms.py | 1d69cdbd9f8d127bc317e4fcc42fc28fc1c72269 | [] | no_license | rpankaj853/Converters-generators | f6cb9ff9acd27ead72a9f6808e6ddee8f62df21c | caa422b6ba6246e89f7cae7fe7c06e8221108ddd | refs/heads/main | 2023-03-10T03:33:31.914267 | 2021-03-04T07:19:12 | 2021-03-04T07:19:12 | 344,382,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | from django import forms
class Contactform(forms.Form):
amount = forms.IntegerField()
category1 = forms.ChoiceField(choices=[('INR','INR || INDIA'),('USD','USD || USA'),('EUR','EUR || EURO'),('JPY','JPY || JAPANESE'),('GBP','GBP || BRITISH POUND'),('AUD','AUD || AUSTRALIAN DOLLAR'),('CAD','CAD || CANADIAN DOLLAR'),('SGD','SGD || SINGAPORE DOLLAR'),('CHF','CHF || SWISS FRANC'),('CNY','CNY || CHINESE YUAN RENMINBI ')])
category2 = forms.ChoiceField(choices=[('INR','INR || INDIA'),('USD','USD || USA'),('EUR','EUR || EURO'),('JPY','JPY || JAPANESE'),('GBP','GBP || BRITISH POUND'),('AUD','AUD || AUSTRALIAN DOLLAR'),('CAD','CAD || CANADIAN DOLLAR'),('SGD','SGD || SINGAPORE DOLLAR'),('CHF','CHF || SWISS FRANC'),('CNY','CNY || CHINESE YUAN RENMINBI ')])
# widgets = {
# amount : forms.TextInput(attrs={'class':'form-control'}),
# category1 : forms.Select(attrs={'class':'form-control'}),
# category2 : forms.Select(attrs={'class':'form-control'}),
# } | [
"[email protected]"
] | |
93301ec1206f85f5ae49d4031fa40f3d41d273c6 | 6e6873e7081c72f03c22e059b05dd0127520dd2c | /spiral_order/spiral_order.py | 9e2c6fe009dc3436bdafff0238a5e0d25a908576 | [] | no_license | byd913/leetcode | 53e20259bd9aab89dd0cfaf20bbb7463a079b273 | 4a686dc3507a8a500b774a6e94399101e82e3017 | refs/heads/master | 2020-12-24T18:42:24.933131 | 2019-01-21T15:15:58 | 2019-01-21T15:15:58 | 57,128,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py |
class Solution(object):
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
result_list = []
height = len(matrix)
if height == 0:
return result_list
width = len(matrix[0])
min_w = min(width, height)
half_w = min_w / 2
if min_w % 2 == 1:
half_w += 1
for k in range(0, half_w):
start_w = k
end_w = width-1-k
start_h = k
end_h = height-1-k
i = start_h
for j in range(start_w, end_w):
result_list.append(matrix[i][j])
j = end_w
for i in range(start_h, end_h):
result_list.append(matrix[i][j])
i = end_h
if end_h > start_h:
for j in range(end_w, start_w, -1):
result_list.append(matrix[i][j])
else:
result_list.append(matrix[i][end_w])
j = start_w
if end_w > start_w:
for i in range(end_h, start_h, -1):
result_list.append(matrix[i][j])
elif end_h > start_h:
result_list.append(matrix[end_h][j])
return result_list
if __name__ == "__main__":
matrix = [[1]]
solution = Solution()
print solution.spiralOrder(matrix)
| [
"[email protected]"
] | |
0acd983de466d61f498fa59db7369e203f1b6137 | 54f97354744cf8a5e3b3fba11200d6c6a4ec16e0 | /tests/ikpy_test.py | eedd0d07870b5a2701d90f7a8fbce5cee317767c | [] | no_license | mikevanis/ColourBodyTracker | 3bdfaf9a0db0ce3e9228b16daf7fa8f74481b774 | f257ff6e8a17e69424e50d34535e1bbf9b404ab2 | refs/heads/master | 2020-03-22T15:38:45.343334 | 2018-09-24T15:31:17 | 2018-09-24T15:31:17 | 140,266,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,820 | py | from ikpy.chain import Chain
import ikpy.geometry_utils as geometry_utils
import matplotlib.pyplot
from mpl_toolkits.mplot3d import Axes3D
from ikpy.link import OriginLink, URDFLink
import numpy as np
left_arm_chain = Chain(name='left_arm', links=[
OriginLink(),
URDFLink(
name="shoulder",
translation_vector=[-0.59, 0, 0.44],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="elbow",
translation_vector=[0, 0, -0.53],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="wrist",
translation_vector=[0, 0, -0.74],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
)],active_links_mask=[False, True, True, True])
right_arm_chain = Chain(name='right_arm', links=[
OriginLink(),
URDFLink(
name="shoulder",
translation_vector=[0.59, 0, 0.44],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="elbow",
translation_vector=[0, 0, -0.53],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="wrist",
translation_vector=[0, 0, -0.74],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
)],active_links_mask=[False, True, True, True])
head_chain = Chain(name='head', links=[
OriginLink(),
URDFLink(
name="neck",
translation_vector=[0, 0, 1],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="face",
translation_vector=[0, -0.2, 0],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
)], active_links_mask=[False, True, True])
left_leg_chain = Chain(name='left_leg', links=[
OriginLink(),
URDFLink(
name="hip",
translation_vector=[0.26, 0, -1.07],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="knee",
translation_vector=[0, 0, -0.98],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="ankle",
translation_vector=[0, 0, -1.13],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
)],active_links_mask=[False, True, True, True])
right_leg_chain = Chain(name='right_leg', links=[
OriginLink(),
URDFLink(
name="hip",
translation_vector=[-0.26, 0, -1.07],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="knee",
translation_vector=[0, 0, -0.98],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
),
URDFLink(
name="ankle",
translation_vector=[0, 0, -1.13],
orientation=[0, 0, 0],
rotation=[0, 0, 0],
)],active_links_mask=[False, True, True, True])
ax = matplotlib.pyplot.figure().add_subplot(111, projection='3d')
target_vector = [ 0.1, -0.2, 0.1]
l_wrist_matrix = geometry_utils.to_transformation_matrix(target_vector)
target_frame = np.eye(4)
target_frame[:3, 3] = target_vector
left_arm_start_position = left_arm_chain.forward_kinematics([0] * 4)
right_arm_start_position = right_arm_chain.forward_kinematics([0] * 4)
head_start_position = head_chain.forward_kinematics([0] * 3)
left_leg_start_position = left_leg_chain.forward_kinematics([0] * 4)
right_leg_start_position = right_leg_chain.forward_kinematics([0] * 4)
print(right_arm_start_position)
left_arm_chain.plot(left_arm_chain.inverse_kinematics(l_wrist_matrix), ax, target=target_vector)
right_arm_chain.plot(right_arm_chain.inverse_kinematics(right_arm_start_position), ax)
head_chain.plot(head_chain.inverse_kinematics(head_start_position), ax)
left_leg_chain.plot(left_leg_chain.inverse_kinematics(left_leg_start_position), ax)
right_leg_chain.plot(right_leg_chain.inverse_kinematics(right_leg_start_position), ax)
matplotlib.pyplot.show() | [
"[email protected]"
] | |
edcfc0d4e49299c92d30291e455d7e22103cd7dc | 6903225919020d862e7403f94926ab241605083b | /OrderAux.py | c184f51c7b79ee1243bd84ec7271dc923decf3ef | [] | no_license | nandozanutto/TSP-With-restrictions | 602eb0f4aeccdb457027815481994ed6db2c2a10 | 3b78f3af38e9d7eb852763cc713307d56757dc66 | refs/heads/main | 2023-02-10T22:06:08.718813 | 2021-01-02T23:50:49 | 2021-01-02T23:50:49 | 301,243,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | def checkOrder(order):
for item in order:
for item2 in order:
if(item == item2):
continue
if(item[0] == item2[1] and item[1] == item2[0]):
return True
return False | [
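
# Hedged usage sketch (assumption: an order is a list of 2-item pairs, and a
# pair plus its reverse counts as a conflict):
if __name__ == "__main__":
    print(checkOrder([(1, 2), (3, 4)]))  # False - no reversed pair
    print(checkOrder([(1, 2), (2, 1)]))  # True - (2, 1) reverses (1, 2)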
"[email protected]"
] | |
35889e57cfde9a2d0c586a094664618e9a1813af | 1b23f77f8a615ff563e9b9b6ad0da93dfa3ad8d6 | /afk.py | 8cfe0868c82112813d7cbdacfa94a16983708b6a | [
"MIT"
] | permissive | Dark-PRINCESS/Dark-PRINCESS- | 14a8d7fc81374bfbdc37241c72f7b87d97f32ad5 | 0ad9c67960c8f88745442d264fdcd113b9925807 | refs/heads/main | 2023-01-24T09:16:27.018838 | 2020-11-14T10:52:00 | 2020-11-14T10:52:00 | 306,575,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,483 | py | """AFK Plugin for Friday
Syntax: .afk REASON"""
import asyncio
import datetime
from telethon import events
from telethon.tl import functions, types
from userbot.utils import admin_cmd
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
USER_AFK = {}
afk_time = None
last_afk_message = {}
@borg.on(events.NewMessage(outgoing=True)) # pylint:disable=E0602
async def set_not_afk(event):
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
current_message = event.message.message
if ".afk" not in current_message and "yes" in USER_AFK: # pylint:disable=E0602
try:
await borg.send_message( # pylint:disable=E0602
Config.PLUGIN_CHANNEL, # pylint:disable=E0602
"#AfkLogger My Boss Went Afk"
)
except Exception as e: # pylint:disable=C0103,W0703
await borg.send_message( # pylint:disable=E0602
event.chat_id,
"Please set `PLUGIN_CHANNEL` " + \
"for the proper functioning of afk functionality " + \
"in @FridayOT\n\n `{}`".format(str(e)),
reply_to=event.message.id,
silent=True
)
USER_AFK = {} # pylint:disable=E0602
afk_time = None # pylint:disable=E0602
@borg.on(admin_cmd(pattern=r"afk ?(.*)"))
async def _(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global reason
USER_AFK = {}
afk_time = None
last_afk_message = {}
reason = event.pattern_match.group(1)
if not USER_AFK: # pylint:disable=E0602
last_seen_status = await borg( # pylint:disable=E0602
functions.account.GetPrivacyRequest(
types.InputPrivacyKeyStatusTimestamp()
)
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
afk_time = datetime.datetime.now() # pylint:disable=E0602
USER_AFK = f"yes: {reason}" # pylint:disable=E0602
if reason:
await event.edit(f"My Mistress Is Going Afk ! And The Reason is {reason}")
else:
await event.edit(f"My Boss is Going")
await asyncio.sleep(5)
await event.delete()
try:
await borg.send_message( # pylint:disable=E0602
Config.PLUGIN_CHANNEL, # pylint:disable=E0602
f"#AfkLogger Reason : {reason}"
)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602
@borg.on(events.NewMessage( # pylint:disable=E0602
incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)
))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
afk_since = "`a while ago`"
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return False
if USER_AFK and not (await event.get_sender()).bot: # pylint:disable=E0602
if afk_time: # pylint:disable=E0602
now = datetime.datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**Yesterday**"
elif days > 1:
if days > 6:
date = now + \
datetime.timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
wday = now + datetime.timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h{int(minutes)}m` **ago**"
elif minutes > 0:
afk_since = f"`{int(minutes)}m{int(seconds)}s` **ago**"
else:
afk_since = f"`{int(seconds)}s` **ago**"
msg = None
message_to_reply = f"**My Boss is AFK** ! \n\n**Reason** : `{reason}` \n\n**Afk Since** : {afk_since}" + \
f"\n\n__Kindly Leave A Message__ ! \n`He Will Reply To You Soon !`" \
if reason \
else f"**Hello, Boss Is AFK Right Now And May Be Forgot List Reason ! Any Way He Will Come Back Soon !**"
msg = await event.reply(message_to_reply)
await asyncio.sleep(5)
if event.chat_id in last_afk_message: # pylint:disable=E0602
await last_afk_message[event.chat_id].delete() # pylint:disable=E0602
last_afk_message[event.chat_id] = msg # pylint:disable=E0602
| [
"[email protected]"
] | |
83744ea1d35b107112afb263efd99bc61cee11ff | 7e75d3349bba67bdee36809c541a867272ccdaec | /pari_dispari.py | 4788d31931dbe0719f90fd47e48826f9948ad72e | [] | no_license | Forz70043/script | a797a984c2b9b1d3592bfd9a24ed3f5b3b190aec | 9352fc2108f219a54164edc961152d40df02ce9e | refs/heads/master | 2021-06-20T07:51:24.973904 | 2021-04-15T10:15:12 | 2021-04-15T10:15:12 | 205,554,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py |
n = int(input("Number: "))
if n % 2 == 0:
    print("The number", n, "is even")
else:
    print("The number", n, "is odd")
| [
"[email protected]"
] | |
356f5b9b375921b9d31feda5c0570ba11f5000e6 | c85a3738bb121a63cb56164d2fb46373f63ff2c4 | /employee_controller.py | 0f2b228870cc249673964cd46faa640a6bd58c9e | [] | no_license | shiromabruno/Python_API_MYSQL | 0f6497197cc7452c518803497e64b12f2ed8106a | ee3f14712bcfe2142152ca10b77fa389bda9b893 | refs/heads/master | 2023-06-22T23:12:29.562036 | 2021-07-20T01:07:57 | 2021-07-20T01:07:57 | 387,305,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,104 | py | from flask import Flask, request, jsonify
import json
import mysql.connector
from mysql.connector import Error
from datetime import datetime
import database_service
app = Flask(__name__)
@app.route('/')
def welcomed():
return "Welcome to API using MYSQL"
def db_connection():
connection = None
try:
connection = mysql.connector.connect(host='localhost',
database='python_company',
user='root',
password='')
if connection.is_connected():
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
print("You're connected to database: ", record)
except Error as e:
print("Error while connecting to MySQL", e)
return connection
# finally:
# if connection.is_connected():
# cursor.close()
# connection.close()
# print("MySQL connection is closed")
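
# Hedged usage note: db_connection() deliberately leaves the connection open;
# each route below opens its own cursor and is expected to close both, e.g.
#   conn = db_connection()
#   cur = conn.cursor()
#   try:
#       cur.execute("SELECT 1")
#   finally:
#       cur.close()
#       conn.close()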
@app.route('/employee', methods=["GET", "POST"])
def employee():
connection = db_connection()
cursor = connection.cursor()
if request.method == "GET":
try:
cursor.execute("SELECT * FROM Employee")
selectquery = [
dict(Id=row[0], Name=row[1], Address=row[2], Birth=row[3], Department=row[4], Email=row[5])
for row in cursor.fetchall()
]
except Exception as e:
retorno_error = {
"Message: " : "Error during execution",
"Exception: " : e
}
cursor.close()
connection.close()
print("GET NOK. MySQL connection is closed")
return jsonify(retorno_error), 503
cursor.close()
connection.close()
print("GET OK. MySQL connection is closed")
if selectquery is not None:
return jsonify(selectquery), 200
if request.method == "POST":
body = request.json
if "name" not in body or "address" not in body or "birth" not in body or "department" not in body or "email" not in body:
retorno_error = {
"Message: " : "Must pass all fields: name, address, birth, department and email",
}
return jsonify(retorno_error), 400
new_name = body["name"]
new_address = body["address"]
new_birth_raw = body["birth"]
new_birth = datetime.strptime(new_birth_raw, '%Y-%m-%d').date()
new_department = body["department"]
new_email = body["email"]
try:
sql_insert = ("INSERT INTO Employee "
"(Id, Name,Address, Birth, Department, Email) "
"VALUES (%s, %s, %s, %s, %s, %s)")
tupla_user = (0, new_name, new_address, new_birth, new_department, new_email)
cursor.execute(sql_insert, tupla_user)
# cursor.execute(sql_insert, tupla_user) ---> dessa forma nao consegui fazer o last_id, dava objeto nonetype do lastrowid
connection.commit()
last_id = cursor.lastrowid
except Exception as e:
retorno_error = {
"Message: " : "Error during execution",
"Exception: " : e
}
cursor.close()
connection.close()
print("POST NOK. MySQL connection is closed")
return jsonify(retorno_error), 503
cursor.close()
connection.close()
print("POST OK. MySQL connection is closed")
retorno_json={
"Message": "Employee registered",
"Employee_ID": last_id
}
return retorno_json, 201
@app.route("/employee/<int:id>", methods=["GET", "PUT", "DELETE"])
def employee_id(id):
connection = db_connection()
cursor = connection.cursor()
if request.method == "GET":
employee_result = None
try:
sql = "SELECT * FROM Employee WHERE id = %s"
where = (id,)
cursor.execute(sql, where)
rows = cursor.fetchall()
for r in rows:
employee_result = r
except Exception as e:
retorno_error = {
"Message: " : "Error during execution",
"Exception: " : e
}
cursor.close()
connection.close()
print("GETID NOK. MySQL connection is closed")
return jsonify(retorno_error), 503
if employee_result is not None:
cursor.close()
connection.close()
print("GETID OK. MySQL connection is closed")
return jsonify(employee_result), 200
else:
retorno_json={
"Message": "Emloyee not found",
"Employee_ID": id
}
cursor.close()
connection.close()
print("GETID OK. MySQL connection is closed")
return retorno_json, 404
if request.method == "PUT":
body = request.json
if "name" not in body or "address" not in body or "birth" not in body or "department" not in body or "email" not in body:
retorno_error = {
"Message: " : "Must pass all fields: name, address, birth, department and email",
}
return jsonify(retorno_error), 400
old_id = id
new_name = body["name"]
new_address = body["address"]
new_birth_raw = body["birth"]
new_birth = datetime.strptime(new_birth_raw, '%Y-%m-%d').date()
new_department = body["department"]
new_email = body["email"]
if database_service.employee_exist(id) == False:
retorno_warning = {
"Message: " : "Employee not found",
"Employee ID" : id
}
return retorno_warning, 404
try:
sql_update = ("UPDATE Employee SET Name = %s, Address = %s, Birth = %s, Department = %s, Email = %s WHERE Id = %s")
tupla_user = (new_name, new_address, new_birth, new_department, new_email, old_id)
cursor.execute(sql_update, tupla_user)
connection.commit()
current_id = old_id
except Exception as e:
retorno_error = {
"Message: " : "Error during execution",
"Exception: " : e
}
cursor.close()
connection.close()
print("PUTID NOK. MySQL connection is closed")
return jsonify(retorno_error), 503
retorno_json={
"Message": "Employee updated",
"Employee_ID": old_id,
"Updated Fields" : body
}
cursor.close()
connection.close()
print("PUTID OK. MySQL connection is closed")
return retorno_json, 200
if request.method == "DELETE":
if database_service.employee_exist(id) == False:
retorno_warning = {
"Message: " : "Employee not found",
"Employee ID" : id
}
return retorno_warning, 404
try:
sql_delete = ("DELETE FROM Employee WHERE Id = %s")
tupla_user = (id,)
cursor.execute(sql_delete, tupla_user)
connection.commit()
except Exception as e:
retorno_error = {
"Message: " : "Error during execution",
"Exception: " : e
}
cursor.close()
connection.close()
print("DELID NOK. MySQL connection is closed")
return jsonify(retorno_error), 503
retorno_json={
"Message": "Employee deleted",
"Employee_ID": id,
}
cursor.close()
connection.close()
print("DELID OK. MySQL connection is closed")
return retorno_json, 200
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
19b16de0ef065f3a8c63d1aa8b0c09605b54ea69 | e44b2754d3cd9dbd7e419c44ca9f2a9efe00cb0b | /tictactoe.py | e93e54a52029b78dfc92ff4aa24629bfd543ef71 | [] | no_license | litannalex/JetBrains-Academy-Projects | 00126a4a9aef505fff79595c60fb9621e2dd74b9 | 896a38cfaee63bf5eaf7d7dcecef2fc1885f59b2 | refs/heads/master | 2022-11-11T09:19:04.019728 | 2020-06-27T00:09:19 | 2020-06-27T00:09:19 | 267,681,774 | 0 | 0 | null | 2020-06-29T17:45:19 | 2020-05-28T19:47:09 | Python | UTF-8 | Python | false | false | 2,863 | py | ROWS = 3
COLUMNS = 3
# takes 2 coordinates for a move,
# returns corresponding 1 coordinate in the list
def coordinate(c, r):
new_c = c - 1
new_r = ROWS - r
return new_r * COLUMNS + new_c
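# e.g. with ROWS = COLUMNS = 3: coordinate(1, 3) == 0 (top-left cell)
# and coordinate(3, 1) == 8 (bottom-right cell)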
# takes list with symbols, prints out the battlefield
def print_field(l):
print("---------")
for i in range(ROWS):
print("|", " ".join(l[i * ROWS:i * ROWS + COLUMNS]), "|")
print("---------")
# creates the nested list with three-in-a-row combinations
def three_rows(l):
    rows = [l[i * COLUMNS:(i + 1) * COLUMNS] for i in range(ROWS)]
columns = [l[0:7:3], l[1:8:3], l[2:9:3]]
diagonals = [l[0:9:4], l[2:7:2]]
three = [rows, columns, diagonals]
return three
# game set up: prints out empty fields, creates variable
field_list = list(' ' * 9)
print_field(field_list)
move_counter = 0
game_finished = False
while not game_finished:
# prompts the user to give coordinates for a move until valid
valid_input = False
valid_numbers = ['1', '2', '3']
move = -1
while not valid_input:
move_coordinates = input("Enter the coordinates: ").split()
if len(move_coordinates) != 2:
print("Enter exactly two numbers!")
elif not move_coordinates[0].isnumeric() or not move_coordinates[1].isnumeric():
print("You should enter numbers!")
elif move_coordinates[0] not in valid_numbers or move_coordinates[1] not in valid_numbers:
print("Coordinates should be from 1 to 3!")
else:
col, row = [int(i) for i in move_coordinates]
move = coordinate(col, row)
if field_list[move] not in [' ', '_']:
print("This cell is occupied! Choose another one!")
else:
valid_input = True
move_counter += 1
# writes user's move into the field list and outputs new field
if move_counter % 2 == 1:
field_list[move] = 'X'
else:
field_list[move] = 'O'
print_field(field_list)
# generates three-in-a-row combinations
three_in_a_row = three_rows(field_list)
# checks if input contains empty cells
empty_cells = False
for symbol in field_list:
if symbol in [' ', '_']:
empty_cells = True
# counts 3 in a row combinations for Xs and Os
winning = [['X'] * 3, ['O'] * 3]
x_three, o_three = 0, 0
for element in three_in_a_row:
for i in element:
if i == winning[0]:
x_three += 1
if i == winning[1]:
o_three += 1
# Prints game states
if x_three > 0 and o_three == 0:
print("X wins")
game_finished = True
elif o_three > 0 and x_three == 0:
print("O wins")
game_finished = True
elif (x_three == 0 and o_three ==0) and not empty_cells:
print("Draw")
game_finished = True
| [
"[email protected]"
] |