Dataset columns:
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 281)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 6 to 116)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (313 classes)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (18.2k to 668M, nullable)
- star_events_count: int64 (0 to 102k)
- fork_events_count: int64 (0 to 38.2k)
- gha_license_id: string (17 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (107 classes)
- src_encoding: string (20 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (4 to 6.02M)
- extension: string (78 classes)
- content: string (length 2 to 6.02M)
- authors: list (length 1)
- author: string (length 0 to 175)
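Each row of the table below is one source file together with its repository metadata and the full file content. A dump like this is normally consumed programmatically; below is a minimal sketch, assuming a Hugging Face `datasets`-style dataset with the schema above. The dataset name used here is a placeholder, not taken from this document.

```python
# Minimal sketch: stream rows with the schema above and keep small, permissively licensed files.
# "user/python-files" is a hypothetical dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-files", split="train", streaming=True)

for row in ds:
    if row["license_type"] == "permissive" and row["length_bytes"] < 2_000:
        print(row["repo_name"], row["path"], row["length_bytes"])
```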
10a6013dcc36183777720bbc2952c93d81e122df
|
0f60e5a4bffa7372f6461aba4f0e58de4e3508bb
|
/Pandas/panda21.py
|
00ddfd6fe203e441b705dfd802516e4eaf340740
|
[] |
no_license
|
akshatrastogi25/Python
|
519130d6671438d20b0e6928e597e2b9c5bf722f
|
a3e8a1cbc96d09e4f8a6674c23c74074bfb65a9a
|
refs/heads/master
| 2023-03-26T02:14:14.092925 | 2021-03-25T12:10:31 | 2021-03-25T12:10:31 | 286,788,623 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 436 |
py
|
import pandas as pd
one = pd.DataFrame({
'Name': ['Alex', 'Amy', 'Allen', 'Alice', 'Ayoung'],
'subject_id':['sub1','sub2','sub4','sub6','sub5'],
'Marks_scored':[98,90,87,69,78]},
index=[1,2,3,4,5])
two = pd.DataFrame({
'Name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'],
'subject_id':['sub2','sub4','sub3','sub6','sub5'],
'Marks_scored':[89,80,79,97,88]},
index=[1,2,3,4,5])
print(pd.concat([one,two],axis=1))
|
[
"[email protected]"
] | |
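The snippet above concatenates the two frames column-wise: with `axis=1`, `pd.concat` aligns `one` and `two` on their shared index 1 to 5 and produces a 5x6 frame, while `axis=0` would stack them into a 10x3 frame with repeated index values. A minimal sketch, assuming the `one` and `two` frames defined above:

```python
# Sketch: contrast the two concatenation axes (assumes `one` and `two` from the snippet above).
import pandas as pd

side_by_side = pd.concat([one, two], axis=1)  # 5 rows x 6 columns, joined on the index
stacked = pd.concat([one, two], axis=0)       # 10 rows x 3 columns, index values repeat
print(side_by_side.shape, stacked.shape)      # (5, 6) (10, 3)
```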
5cc36422b73d03749a636e7774ca3d172b0d9ff4
|
4d8ab37262db91870dbf85b03674b9586e1ce25c
|
/remindme.py
|
2870e2f7302021e1b1d42051d336b028e09821ba
|
[] |
no_license
|
aldnav/texmo
|
4865c1011dd5c31eaabc67f52e573475905f2e45
|
f751b68a1d3ab75b31e5b4a16a09458cf408c626
|
refs/heads/master
| 2020-03-19T05:18:21.307786 | 2018-06-21T23:50:44 | 2018-06-21T23:50:44 | 135,918,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,187 |
py
|
import os
import nexmo
import requests
from datetime import datetime
NEXMO_API_KEY = os.getenv('NEXMO_API_KEY')
NEXMO_API_SECRET = os.getenv('NEXMO_API_SECRET')
MYNUM = os.getenv('MYNUM')
OWMAP_KEY = os.getenv('OWMAP_KEY')
CITY_ID = 1701947
OWMAP_API_ENDPOINT = 'https://api.openweathermap.org/data/2.5/forecast?id=%s&appid=%s&units=metric'
client = nexmo.Client(key=NEXMO_API_KEY, secret=NEXMO_API_SECRET)
def send_messages(message):
print('Sending messages')
for m in message.split('\n'):
send_message(m)
def send_message(message='Hello self'):
print('Sending message')
client.send_message({
'from': 'aldnav',
'to': MYNUM,
'text': message
})
def get_forecast(city=CITY_ID):
print('Getting forecast')
r = requests.get(OWMAP_API_ENDPOINT % (city, OWMAP_KEY))
results = r.json()['list']
forecast = [
"%s %s %s" % (
datetime.fromtimestamp(x['dt']).strftime('%m-%d %I %p'),
x['weather'][0]['description'],
x['main']['temp'])
for x in list(results)
]
return '\n'.join(forecast)
if __name__ == '__main__':
forecast = get_forecast()
send_messages(forecast)
|
[
"[email protected]"
] | |
b8c70af9726a94eba9ac6a43188c0994be97dfcb
|
cdc9a8bc051be72de5bace23fd0637701d699da3
|
/preprocess/create_stanford_labels.py
|
880bf6d76e11854488987df9b35ea38a1836deac
|
[
"Apache-2.0"
] |
permissive
|
marshuang80/pe-slice-finder
|
4a51a8f7ef90f836d3cd5935f89a3e7f13c1fd63
|
2426a55c404e8eb694110351d604d6bdd613e5ae
|
refs/heads/master
| 2022-12-29T02:20:42.135931 | 2020-10-13T04:16:47 | 2020-10-13T04:16:47 | 296,091,898 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,236 |
py
|
import os
import sys
sys.path.append(os.getcwd())
import h5py
import pickle
import argparse
import pandas as pd
from constants import *
from tqdm import tqdm
from collections import defaultdict
def main(args):
# create hdf5 file
hdf5_fh = h5py.File(args.hdf5_file, 'a')
slice_labels = pickle.load(open(args.pickle_file, 'rb'))
results = defaultdict(list)
for series in hdf5_fh.keys():
# skip if no labels
if series not in slice_labels.keys():
continue
for slice_idx in range(hdf5_fh[series].shape[0]):
label = 1 if slice_idx in slice_labels[series] else 0
results['series'].append(series)
results['slice_idx'].append(slice_idx)
results['label'].append(label)
# save as csv
df = pd.DataFrame.from_dict(results)
df.to_csv('slice_labels.csv')
# clean up
hdf5_fh.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--hdf5_file', type=str, default='/data4/PE_stanford/Stanford_data/data.hdf5')
parser.add_argument('--pickle_file', type=str, default='/data4/PE_stanford/Stanford_data/slice_labels.pkl')
args = parser.parse_args()
main(args)
|
[
"[email protected]"
] | |
256a6d27be8d5d13a5dd9e245e5f27869dd2051e
|
b04cd64d459c607591c319732f1214afe5ed011b
|
/practice/orm外键练习.py
|
fc302d552a6d7a571dc46676e1517c22210409d0
|
[] |
no_license
|
xm369083227/pyScripts
|
fb318938946eec8e5557095187d6f58e433b2309
|
26ce200fe3e51bf699ffca0588a63c91a355bdfc
|
refs/heads/master
| 2020-03-27T23:31:00.003520 | 2018-09-20T06:06:33 | 2018-09-20T06:06:33 | 147,323,947 | 0 | 0 | null | 2018-09-05T09:56:48 | 2018-09-04T09:47:53 |
Python
|
UTF-8
|
Python
| false | false | 2,254 |
py
|
#author:xm
#coding:utf-8
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column,Integer,String,DATE,ForeignKey
from sqlalchemy.orm import sessionmaker,relationship
# connect to the MySQL engine; echo toggles logging of what the program runs
engine = create_engine("mysql+pymysql://root:[email protected]/test1",encoding='utf-8',echo=True)
Base = declarative_base()  # create the ORM declarative base class
class Student(Base):
__tablename__ = 'student'  # table name
id = Column(Integer,primary_key=True)
name = Column(String(64),nullable=False)
register_date = Column(DATE,nullable=False)
gender = Column(String(32),nullable=False)
class Study(Base):
__tablename__ = 'study_record'  # table name
id = Column(Integer,primary_key=True)
day = Column(Integer,nullable=False)
status = Column(String(32),nullable=False)
stu_id = Column(Integer,ForeignKey("student.id"))  # foreign key to student.id
# through this relationship a Study row can reach its Student, and Student gains a my_record backref for querying its study records
student = relationship("Student", backref="my_record")
Base.metadata.create_all(engine)  # create the table schema
Session_class = sessionmaker(bind=engine)  # create the session class bound to the engine; note this returns a class, not an instance
session = Session_class()  # instantiate a session
# s1 = Student(name="xm",register_date="2018-09-05",gender="M")
# s2 = Student(name="uzi",register_date="2018-08-05",gender="M")
# s3 = Student(name="mlxg",register_date="2018-07-05",gender="M")
# s4 = Student(name="faker",register_date="2018-06-05",gender="M")
#
# # stu_id 1 is xm, 2 is uzi
# stu_obj1 = Study(day=1,status="YES",stu_id=1)
# stu_obj2 = Study(day=2,status="NO",stu_id=1)
# stu_obj3 = Study(day=3,status="YES",stu_id=1)
# stu_obj4 = Study(day=4,status="YES",stu_id=2)
#
# # add the object instances to the database session
# session.add_all([s1,s2,s3,s4,stu_obj1,stu_obj2,stu_obj3,stu_obj4])
# query all rows whose student name is "xm"
stu_obj = session.query(Student).filter(Student.name=="xm").all()
# through the Student object, the my_record backref gives access to the related Study objects
print("teacher:"+stu_obj[0].name,"day:",stu_obj[0].my_record[0].day,"status:"+stu_obj[0].my_record[0].status)
session.commit()  # commit
|
[
"[email protected]"
] | |
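A small follow-up sketch for the ORM snippet above (it assumes the models and session defined there, and that the commented-out seed rows have been inserted): the relationship is navigable from both sides, from a Study row to its Student via `student`, and back via the `my_record` backref.

```python
# Sketch: walk the relationship from the Study side (assumes the models and session above).
record = session.query(Study).filter(Study.day == 1).first()
if record is not None:
    print(record.student.name)            # the Student that owns this study record
    print(len(record.student.my_record))  # all study records of that student, via the backref
```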
87769467ecf3d65809aa4655fc0bb940bc6f246b
|
eb99f42ae01901a3d4a249b75555c6a7749c02af
|
/AES_CBC/main.py
|
97cbfa9ebd5e9fb3583fa5dcc6901938ba78b9cc
|
[] |
no_license
|
mylenrise/Cryptography
|
e89109142fb0fcbad5cf6e1a07496feea0cb4e2c
|
a7726c33f1d7d2af30eca281526c983ccf2e4fc2
|
refs/heads/master
| 2020-04-28T13:10:39.801307 | 2019-09-08T11:47:26 | 2019-09-08T11:47:26 | 175,300,036 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,485 |
py
|
import pickle
import binascii
FILENAME = "text.dat"
with open(FILENAME, "rb") as file:
text = pickle.load(file)
blockSize = 32
key = "dead"
def toText(bits, encoding='utf-8', errors='surrogatepass'):
n = int(bits, 2)
return n.to_bytes((n.bit_length() + 7) // 8, 'big').decode(encoding, errors) or '\0'
def toBits(text, encoding='utf-8', errors='surrogatepass'):
bits = bin(int.from_bytes(text.encode(encoding, errors), 'big'))[2:]
return bits.zfill(8 * ((len(bits) + 7) // 8))
def split(text, blockSize):
blocks = []
fill = blockSize - len(text) % blockSize
blocks_amount = len(text) // blockSize
for i in range(blocks_amount):
blocks.append(text[i * blockSize:i * blockSize + blockSize])
blocks.append(text[blocks_amount * blockSize:] + '1' + '0' * (fill - 1))
return blocks
def XoR(block, key):
res = ''
for i in range(len(block)):
res += str((int)(block[i]) ^ (int)(key[i]))
return res
# s_encrypt=[0xf, 0x3, 0xa, 0x8, 0x5, 0xd, 0x6, 0xe, 0x4, 0x7, 0xc, 0x9, 0x2, 0x0, 0x1, 0xb]
s_encrypt_table = ['1111', '0011', '1010', '1000', '0101', '1101', '0110', '1110', '0100', '0111', '1100', '1001',
'0010', '0000', '0001', '1011']
s_decrypt_table = ['1101', '1110', '1100', '0001', '1000', '0100', '0110', '1001', '0011', '1011', '0010', '1111',
'1010', '0101', '0111', '0000']
p_encrypt_table = [3, 23, 17, 9, 21, 19, 2, 15, 24, 16, 14, 25, 30, 26, 11, 28, 8, 27, 29, 22, 1, 31, 7, 10, 18, 5, 0,
4, 12, 6, 13, 20]
p_decrypt_table = [26, 20, 6, 0, 27, 25, 29, 22, 16, 3, 23, 14, 28, 30, 10, 7, 9, 2, 24, 5, 31, 4, 19, 1, 8, 11, 13, 17,
15, 18, 12, 21]
def s_encrypt(block):
res = ''
for i in range(blockSize // 4):
res += s_encrypt_table[int(block[i * 4:i * 4 + 4], base=2)]
return res
def s_decrypt(block):
res = ''
for i in range(blockSize // 4):
res += s_decrypt_table[int(block[i * 4:i * 4 + 4], base=2)]
return res
def p_encrypt(block):
res = ''
for i in range(blockSize):
res += block[p_encrypt_table[i]]
return res
def p_decrypt(block):
res = ''
for i in range(blockSize):
res += block[p_decrypt_table[i]]
return res
def delete_fill(block):
n = len(block) - len(block.rstrip("0"))
return block[:len(block) - n - 1]
key = toBits(key)
def encrypt():
blocks = split(toBits(text), blockSize)
xorBlocks = []
for block in blocks:
xorBlocks.append(XoR(block, key))
subBlocks = []
for block in xorBlocks:
subBlocks.append(s_encrypt(block))
res = []
for block in subBlocks:
res.append(p_encrypt(block))
return res
encrypt_blocks = encrypt()
print("***ENCRYPTION RESULT***")
print(encrypt_blocks)
print()
def decrypt():
decrypt_temp = []
for block in encrypt_blocks:
decrypt_temp.append(XoR(s_decrypt(p_decrypt(block)), key))
decrypt_temp[len(decrypt_temp) - 1] = (delete_fill(decrypt_temp[len(decrypt_temp) - 1]))
res = []
for block in decrypt_temp:
res.append(toText(block))
return res
Lenore = ''.join(decrypt())
print("***DECRYPTION RESULT***")
print(Lenore)
if text == Lenore:
print("\n***DECRYPTION SUCCESSED***")
else:
print("\n***DECRYPTION FAILED***")
|
[
"[email protected]"
] | |
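A quick consistency check for the block cipher snippet above (a sketch that assumes its tables and `blockSize` are in scope): decryption can only succeed if the S-box and P-box decryption tables are the inverse permutations of the encryption tables.

```python
# Sketch: verify the substitution and permutation tables invert each other.
for i, nibble in enumerate(s_encrypt_table):
    assert s_decrypt_table[int(nibble, 2)] == format(i, '04b')
for i in range(blockSize):
    assert p_decrypt_table[p_encrypt_table[i]] == i
print("S-box and P-box tables are consistent inverses")
```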
a451e7f6d55784dc1c2e5f0f53537cc861388029
|
96cebf0a96ce88ee10c5b73006e82f82e03e3445
|
/data_loader/setup_database.py
|
2f16a4125c0ec95865dc063346b818618f9e6428
|
[] |
no_license
|
madhukar01/fampay_ytsearch
|
6d97067555807b6d221996105308f282b764d074
|
34f7da7587b6e09c097c96eef506d6ea09a3c993
|
refs/heads/master
| 2022-12-10T02:18:49.319391 | 2020-08-26T08:39:47 | 2020-08-26T08:39:47 | 290,190,217 | 0 | 0 | null | 2020-08-26T08:39:48 | 2020-08-25T10:57:18 | null |
UTF-8
|
Python
| false | false | 699 |
py
|
from rethinkdb import RethinkDB
###############################################################################
# Database setup script
###############################################################################
def setup_database():
r = RethinkDB()
r.connect(host='localhost', port=28015).repl()
try:
# Create databases
r.db_create('platform').run()
# Create tables
r.db('platform').table_create('videos',
primary_key='video_id').run()
except Exception as e:
print('Database is already setup')
else:
print('Database setup successful')
if __name__ == '__main__':
setup_database()
|
[
"[email protected]"
] | |
9439da95bdf627509cf8fe25d37f12226346b06e
|
157112318ce2f16c91b2f4cf27913dbb5d134379
|
/text_clsf_lib/preprocessing/vectorization/data_vectorizers.py
|
0bc6d3528213d662ee961c4ef898ad4e87985b40
|
[] |
no_license
|
dawidbrzozowski/sentiment_analysis
|
550f8258f9dcd0f70cc97c5ad0ac2167e948d028
|
f016984a560cfd437cbcef59cdf5aae67916cabb
|
refs/heads/master
| 2023-02-17T20:11:14.915771 | 2021-01-18T18:26:30 | 2021-01-18T18:26:30 | 330,398,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 933 |
py
|
from text_clsf_lib.preprocessing.vectorization.output_vectorizers import OutputVectorizer
from text_clsf_lib.preprocessing.vectorization.text_vectorizers import TextVectorizer
class DataVectorizer:
"""
This class is meant to vectorize X and y (texts and outputs).
To perform that, it uses TextVectorizer and OutputVectorizer.
vectorize(...) method should return X and y vectorized.
"""
def __init__(self, text_vectorizer: TextVectorizer, output_vectorizer: OutputVectorizer):
self.text_vectorizer = text_vectorizer
self.output_vectorizer = output_vectorizer
def fit(self, texts, outputs):
self.text_vectorizer.fit(texts)
self.output_vectorizer.fit(outputs)
def vectorize(self, texts, outputs):
return self.text_vectorizer.vectorize(texts), self.output_vectorizer.vectorize(outputs)
def save(self, save_dir):
self.text_vectorizer.save(save_dir)
|
[
"[email protected]"
] | |
0a2c635b30ba3fceb06674f761530776ee51e64e
|
c1a05663947ad6ba03183c91add95500b622d74e
|
/PYTHON/code/failed/grammar.py
|
e452f842db06f513198551ec78edfe6af7e747ee
|
[] |
no_license
|
qhb1001/A-simple-interpreter
|
c90ca6334cf448a98ee0a38a2d4edabb8e2d745a
|
ed9200bfc7d50f5a2f1fc6132372d025d836da4d
|
refs/heads/master
| 2020-04-10T09:34:38.303791 | 2019-05-08T08:46:48 | 2019-05-08T08:46:48 | 160,941,087 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,071 |
py
|
import exceptions
import lexical
# first deal with the brackets
def check_brackets(lines):
line_number = 0
# bail out if there are no lines to check
if lines == None:
exit()
for line in lines:
stack = []
line_number += 1
for token in line:
# if this is a '(', then just push it into the stack
if token.token_type == lexical.TokenType.L_BRACKET:
stack.append('(')
# else if this is a ')', then check is there exists a '(' on top of stack
elif token.token_type == lexical.TokenType.R_BRACKET:
if len(stack) == 0:
raise exceptions.BracketException("BracketException in line", line_number, ", missing left bracket.")
elif stack.pop() != '(':
raise exceptions.BracketException("BracketException in line", line_number, ", missing right bracket.")
if len(stack) != 0:
raise exceptions.BracketException("BracketException in line", line_number, ", missing right bracket.")
def check_for(line, line_number):
# the syntax of the FOR-LOOP statement should be
# FOR T FROM CONST_ID TO CONST_ID STEP CONST_ID DRAW (T,T)
check_list = []
check_list.append(lexical.TokenType.FOR)
check_list.append(lexical.TokenType.T)
check_list.append(lexical.TokenType.FROM)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.TO)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.STEP)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.DRAW)
check_list.append(lexical.TokenType.L_BRACKET)
check_list.append(lexical.TokenType.T)
check_list.append(lexical.TokenType.COMMA)
check_list.append(lexical.TokenType.T)
check_list.append(lexical.TokenType.R_BRACKET)
length = len(check_list)
idx = 0
for token in line:
if token.token_type == check_list[idx]:
idx += 1
if idx == length:
break
if idx != length:
raise exceptions.SyntaxNotMatched("Syntax not matched exception in line", line_number)
def check_rot(line, line_number):
# the syntax of the ROT-IS statement should be
# ROT IS CONST_ID
check_list = []
check_list.append(lexical.TokenType.ROT)
check_list.append(lexical.TokenType.IS)
check_list.append(lexical.TokenType.CONST_ID)
length = len(check_list)
idx = 0
for token in line:
if token.token_type == check_list[idx]:
idx += 1
if idx == length:
break
if idx != length:
raise exceptions.SyntaxNotMatched("Syntax not matched exception in line", line_number)
def check_scale(line, line_number):
# the syntax of the SCALE-IS statement should be
# SCALE IS (CONST_ID,CONST_ID)
check_list = []
check_list.append(lexical.TokenType.SCALE)
check_list.append(lexical.TokenType.IS)
check_list.append(lexical.TokenType.L_BRACKET)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.COMMA)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.R_BRACKET)
length = len(check_list)
idx = 0
for token in line:
if token.token_type == check_list[idx]:
idx += 1
if idx == length:
break
if idx != length:
raise exceptions.SyntaxNotMatched("Syntax not matched exception in line", line_number)
def check_origin(line, line_number):
# the syntax of the ORIGIN-IS statement should be
# ORIGIN IS (CONST_ID,CONST_ID)
check_list = []
check_list.append(lexical.TokenType.ORIGIN)
check_list.append(lexical.TokenType.IS)
check_list.append(lexical.TokenType.L_BRACKET)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.COMMA)
check_list.append(lexical.TokenType.CONST_ID)
check_list.append(lexical.TokenType.R_BRACKET)
length = len(check_list)
idx = 0
for token in line:
if token.token_type == check_list[idx]:
idx += 1
if idx == length:
break
if idx != length:
raise exceptions.SyntaxNotMatched("Syntax not matched exception in line", line_number)
def check_grammar(lines):
try:
check_brackets(lines)
line_number = 0
for line in lines:
line_number += 1
if line[0].token_type == lexical.TokenType.FOR:
check_for(line, line_number)
elif line[0].token_type == lexical.TokenType.ROT:
check_rot(line, line_number)
elif line[0].token_type == lexical.TokenType.SCALE:
check_scale(line, line_number)
elif line[0].token_type == lexical.TokenType.ORIGIN:
check_origin(line, line_number)
except exceptions.BracketException as e:
print("GRAMMAR EXCEPTION:", e.args)
except exceptions.SyntaxNotMatched as e:
print("GRAMMAR EXCEPTION:", e.args)
|
[
"[email protected]"
] | |
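The four `check_*` functions in the snippet above repeat the same in-order matching loop (which silently tolerates extra tokens between the expected ones). A possible consolidation is sketched below; it is not taken from the repository and assumes its `exceptions` and `lexical` modules.

```python
import exceptions
import lexical

def check_sequence(line, line_number, expected_types):
    # raise SyntaxNotMatched unless the expected token types appear in this order in `line`
    idx = 0
    for token in line:
        if idx < len(expected_types) and token.token_type == expected_types[idx]:
            idx += 1
    if idx != len(expected_types):
        raise exceptions.SyntaxNotMatched("Syntax not matched exception in line", line_number)

# e.g. check_rot(line, n) becomes:
# check_sequence(line, n, [lexical.TokenType.ROT, lexical.TokenType.IS, lexical.TokenType.CONST_ID])
```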
f2671d36c0e36c603edd4e644c8fe5e3c47c3207
|
ad3eca78df0539defdf3697de863fd4f2334ae5d
|
/kfold/kfold-13.py
|
3cea01a6f082375bd88c1f9be112a4e6265ffc4e
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
AinoraZ/similarity
|
6f74ca826d9123cd5d69832047dec4a4cbde9a72
|
25ef2b613f135b7ac06abdfa4f8e161b0009f324
|
refs/heads/master
| 2021-03-10T00:26:47.435985 | 2020-04-25T11:33:58 | 2020-04-25T11:33:58 | 246,398,657 | 0 | 3 | null | 2020-06-03T10:02:08 | 2020-03-10T20:14:13 |
Python
|
UTF-8
|
Python
| false | false | 269 |
py
|
import glob, os
dataset_dir = '/opt/datasets/data/simulated_flight_1/train/'
test_data_dir = dataset_dir + '0/*'
all_files = glob.glob(test_data_dir)
all_files.sort()
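# move the files at indices 2304-2496 (a 192-file slice, presumably fold 13 given the filename) from train/0 to test/0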
for filename in all_files[2304:2496]:
os.rename(filename, filename.replace('/train/', '/test/'))
|
[
"[email protected]"
] | |
163e0034af894a9a7ead6fc469d11e2a2b917cf5
|
58b3ef7362ad040f4ea3b976837fa023af8f2426
|
/GoodShearTestCase/ToastDemo.py
|
f91bc5b7585870b461b46106d1ebecbfff9b837c
|
[] |
no_license
|
Ghuashang/GoodShears
|
1b76d9d670954c7b7fc8cb8e381f9ca5c0142917
|
89bdbc52320d2a08dd029bfc7e9734646565498c
|
refs/heads/master
| 2021-03-07T09:40:34.284168 | 2020-03-10T09:25:21 | 2020-03-10T09:25:21 | 246,258,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,768 |
py
|
# import the webdriver libraries
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time
# define an empty dict for the desired capabilities
device_info={}
device_info['platformName']='Android'  # device platform
device_info['platformVersion']="6.0.1"  # OS version
device_info["deviceName"]="DUK_AL20"  # device name
device_info["device"]="hlteuc"  # device vendor info
device_info["app"]="/Users/yenuo/Documents/专高6项目实战/案例应用程序/mncg.apk"  # absolute path to the app
device_info["noReset"]=True  # whether to reset the app
device_info["appPackage"]='com.jhss.youguu'  # app package name
device_info["appActivity"]=".SplashActivity"  # launch activity of the app
device_info['automationName']='uiautomator2'  # needed to handle toast messages
# use the WebDriver protocol to remotely (Remote) connect Appium ('http://127.0.0.1:4723/wd/hub') with the device (device_info)
driver=webdriver.Remote('http://127.0.0.1:4723/wd/hub',device_info)
time.sleep(7)
# tap the "模拟炒股" (simulated trading) icon
tipElement=driver.find_element_by_id("com.jhss.youguu:id/rl_bottom_tab")
tipElement.find_element_by_xpath("//*[@class='android.widget.ImageView' and @index='0']").click()
time.sleep(2)
# tap "claim now"
driver.find_element_by_id("com.jhss.youguu:id/login_by_simulate").click()
time.sleep(2)
# tap the login button
driver.find_element_by_id("com.jhss.youguu:id/bt_login").click()
# expected result ("please enter your phone number")
expValue="请输入手机号"
# wait for the toast
Toast=WebDriverWait(driver,10,0.5).until(lambda x:x.find_element_by_xpath("//*[@text='请输入手机号']"))
# the toast text is the actual result
actValue=Toast.text
# compare the actual result with the expected one
if actValue==expValue:
    print("pass")
else:
    print("Fail")
# teardown
driver.quit()
'''
A complete automated test case contains the following parts:
initialization
test data construction and execution
assertion
teardown / cleanup
'''
|
[
"[email protected]"
] | |
8e37255157122eeb58f0dbe4739806864330ef96
|
31cccd2ece781295f4d0b280bbe4019f0f94fa00
|
/PYTHON/Bank.py
|
cce2d02e9b5ad5f977c6e95550b20f7c521b22e1
|
[] |
no_license
|
jdshahexplore/shah
|
6f2dfbcbcfb99ba8ebdca8765f12efc4f9414324
|
92058c5dec0a3f47b96f8badb24230a08e80971b
|
refs/heads/master
| 2020-03-31T00:51:43.104075 | 2018-10-09T13:03:11 | 2018-10-09T13:03:11 | 151,757,071 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,681 |
py
|
#random class for generating random number
import os
import random
import pickle
import getpass
#Class for storing account details
class Bank:
#This function displays a form for new customer
def getdata(self):
self.Name=input("Enter Account Holder Name\t:\t")
self.Address=input("Enter Address\t\t\t:\t")
self.Mobile=input("Enter Mobile Number\t\t:\t")
self.Branch=input("Enter Branch Name\t\t:\t")
self.Acctype=input("Enter Account type(S/C)\t\t:\t")
self.Balance=input("Enter Balance\t\t\t:\t")
self.Accno=str(20180000+random.randint(1,9999))
self.Cardno=str(81020000+random.randint(1,9999))
self.Cvv=str(random.randint(100,999))
self.Pin=str(random.randint(1000,9999))
#This function writes the details of customer in file
def putfile(self):
while os.path.exists(self.Accno):
self.Accno=str(random.randint(1,9999))
fout=open(self.Accno,"wb")
pickle.dump(self,fout)
fout.close()
print("\n--------Account Created Successfully--------\n")
ch=getpass.getpass("\nPress Enter\n")
os.system('cls')
print("\n--------------Your Credentials--------------\n")
print("\t\tYour Account No\t:\t",self.Accno)
print("\t\tYour Card No\t:\t",self.Cardno)
print("\t\tYour CVV No\t:\t",self.Cvv)
print("\t\tYour Pin No\t:\t",self.Pin)
ch=getpass.getpass("\n\t\t\tPress Enter\n")
os.system('cls')
#This function displays details of requested user
def putdata(acc):
if os.path.exists(acc):
fin=open(acc,"rb")
b=Bank()
b=pickle.load(fin)
fin.close()
pin=getpass.getpass("Enter Pin No\t\t:\t")
os.system('cls')
if(pin==b.Pin):
print("Account Holder Name\t:\t",b.Name)
print("Resident Address\t:\t",b.Address)
print("Mobile Number\t\t:\t",b.Mobile)
print("Bank Branch Name\t:\t",b.Branch)
print("Account Balance\t\t:\t",b.Balance)
print("Type of Account\t\t:\t",b.Acctype)
else:
print("Pin is invalid")
else:
print("Customer doesn't exist");
ch=getpass.getpass("\n\t\t\tPress Enter\n")
os.system('cls')
def menu():
print("\n\n1: Add new Customer")
print("2: Display Detail")
while True:
menu()
c=int(input("Enter Choice : "))
if (c==1):
b1=Bank()
b1.getdata()
b1.putfile()
elif (c==2):
acc=input("\nEnter Account No\t:\t")
putdata(acc)
else:
break;
|
[
"[email protected]"
] | |
c52a5c1676bcf0d5ccf2d2f211d671bdb31e6b0b
|
a51e4a1299cd5e2f5a5205f49a9c82a3e9e6939d
|
/1.3 Data Preprocessing with Scikit-Learn/7.2 Labeled Data.py
|
4fc125b2f5df3eb197300115e2699c650600bec4
|
[] |
no_license
|
Deviloxide/Introduction-to-Machine-Learning
|
0309861a63e91766eadf4be64d57dbe18efe359a
|
9599066dd747e2f4deb1b87b10ba7abd7c3ed314
|
refs/heads/main
| 2023-05-26T00:14:14.764233 | 2021-06-09T15:36:55 | 2021-06-09T15:36:55 | 373,716,827 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,099 |
py
|
from sklearn.datasets import load_breast_cancer
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# The coding exercise in this chapter involves completing the separate
# _components function, which will separate principal component data by
# class.
# To do this, we first need to complete a helper function, get_label_info,
# which returns the label name and data for an input class label.
# The component_data input represents the principal component data.
# The labels input is a 1-D array containing the class label IDs
# corresponding to each row of component_data. We can use it to separate
# the principal components by class.
# The class_label input represents a particular class label ID.
# The label_names input represents all the string names for the class
# labels.
# Set label_name equal to the string at index class_label of label_names.
# Set label_data equal to the rows of component_data corresponding to the
# indexes where labels equals class_label. Then return the tuple
# (label_name, label_data).
def get_label_info(component_data, labels,
class_label, label_names):
label_name = label_names[class_label]
label_data = component_data[labels == class_label]
return label_name, label_data
# Now, inside the main separate_data function, we'll iterate through each
# label in the label_names list.
# Set separated_data equal to an empty list.
# Create a for loop that iterates variable class_label through
# range(len(label_names)).
# Inside the for loop, we can use our helper function to obtain the
# separated data for each class.
# Inside the for loop, call get_label_info with inputs component_data,
# labels, class_label, and label_names. Append the function's output to
# separated_data.
# After finalizing the list of principal components separated by class,
# we return it.
# Outside the for loop, return separated_data.
def separate_data(component_data, labels,
label_names):
separated_data = []
for class_label in range(len(label_names)):
separated_data.append(get_label_info(
component_data, labels, class_label, label_names))
return separated_data
# The separate_data function is incredibly useful for visualizing the
# components. We can use matplotlib to create nice plots of the separated
# data (shown in the code below).
bc = load_breast_cancer()
pca_obj = PCA(n_components=2)
component_data = pca_obj.fit_transform(bc.data)
labels = bc.target
label_names = bc.target_names
# Using the completed separate_data function
separated_data = separate_data(component_data,
labels, label_names)
# Plotting the data
for label_name, label_data in separated_data:
col1 = label_data[:, 0] # 1st column (1st pr. comp.)
col2 = label_data[:, 1] # 2nd column (2nd pr. comp.)
plt.scatter(col1, col2, label=label_name) # scatterplot
plt.legend() # adds legend to plot
plt.title('Breast Cancer Dataset PCA Plot')
plt.show()
|
[
"[email protected]"
] | |
1ceb6046b361c47e2aeb6f2e8d5fcf37b576a647
|
ffa2e1cd0adc8cb9f548bf322f6a0d81f101035b
|
/cloudbaseinit/tests/plugins/common/userdataplugins/cloudconfigplugins/test_set_hostname.py
|
a84e2ba8ae4d400bba17b18814b9a7682fbe9d82
|
[
"Apache-2.0"
] |
permissive
|
ader1990/cloudbase-init-1
|
87c754c9374797a476eb2b75ed0d3b3d96de6d14
|
4b0d94cd0fe484c2f323fb47f14c8563b4cfdfb2
|
refs/heads/master
| 2023-08-07T18:57:48.478125 | 2020-02-07T17:47:25 | 2020-04-03T17:10:09 | 25,139,191 | 0 | 2 |
Apache-2.0
| 2020-04-14T10:50:31 | 2014-10-13T02:05:45 |
Python
|
UTF-8
|
Python
| false | false | 1,476 |
py
|
# Copyright 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo_config import cfg
from cloudbaseinit.plugins.common.userdataplugins.cloudconfigplugins import (
set_hostname
)
CONF = cfg.CONF
class Set_HostNamePluginPluginTest(unittest.TestCase):
def setUp(self):
self._sethost_name_plugin = set_hostname.SetHostnamePlugin()
@mock.patch('cloudbaseinit.utils.hostname')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def test_process(self, mock_get_os_utils, mock_hostname):
mock_data = "fake_data"
mock_os_util = mock.MagicMock()
mock_os_util.set_hostname.return_value = (mock_data, True)
mock_get_os_utils.return_value = mock_os_util
result_process = self._sethost_name_plugin.process(mock_data)
self.assertTrue(result_process)
|
[
"[email protected]"
] | |
3d85b60c535e510cd5fac34fa8f7216174d05472
|
79218f2de150f9681beed51658cd23bd291d4933
|
/docs_build/conf.py
|
902accac229e9d24dd94619d6c7156d6d7d9e340
|
[
"Apache-2.0"
] |
permissive
|
thomaskwscott/kafka-connect-shell-source
|
9d590fdabf059bf203efb419132028525dfae2df
|
c07b4d0b53fefb4d6e1ef3b964576096a6f0fec2
|
refs/heads/master
| 2020-04-23T05:17:07.156960 | 2019-02-23T21:10:14 | 2019-02-23T21:10:14 | 170,935,439 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,558 |
py
|
# -*- coding: utf-8 -*-
#
# Kafka Connect Shell documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 17 14:17:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.httpdomain']
def setup(app):
app.add_config_value('platform_docs', True, 'env')
# Even if it has a default, these options need to be specified
platform_docs = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kafka Connect Shell Source'
copyright = u'2018, 3Fi LTD.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.1'
# The full version, including alpha/beta/rc tags.
release = '5.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KafkaConnectShellSourceDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'KafkaConnectShellSource.tex', u'Kafka Connect Shell Source Documentation',
u'3Fi LTD', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kafkaconnectshellsource', u'Kafka Connect Shell Source',
[u'3Fi LTD'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KafkaConnectShellSource', u'Kafka Connect Shell Source Documentation',
u'3Fi LTD', 'KafkaConnectShellSource',
'Kafka Connector for Shell sources',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"[email protected]"
] | |
f9df0948e54785143d5fea418eb952470e8e1864
|
a809187985e3aa4ffd35a0bbadf5f39ccb29f304
|
/parabola_functions22.py
|
c9a146f11b2a3eeea0e7683452afd07799e43e98
|
[] |
no_license
|
RajrocksIT/python_parabola_functions
|
95f3b52777fdbe955d0581182dfa680bf5a8fa26
|
b33bddc94f3ac5f310a64e687d9857cdccd49ba4
|
refs/heads/master
| 2020-06-08T01:15:36.210477 | 2019-06-21T16:46:53 | 2019-06-21T16:46:53 | 193,131,931 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,576 |
py
|
# Now let's make changes so that it returns to its original form.
try:
import tkinter
except ImportError:
import Tkinter as tkinter
def parabola(x):
y = x * x / 100
return y
def draw_axes(page): # canvas changed to page
page.update() # canvas changed to page
x_origin = page.winfo_width() / 2 # canvas changed to page
y_origin = page.winfo_height() / 2 # canvas changed to page
page.configure(scrollregion=(-x_origin, -y_origin, x_origin, y_origin)) # canvas changed to page
page.create_line(-x_origin, 0, x_origin, 0, fill="black") # canvas changed to page
page.create_line(0, y_origin, 0, -y_origin, fill="black") # canvas changed to page
print(locals()) # newly added.
def plot(canvas, x, y):
canvas.create_line(x, y, x+1, y+1, fill="red")
mainWindow = tkinter.Tk()
mainWindow.title("Parabola")
mainWindow.geometry('640x480')
canvas = tkinter.Canvas(mainWindow, width=640, height=480) # width is changed from 320 to 640
canvas.grid(row=0, column=0)
#canvas2 = tkinter.Canvas(mainWindow, width=320, height=480, background="blue")
#canvas2.grid(row=0, column=1)
#print(repr(canvas), repr(canvas2))
draw_axes(canvas)
#draw_axes(canvas2)
for x in range(-100, 100):
y = parabola(x)
print(y)
plot(canvas, x, -y)
mainWindow.mainloop()
|
[
"[email protected]"
] | |
afd09bb83a3a90a1805d56e217fcd7384f04c39c
|
b4f1f3d454306ba6dcf1d197dbcf80f9d4326a97
|
/venv/lib/python2.7/site-packages/troposphere/redshift.py
|
056d6102a4227599d8bb11f130bbdf6823b70faa
|
[] |
no_license
|
clearnote01/i-am-a-pokemon
|
d81ffbbef7b90594798509b0fafc1ef0577d2553
|
e3b12c0cb000c89a5007c88608b783b68fd0f8a3
|
refs/heads/master
| 2021-01-19T17:36:34.391035 | 2017-04-15T17:41:04 | 2017-04-15T17:41:04 | 88,333,579 | 1 | 1 | null | 2020-07-25T23:46:29 | 2017-04-15T08:11:26 |
Python
|
UTF-8
|
Python
| false | false | 2,692 |
py
|
# Copyright (c) 2014, Guillem Anguera <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class Cluster(AWSObject):
resource_type = "AWS::Redshift::Cluster"
props = {
'AllowVersionUpgrade': (boolean, False),
'AutomatedSnapshotRetentionPeriod': (integer, False),
'AvailabilityZone': (basestring, False),
'ClusterParameterGroupName': (basestring, False),
'ClusterSecurityGroups': (list, False),
'ClusterSubnetGroupName': (basestring, False),
'ClusterType': (basestring, True),
'ClusterVersion': (basestring, False),
'DBName': (basestring, True),
'ElasticIp': (basestring, False),
'Encrypted': (boolean, False),
'HsmClientCertificateIdentifier': (basestring, False),
'HsmConfigurationIdentifier': (basestring, False),
'IamRoles': ([basestring], False),
'KmsKeyId': (basestring, False),
'MasterUsername': (basestring, True),
'MasterUserPassword': (basestring, True),
'NodeType': (basestring, True),
'NumberOfNodes': (integer, False), # Conditional
'OwnerAccount': (basestring, False),
'Port': (integer, False),
'PreferredMaintenanceWindow': (basestring, False),
'PubliclyAccessible': (boolean, False),
'SnapshotClusterIdentifier': (basestring, False),
'SnapshotIdentifier': (basestring, False),
'VpcSecurityGroupIds': (list, False),
}
class AmazonRedshiftParameter(AWSProperty):
props = {
'ParameterName': (basestring, True),
'ParameterValue': (basestring, True),
}
class ClusterParameterGroup(AWSObject):
resource_type = "AWS::Redshift::ClusterParameterGroup"
props = {
'Description': (basestring, True),
'ParameterGroupFamily': (basestring, True),
'Parameters': ([AmazonRedshiftParameter], False),
}
class ClusterSecurityGroup(AWSObject):
resource_type = "AWS::Redshift::ClusterSecurityGroup"
props = {
'Description': (basestring, True),
}
class ClusterSecurityGroupIngress(AWSObject):
resource_type = "AWS::Redshift::ClusterSecurityGroupIngress"
props = {
'ClusterSecurityGroupName': (basestring, True),
'CIDRIP': (basestring, False),
'EC2SecurityGroupName': (basestring, False),
'EC2SecurityGroupOwnerId': (basestring, False),
}
class ClusterSubnetGroup(AWSObject):
resource_type = "AWS::Redshift::ClusterSubnetGroup"
props = {
'Description': (basestring, True),
'SubnetIds': (list, True),
}
|
[
"[email protected]"
] | |
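For context on the class definitions above, a minimal usage sketch follows; the logical resource name and property values are illustrative, not from the original file, and it assumes a compatible troposphere installation. The `props` mapping gives each property's expected type and whether it is required (the True/False flag).

```python
# Sketch: use the Cluster resource above to emit a CloudFormation template.
from troposphere import Template
from troposphere.redshift import Cluster

t = Template()
t.add_resource(Cluster(
    "ExampleRedshiftCluster",        # logical name (illustrative)
    ClusterType="single-node",
    DBName="mydb",
    MasterUsername="admin",
    MasterUserPassword="change-me",
    NodeType="dc2.large",            # the required props are the ones flagged True above
))
print(t.to_json())
```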
b4e53cc53f327c176ac752687f666fa108fb1ffb
|
0ffb1c51608db9af003c774aff26d55478f8959c
|
/Aula1/aula1_ex3.py
|
88bca3b0f84ae07a47f987f4a7c811358877c7bc
|
[] |
no_license
|
miguelgallo/Python2019_1
|
b8573aa31d0cff96584cd06c30f57df09e7b7e71
|
1c6ae770ba6db385353739bccc682f0ec11bdecf
|
refs/heads/master
| 2020-04-28T12:18:52.587285 | 2019-07-11T17:28:03 | 2019-07-11T17:28:03 | 175,272,526 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
print('This code computes the zeros of the function y = 3*x^2 - 4*x - 10')
a = 3
b = -4
c = -10
delta = (b ** 2) - 4 * a * c
x_1 = (-b + ((delta)**(1/2)))/(2*a)
x_2 = (-b - ((delta)**(1/2)))/(2*a)
print('The zeros of the function occur at the following values: x_1 = ', "%.3f" % x_1 , ' and x_2 = ' , "%.3f" % x_2)
|
[
"[email protected]"
] | |
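As a quick check of the arithmetic in the script above: delta = (-4)^2 - 4*3*(-10) = 136, sqrt(136) is approximately 11.662, so the script prints x_1 = 2.610 and x_2 = -1.277.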
90916e2114b0ee3e7317d3d5a8fa9e0691727229
|
866549190585e349c926a70cf2d3f767a3e72bde
|
/examples/larson.py
|
5e5eb5bbd717629c7d54b7b992d258417df11a67
|
[] |
no_license
|
russelltsherman/rpi-ledshim
|
9e6a15e4b3f140e7a1f00adc83212eb8e2445066
|
3d4acf5a70ac4507ff45dcbf2d2c73313a32f5ff
|
refs/heads/main
| 2020-12-10T20:44:53.291042 | 2020-01-13T22:31:05 | 2020-01-13T22:31:05 | 233,707,189 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 777 |
py
|
#!/usr/bin/env python3
import time
import ledshim
ledshim.set_clear_on_exit()
REDS = [0] * ledshim.NUM_PIXELS * 2
SCAN = [1, 2, 4, 8, 16, 32, 64, 128, 255]
REDS[ledshim.NUM_PIXELS - len(SCAN):ledshim.NUM_PIXELS + len(SCAN)] = SCAN + SCAN[::-1]
start_time = time.time()
while True:
# Sine wave, spends a little longer at min/max
# delta = (time.time() - start_time) * 8
# offset = int(round(((math.sin(delta) + 1) / 2) * (ledshim.NUM_PIXELS - 1)))
# Triangle wave, a snappy ping-pong effect
delta = (time.time() - start_time) * ledshim.NUM_PIXELS * 2
offset = int(abs((delta % len(REDS)) - ledshim.NUM_PIXELS))
for i in range(ledshim.NUM_PIXELS):
ledshim.set_pixel(i, REDS[offset + i], 0, 0)
ledshim.show()
time.sleep(0.05)
|
[
"[email protected]"
] | |
4b1787ae72aefebe880c2c4b230b6b170670ff17
|
79187d5ada48d8c9abd85da9ec8cf367cda18099
|
/custom/fastai.customold/callbacks/one_cycle.py
|
3975821fcc304d354c2730aca7d4c1b4ac12328d
|
[
"MIT"
] |
permissive
|
MichaelSluydts/schnetpack
|
92331eb12ea8aa3b910e168cba0e263d0b318dbf
|
9ca068d1f43ee4aff5bc2b6b0d5714c3ac484470
|
refs/heads/master
| 2023-02-24T15:35:19.906745 | 2021-02-01T12:22:13 | 2021-02-01T12:22:13 | 334,938,523 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,166 |
py
|
"Supports 1-Cycle style training"
from fastai.core import *
from schnetpack2.custom.fastai.callback import *
from schnetpack2.custom.fastai.basic_train import Learner,LearnerCallback
__all__ = ['OneCycleScheduler']
class OneCycleScheduler(LearnerCallback):
"Manage 1-Cycle style training as outlined in Leslie Smith's [paper](https://arxiv.org/pdf/1803.09820.pdf)."
def __init__(self, learn:Learner, lr_max:float, moms:Floats=(0.95,0.85), div_factor:float=25., pct_start:float=0.3):
super().__init__(learn)
self.lr_max,self.div_factor,self.pct_start = lr_max,div_factor,pct_start
self.moms=tuple(listify(moms,2))
if is_listy(self.lr_max): self.lr_max = np.array(self.lr_max)
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Stepper(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)]
def on_train_begin(self, n_epochs:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
n = len(self.learn.data.train_dl) * n_epochs
a1 = int(n * self.pct_start)
a2 = n-a1
self.phases = ((a1, annealing_cos), (a2, annealing_cos))
low_lr = self.lr_max/self.div_factor
self.lr_scheds = self.steps((low_lr, self.lr_max), (self.lr_max, low_lr/1e4))
self.mom_scheds = self.steps(self.moms, (self.moms[1], self.moms[0]))
self.opt = self.learn.opt
self.opt.lr,self.opt.mom = self.lr_scheds[0].start,self.mom_scheds[0].start
self.idx_s = 0
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self.lr_scheds): return True
self.opt.lr = self.lr_scheds[self.idx_s].step()
self.opt.mom = self.mom_scheds[self.idx_s].step()
# when the current schedule is complete we move onto the next
# schedule. (in 1-cycle there are two schedules)
if self.lr_scheds[self.idx_s].is_done:
self.idx_s += 1
|
[
"[email protected]"
] | |
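To make the two-phase schedule above concrete, here is a small standalone sketch of the resulting learning-rate curve. It assumes the cosine annealing formula used by fastai v1 and is not taken from the library itself.

```python
import math

def annealing_cos(start, end, pct):
    # cosine interpolation from `start` (pct=0) to `end` (pct=1), fastai-v1 style
    return end + (start - end) / 2 * (math.cos(math.pi * pct) + 1)

def one_cycle_lrs(n_iter, lr_max, div_factor=25., pct_start=0.3):
    # phase 1: low_lr -> lr_max over pct_start of the iterations; phase 2: lr_max -> low_lr/1e4
    a1 = int(n_iter * pct_start)
    a2 = n_iter - a1
    low_lr = lr_max / div_factor
    up = [annealing_cos(low_lr, lr_max, i / max(a1 - 1, 1)) for i in range(a1)]
    down = [annealing_cos(lr_max, low_lr / 1e4, i / max(a2 - 1, 1)) for i in range(a2)]
    return up + down

lrs = one_cycle_lrs(100, lr_max=1e-2)
print(lrs[0], max(lrs), lrs[-1])  # starts at lr_max/25, peaks at lr_max, ends near lr_max/250000
```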
c948e4c49f57de913ea20ae7dd968cb11a338119
|
76294c1bb35afe5bb1fca1a46a8b3c6fd777c2cf
|
/test/wx/test_filehelper.py
|
1cb731bf42b056a51ae95258be15ef7d99f3a791
|
[] |
no_license
|
robotsstudio/wxRobot
|
4f9c0ece2b4e02085bd582199d9713c664cbea32
|
355e903422543372430f39a9693a424aa1c2df16
|
refs/heads/master
| 2020-05-21T10:38:11.320463 | 2018-10-25T02:10:30 | 2018-10-25T02:10:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,340 |
py
|
#!/usr/bin/env python
# encoding: utf-8
# @Time : 10/18/18
__author__ = 'MiracleYoung'
import functools, threading, time
import itchat
from itchat.content import *
import sys, os
print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
print(sys.path)
instance = itchat.new_instance()
class FileHelper:
def __init__(self, instance):
self.instance = instance
self.th = threading.Thread(target=self.update_chatrooms, args=())
self.th.start()
def update_chatrooms(self):
while True:
self.groups = []
all = self.instance.get_chatrooms()
for group in all:
self.groups.append(group.UserName)
print(len(self.groups))
time.sleep(30)
@instance.msg_register([FRIENDS], isFriendChat=True)
def friends(res):
msg = res['Text']
try:
# add friends
if res['MsgType'] == 51 and res['Status'] == 3 and res['StatusNotifyCode'] == 5:
# TODO
# add friends
to_user = res.ToUserName
instance.add_friend(userName=to_user, status=3)
instance.send_msg('''
由于人数已满100,回复:“技术群”,拉你入群。
知识星球内有「Python原创」、「大航海计划」、「问题解答」、「面试刷题」、「大厂内推」、「技术分享」等,在这个星球能够得到的,不只是关于Python,圈子、人脉、资源,学习氛围,眼界都是比技术更值得去借鉴的东西。
如果想要加入知识星球的话,可以回复“知识星球”
''', to_user=to_user)
print('已添加好友')
except AttributeError:
pass
@instance.msg_register([TEXT], isFriendChat=True)
def auto_reply(res):
msg = res['Text']
from_user = res['User']
if msg == '技术群':
instance.add_member_into_chatroom(instance.search_chatrooms('测试群2')[0].UserName,
memberList=[from_user])
elif msg == '知识星球':
instance.send_image('./textpng.png', toUserName=from_user)
else:
pass
fh = FileHelper(instance)
instance.auto_login(hotReload=True)
instance.run()
|
[
"[email protected]"
] | |
fbcf2f345a377b236e4d5dd331708ae9b0e6cc03
|
392a4f5c76414fcbed17dd5dccaf2f64096659a2
|
/app_frame/page/market.py
|
0630ce30172d3d8b20da2105324e02b39ca1bd86
|
[] |
no_license
|
Allison001/homework
|
3bd5794c8bdd944f827f3e8008eea1831f90644b
|
1ab910d21ad4348a212b226758cfa8244ea03bfc
|
refs/heads/master
| 2023-04-08T22:48:56.667737 | 2021-04-15T03:38:56 | 2021-04-15T03:38:56 | 324,184,733 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 386 |
py
|
import yaml
from selenium.webdriver.common.by import By
from app_frame.basepage import BasePage
from app_frame.page.search import Search
class Market(BasePage):
def goto_search(self):
self.run_step('../page/market.yaml','goto_search')
# self.find_and_click((By.XPATH,"//*[@resource-id='com.xueqiu.android:id/action_search']"))
return Search(self.driver)
|
[
"[email protected]"
] | |
c5a017a2520ae0196d0bdd007873614abd29c5f0
|
4bde0c6c41a27efa068d89663e190403131ee118
|
/06_merge_sort.py
|
07179f04d25f7240d50e36724880214020bdfabf
|
[] |
no_license
|
wangshaobo08/algorithm
|
7919510281a3b8cfede2d80ced06b152795af574
|
ef3b83b977014ff2fe760f8b533ace56c33e4cad
|
refs/heads/master
| 2021-10-23T18:50:21.225749 | 2019-03-19T09:55:56 | 2019-03-19T09:55:56 | 122,831,467 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 903 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-12-10 20:50:47
# @Author : sober ([email protected])
# @Link : https://www.jianshu.com/u/b2d5110e9a57
# @Version : $Id$
def merge_sort(arr):
n = len(arr)
if n <= 1:
return arr
mid = n // 2
left_li = merge_sort(arr[:mid])
right_li = merge_sort(arr[mid:])
left_cursor, right_cursor = 0, 0
result = []
while left_cursor < len(left_li) and right_cursor < len(right_li):
if left_li[left_cursor] < right_li[right_cursor]:
result.append(left_li[left_cursor])
left_cursor += 1
else:
result.append(right_li[right_cursor])
right_cursor += 1
result += left_li[left_cursor:]
result += right_li[right_cursor:]
return result
if __name__ == "__main__":
arr = [1, 2, 1, 4, 67, 3, 7, 8, 6, 5, 5]
print(merge_sort(arr))
|
[
"[email protected]"
] | |
0d5831a00990b638ea1e36c31bbf02d7b0c7d605
|
2c308baea6968125fd69b365b0341d16b02b65a3
|
/quantylab/systrader/creon/bridge_django.py
|
faab6455f3c67de7cc36820a76c064029559c0cd
|
[] |
no_license
|
spica2002/systrader
|
da3f81d73a030a7a1d7b41b2919c649c1fdfe5bc
|
9491d294f328d8114ef89868ba3273acf0182f25
|
refs/heads/master
| 2023-04-14T03:33:18.925740 | 2021-04-24T02:33:03 | 2021-04-24T02:33:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,648 |
py
|
import json
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from quantylab.systrader.creon import Creon
from quantylab.systrader.creon import constants
c = Creon()
@csrf_exempt
def handle_connection(request):
if request.method == 'GET':
# check connection status
return JsonResponse(c.connected(), safe=False)
elif request.method == 'POST':
# make connection
data = json.loads(request.body)
_id = data['id']
_pwd = data['pwd']
_pwdcert = data['pwdcert']
return JsonResponse(c.connect(_id, _pwd, _pwdcert), safe=False)
elif request.method == 'DELETE':
# disconnect
return JsonResponse(c.disconnect(), safe=False)
def handle_stockcodes(request):
c.wait()
market = request.GET.get('market')
if market == 'kospi':
return JsonResponse(c.get_stockcodes(constants.MARKET_CODE_KOSPI), safe=False)
elif market == 'kosdaq':
return JsonResponse(c.get_stockcodes(constants.MARKET_CODE_KOSDAQ), safe=False)
else:
return HttpResponse('"market" should be one of "kospi" and "kosdaq".', status=400)
def handle_stockstatus(request):
c.wait()
stockcode = request.GET.get('code')
if not stockcode:
return HttpResponse('"code" should be provided.', status=400)
res = c.get_stockstatus(stockcode)
return JsonResponse(res)
def handle_stockcandles(request):
c.wait()
stockcode = request.GET.get('code')
n = request.GET.get('n')
if n:
n = int(n)
date_from = request.GET.get('date_from')
date_to = request.GET.get('date_to')
if not (n or date_from):
return HttpResponse('Need to provide "n" or "date_from" argument.', status=400)
res = c.get_chart(stockcode, target='A', unit='D', n=n, date_from=date_from, date_to=date_to)
return JsonResponse(res, safe=False)
def handle_marketcandles(request):
c.wait()
marketcode = request.GET.get('code')
n = request.GET.get('n')
if n:
n = int(n)
date_from = request.GET.get('date_from')
date_to = request.GET.get('date_to')
if marketcode == 'kospi':
marketcode = '001'
elif marketcode == 'kosdaq':
marketcode = '201'
elif marketcode == 'kospi200':
marketcode = '180'
else:
return HttpResponse('"code" should be one of "kospi", "kosdaq", and "kospi200".', status=400)
if not (n or date_from):
return HttpResponse('Need to provide "n" or "date_from" argument.', status=400)
res = c.get_chart(marketcode, target='U', unit='D', n=n, date_from=date_from, date_to=date_to)
return JsonResponse(res, safe=False)
def handle_stockfeatures(request):
c.wait()
stockcode = request.GET.get('code')
if not stockcode:
return HttpResponse('"code" should be provided.', status=400)
res = c.get_stockfeatures(stockcode)
return JsonResponse(res)
def handle_short(request):
c.wait()
stockcode = request.GET.get('code')
n = request.GET.get('n')
if n:
n = int(n)
if not stockcode:
return HttpResponse('"code" should be provided.', status=400)
res = c.get_shortstockselling(stockcode, n=n)
return JsonResponse(res, safe=False)
def handle_investorbuysell(request):
c.wait()
stockcode = request.GET.get('code')
n = request.GET.get('n')
if n:
n = int(n)
if not stockcode:
return HttpResponse('"code" should be provided.', status=400)
res = c.get_investorbuysell(stockcode, n=n)
return JsonResponse(res, safe=False)
|
[
"[email protected]"
] | |
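The view functions above still need URL routing to be reachable; a minimal urls.py sketch is below. The route strings are assumptions; only the module path comes from this row.

```python
# Sketch: wire the bridge views above into Django URL routing (route names are illustrative).
from django.urls import path
from quantylab.systrader.creon import bridge_django as views

urlpatterns = [
    path('connection', views.handle_connection),
    path('stockcodes', views.handle_stockcodes),
    path('stockstatus', views.handle_stockstatus),
    path('stockcandles', views.handle_stockcandles),
    path('marketcandles', views.handle_marketcandles),
    path('stockfeatures', views.handle_stockfeatures),
    path('short', views.handle_short),
    path('investorbuysell', views.handle_investorbuysell),
]
```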
3eab17b4d99bc2897c9558af70613ded3c6884d6
|
633ed4f252fb0562465e8c16c2a9a72381219e4a
|
/PycharmProjects/HelloWorldProject/PythonLearnAgain/ch5/ch5_class_def_iterations.py
|
580d97ecb534fdb221500466dee5d7e5390a9d6f
|
[] |
no_license
|
venkunikku/exercises_learning
|
5c3731f7bb8ec895a59d7eb8e4e3ed18911ef548
|
6fff208a8f218209f1a380e639ba1cd7b5a8e5b2
|
refs/heads/master
| 2021-04-29T00:23:01.487906 | 2017-06-02T21:26:10 | 2017-06-02T21:26:10 | 77,713,063 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 768 |
py
|
class Iterator:
# def __new__(cls, *args, **kwargs):
# print("Called when new object is created")
# def __init__(self, a):
# print("Called when object is created")
# def __del__(self):
# print("called when object is destored")
li = []
def __init__(self, max):
self.max = max
def __iter__(self):
self.a = 0
self.b = 1
return self  # this should return an iterator; since this class implements __next__, it can return self
def __next__(self):
fib = self.a
if fib > self.max:
raise StopIteration
self.a, self.b = self.b, self.a + self.b
return fib
if __name__ == '__main__':
a = Iterator(10)
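# iterating yields the Fibonacci numbers up to max=10: 0 1 1 2 3 5 8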
for i in a:
print(i)
|
[
"[email protected]"
] | |
670a9c4656b1ed4889e4390c5fe424466c8af425
|
e7d65f8773a8c736fc9e41e843d7da6da5cc2e0b
|
/py3plex/algorithms/network_classification/PPR.py
|
0339b2da13f9375d038028962e9f8485a7392e37
|
[
"BSD-3-Clause"
] |
permissive
|
hanbei969/Py3plex
|
768e86b16ca00044fcb4188e01edf32c332c8a2a
|
1ef3e0e6d468d24bd6e6aec3bd68f20b9d9686bb
|
refs/heads/master
| 2021-01-03T18:19:24.049457 | 2020-02-12T16:51:14 | 2020-02-12T16:51:14 | 240,188,307 | 1 | 0 |
BSD-3-Clause
| 2020-02-13T05:57:16 | 2020-02-13T05:57:16 | null |
UTF-8
|
Python
| false | false | 4,584 |
py
|
## set of routines for validation of the PPR-based classification
from ..node_ranking import *
from ..general.benchmark_classification import *
import pandas as pd
from sklearn.svm import SVC
from sklearn.metrics import f1_score
import time
import numpy as np
import multiprocessing as mp
from sklearn.model_selection import StratifiedKFold,StratifiedShuffleSplit
from sklearn import preprocessing
def construct_PPR_matrix(graph_matrix,parallel=False):
"""
    PPR matrix is the matrix of features used for classification --- this is the space-intensive (dense) version of the classifier
"""
## initialize the vectors
n = graph_matrix.shape[1]
vectors = np.zeros((n, n))
results = run_PPR(graph_matrix,parallel=parallel)
## get the results in batches
for result in results:
        if result is not None:
## individual batches
if isinstance(result, list):
for ppr in result:
vectors[ppr[0],:] = ppr[1]
else:
ppr = result
vectors[ppr[0],:] = ppr[1]
return vectors
def construct_PPR_matrix_targets(graph_matrix,targets,parallel=False):
n = graph_matrix.shape[1]
vectors = np.empty((len(targets), n))
tar_map = dict(zip(targets,range(len(targets))))
results = run_PPR(graph_matrix,targets=targets,parallel=parallel)
for result in results:
        vectors[tar_map[result[0]],:] = result[1]
return vectors
## deal with that now..
def validate_ppr(core_network,labels,dataset_name="test",repetitions=5,random_seed=123,multiclass_classifier=None,target_nodes=None,parallel=False):
"""
The main validation class --- use this to obtain CV results!
"""
if multiclass_classifier is None:
multiclass_classifier = SVC(kernel = 'linear', C = 1,probability=True)
df = pd.DataFrame()
for k in range(repetitions):
## this is relevant for supra-adjacency-based tasks..
if target_nodes is not None:
print("Subnetwork ranking in progress..")
vectors = construct_PPR_matrix_targets(core_network,target_nodes,parallel=parallel)
labels = labels[target_nodes]
else:
vectors = construct_PPR_matrix(core_network,parallel=parallel)
## remove single instance-single target!
nz = np.count_nonzero(labels,axis=0)
wnz = np.argwhere(nz>2).T[0]
labels = labels[:,wnz]
for j in np.arange(0.1,0.5,0.1):
## run the training..
print("Train size:{}, method {}".format(j,"PPR"))
print(vectors.shape,labels.shape)
rs = StratifiedShuffleSplit(n_splits=10, test_size=0.5, random_state=random_seed)
micros = []
macros = []
times = []
new_train_y = []
for y in labels:
new_train_y.append(list(y).index(1))
onedim_labels = np.array(new_train_y)
for X_train, X_test in rs.split(vectors,new_train_y):
start = time.time()
train_x = vectors[X_train]
test_x = vectors[X_test]
train_labels = labels[X_train]
test_labels = labels[X_test]
train_labels_first = onedim_labels[X_train]
test_labels_second = onedim_labels[X_test]
clf = multiclass_classifier
clf.fit(train_x, train_labels_first)
preds = clf.predict(test_x)
mi = f1_score(test_labels_second, preds, average='micro')
ma = f1_score(test_labels_second, preds, average='macro')
# being_predicted = np.unique(train_labels_first)
# tmp_lab = test_labels[:,being_predicted]
# mi,ma = evaluate_oracle_F1(probs,tmp_lab)
## train the model
end = time.time()
elapsed = end - start
micros.append(mi)
macros.append(ma)
times.append(elapsed)
outarray = {"percent_train": np.round(1-j,1), "micro_F":np.mean(micros),"macro_F":np.mean(macros) ,"setting": "PPR" ,"dataset": dataset_name,"time":np.mean(times)}
df = df.append(outarray,ignore_index=True)
df = df.reset_index()
return df
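# Stand-alone sketch (illustration only, independent of run_PPR above): personalized
# PageRank for a single seed node via power iteration on a tiny row-stochastic
# matrix. It only shows the kind of per-node vector that construct_PPR_matrix
# collects; it is not the library's implementation.
def _ppr_power_iteration_demo(alpha=0.85, iters=100):
    A = np.array([[0., 1., 1., 0.],
                  [1., 0., 1., 0.],
                  [1., 1., 0., 1.],
                  [0., 0., 1., 0.]])
    P = A / A.sum(axis=1, keepdims=True)    # row-normalised transition matrix
    seed = np.array([1., 0., 0., 0.])       # restart distribution concentrated on node 0
    r = seed.copy()
    for _ in range(iters):
        r = alpha * (r @ P) + (1 - alpha) * seed
    return r                                # PPR feature vector for node 0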
|
[
"[email protected]"
] | |
f9754e638f7f0ac79325079232c3d5fc0e6698ee
|
52951a030d9988d33b7a148c4997c1240ced4216
|
/cgen/cgen/cuda.py
|
8d444c942fb8d3963557f03928775b08ad9a07fe
|
[] |
no_license
|
chiaracoetzee/asptutorial
|
d99632fbb8ea561f12912f344d9451fe547bdd0d
|
4cc60645aa06054eb59d2e821b96d36cef68782a
|
refs/heads/master
| 2021-07-11T08:16:22.865970 | 2012-04-02T14:07:37 | 2012-04-02T14:07:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 582 |
py
|
from __future__ import division
from cgen import DeclSpecifier
class CudaGlobal(DeclSpecifier):
def __init__(self, subdecl):
DeclSpecifier.__init__(self, subdecl, "__global__")
class CudaDevice(DeclSpecifier):
def __init__(self, subdecl):
DeclSpecifier.__init__(self, subdecl, "__device__")
class CudaShared(DeclSpecifier):
def __init__(self, subdecl):
DeclSpecifier.__init__(self, subdecl, "__shared__")
class CudaConstant(DeclSpecifier):
def __init__(self, subdecl):
DeclSpecifier.__init__(self, subdecl, "__constant__")
|
[
"[email protected]"
] | |
a5a768716529aae693d0c0183cc71a5de0059dcd
|
f2348887dbffbd2124376b97c942939e78093e8f
|
/test2.py
|
b0e537cd2b086f870432a74523e1be14d079f806
|
[] |
no_license
|
BomberDim/Python-practice
|
f2d0c9051b67a0269b04861c205d268611ed259b
|
b55baabaed088449dc70cda9f09e706f3614fbf3
|
refs/heads/master
| 2020-05-19T09:57:42.117307 | 2015-12-13T16:17:06 | 2015-12-13T16:17:06 | 42,327,071 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
i = 1
while i < 11:
print(i)
i += 1
print ("Конец программы")
|
[
"[email protected]"
] | |
e398d0079296284a5b09029a6040ba7474bc34b9
|
0d12834f4e393ecb35211c04c88f31eaf724a4dd
|
/src/model/pl_model.py
|
e5eff989fba027a5e4af271882be3db3ff33c647
|
[] |
no_license
|
killsking/atrc
|
be39f21b87d6cbfda3206e7e08ca70afde83a02b
|
5714e58db809dad7bc6d71b6b6be83f241105278
|
refs/heads/main
| 2023-07-12T18:24:21.871045 | 2021-08-23T09:10:31 | 2021-08-23T09:10:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,665 |
py
|
import os
import torch
import torch.nn as nn
import pytorch_lightning as pl
from . import builder
from . import utils_model
class MultiTaskModel(pl.LightningModule):
def __init__(self,
model_backbone: str,
in_index: str,
model_head: str,
tasks: list, # datamodule
task_channel_mapping: dict, # datamodule
metrics_dict: nn.ModuleDict, # datamodule
edge_pos_weight: float, # datamodule
normals_centroids: torch.Tensor, # datamodule
iterations: int = 40000,
lr: float = 0.01,
head_lr_mult: float = 1.0,
weight_decay: float = 0.0005,
atrc_genotype_path: str = None):
super().__init__()
self.tasks = tasks
self.metrics_dict = metrics_dict
self.iterations = iterations
self.lr = lr
self.head_lr_mult = head_lr_mult
self.weight_decay = weight_decay
self.backbone = builder.get_backbone(model_backbone=model_backbone,
pretrained=True)
self.head = builder.get_head(head_name=model_head,
in_index=in_index,
idx_to_planes=self.backbone.idx_to_planes,
tasks=tasks,
task_channel_mapping=task_channel_mapping,
atrc_genotype_path=atrc_genotype_path)
self.criterion = builder.get_criterion(tasks=tasks,
head_endpoints=self.head.head_endpoints,
edge_pos_weight=edge_pos_weight,
normals_centroids=normals_centroids)
def on_fit_start(self):
if 'edge' in self.tasks:
self.edge_save_dir = os.path.join(self.logger.log_dir, 'edge_preds')
if self.trainer.is_global_zero:
os.makedirs(self.edge_save_dir, exist_ok=True)
def training_step(self, batch, batch_idx):
image = batch['image']
targets = {t: batch[t] for t in self.tasks}
input_shape = image.shape[-2:]
features = self.backbone(image)
out = self.head(features, input_shape, image=image)
loss, logger_losses = self.criterion(out, targets)
self.log('train_loss', loss)
for key, val in logger_losses.items():
self.log(key + '_train_loss', val)
return loss
def validation_step(self, batch, batch_idx):
image = batch['image']
targets = {t: batch[t] for t in self.tasks}
input_shape = image.shape[-2:]
features = self.backbone(image)
out = self.head(features, input_shape, image=image)
for task in self.tasks:
task_target = targets[task]
task_pred = out['final'][task]
if task == 'depth':
# threshold negative values
task_pred.clamp_(min=0.)
if task == 'edge':
# edge predictions are saved for later evaluation
utils_model.save_predictions('edge', task_pred, batch['meta'], self.edge_save_dir)
else:
self.metrics_dict[task](task_pred, task_target)
def validation_epoch_end(self, outputs):
metrics_val = {}
for task in self.tasks:
if task == 'edge':
continue
metrics_val[task] = self.metrics_dict[task].compute()
self.log('_'.join(
[task, 'valid', self.metrics_dict[task].__class__.__name__]), metrics_val[task], sync_dist=True)
def configure_optimizers(self):
params = self._get_parameters()
optimizer = torch.optim.SGD(
lr=self.lr, momentum=0.9, weight_decay=self.weight_decay, params=params)
scheduler = {
'scheduler': utils_model.PolynomialLR(optimizer, self.iterations, gamma=0.9, min_lr=0),
'interval': 'step'
}
return [optimizer], [scheduler]
def _get_parameters(self):
backbone_params = []
head_params = []
params_dict = dict(self.named_parameters())
for key, value in params_dict.items():
if 'backbone' not in key:
head_params.append(value)
else:
backbone_params.append(value)
params = [{'params': backbone_params, 'lr': self.lr},
{'params': head_params, 'lr': self.lr * self.head_lr_mult}]
return params
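# Stand-alone sketch of the parameter-group split done in _get_parameters above:
# parameters under a "backbone" submodule keep the base LR, everything else gets
# lr * head_lr_mult. The toy two-layer module is an assumption for illustration only.
def _param_group_demo(lr=0.01, head_lr_mult=10.0):
    toy = nn.Sequential()
    toy.add_module('backbone', nn.Linear(8, 8))
    toy.add_module('head', nn.Linear(8, 2))
    backbone_params, head_params = [], []
    for name, param in toy.named_parameters():
        (backbone_params if 'backbone' in name else head_params).append(param)
    return torch.optim.SGD(
        [{'params': backbone_params, 'lr': lr},
         {'params': head_params, 'lr': lr * head_lr_mult}],
        momentum=0.9)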
|
[
"[email protected]"
] | |
50651266817835bab17e23e3b97ff1c9a1bda761
|
68e5cf2b601df44734e1ce5ea947cf54531dfa22
|
/cms/cms1/views1.py
|
ff03c30134915d0c1f3acd60e9940926dad51a6f
|
[] |
no_license
|
Amit-031296/cms1
|
b77440fb446f1bc6cfce3822f73d629c8ffe841a
|
eea09e45519d3be7c5dfabe2ce73a37c7104a7e4
|
refs/heads/master
| 2023-04-06T06:21:22.547657 | 2021-03-31T12:51:36 | 2021-03-31T12:51:36 | 353,355,209 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,728 |
py
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework.pagination import PageNumberPagination
from rest_framework.generics import ListAPIView
# from rest_framework.filters import SearchFilter, OrderingFilter
from cms1.models import *
from cms1 import serializers,serializers1
SUCCESS = 'success'
ERROR = 'error'
DELETE_SUCCESS = 'deleted'
UPDATE_SUCCESS = 'updated'
CREATE_SUCCESS = 'created'
# Response: https://gist.github.com/mitchtabian/93f287bd1370e7a1ad3c9588b0b22e3d
# Url: https://<your-domain>/api/blog/<slug>/
# Headers: Authorization: Token <token>
@api_view(['GET', ])
@permission_classes((IsAuthenticated, ))
def cms_detail_content_view(request,):
cmsusers = request.user
try:
content = CMSUsersContent.objects.get(cmsusers=cmsusers)
except CMSUsersContent.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = serializers1.CMSUsersContentSerializer(content)
return Response(serializer.data)
# Response: https://gist.github.com/mitchtabian/32507e93c530aa5949bc08d795ba66df
# Url: https://<your-domain>/api/blog/<slug>/update
# Headers: Authorization: Token <token>
# @api_view(['PUT',])
# @permission_classes((IsAuthenticated,))
# def api_update_blog_view(request, slug):
# try:
# blog_post = BlogPost.objects.get(slug=slug)
# except BlogPost.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# user = request.user
# if blog_post.author != user:
# return Response({'response':"You don't have permission to edit that."})
# if request.method == 'PUT':
# serializer = BlogPostUpdateSerializer(blog_post, data=request.data, partial=True)
# data = {}
# if serializer.is_valid():
# serializer.save()
# data['response'] = UPDATE_SUCCESS
# data['pk'] = blog_post.pk
# data['title'] = blog_post.title
# data['body'] = blog_post.body
# data['slug'] = blog_post.slug
# data['date_updated'] = blog_post.date_updated
# image_url = str(request.build_absolute_uri(blog_post.image.url))
# if "?" in image_url:
# image_url = image_url[:image_url.rfind("?")]
# data['image'] = image_url
# data['username'] = blog_post.author.username
# return Response(data=data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# @api_view(['GET',])
# @permission_classes((IsAuthenticated,))
# def api_is_author_of_blogpost(request, slug):
# try:
# blog_post = BlogPost.objects.get(slug=slug)
# except BlogPost.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# data = {}
# user = request.user
# if blog_post.author != user:
# data['response'] = "You don't have permission to edit that."
# return Response(data=data)
# data['response'] = "You have permission to edit that."
# return Response(data=data)
# # Response: https://gist.github.com/mitchtabian/a97be3f8b71c75d588e23b414898ae5c
# # Url: https://<your-domain>/api/blog/<slug>/delete
# # Headers: Authorization: Token <token>
# @api_view(['DELETE',])
# @permission_classes((IsAuthenticated, ))
# def api_delete_blog_view(request, slug):
# try:
# blog_post = BlogPost.objects.get(slug=slug)
# except BlogPost.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# user = request.user
# if blog_post.author != user:
# return Response({'response':"You don't have permission to delete that."})
# if request.method == 'DELETE':
# operation = blog_post.delete()
# data = {}
# if operation:
# data['response'] = DELETE_SUCCESS
# return Response(data=data)
# # Response: https://gist.github.com/mitchtabian/78d7dcbeab4135c055ff6422238a31f9
# # Url: https://<your-domain>/api/blog/create
# # Headers: Authorization: Token <token>
# @api_view(['POST'])
# @permission_classes((IsAuthenticated,))
# def api_create_blog_view(request):
# if request.method == 'POST':
# data = request.data
# data['author'] = request.user.pk
# serializer = BlogPostCreateSerializer(data=data)
# data = {}
# if serializer.is_valid():
# blog_post = serializer.save()
# data['response'] = CREATE_SUCCESS
# data['pk'] = blog_post.pk
# data['title'] = blog_post.title
# data['body'] = blog_post.body
# data['slug'] = blog_post.slug
# data['date_updated'] = blog_post.date_updated
# image_url = str(request.build_absolute_uri(blog_post.image.url))
# if "?" in image_url:
# image_url = image_url[:image_url.rfind("?")]
# data['image'] = image_url
# data['username'] = blog_post.author.username
# return Response(data=data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# # Response: https://gist.github.com/mitchtabian/ae03573737067c9269701ea662460205
# # Url:
# # 1) list: https://<your-domain>/api/blog/list
# # 2) pagination: http://<your-domain>/api/blog/list?page=2
# # 3) search: http://<your-domain>/api/blog/list?search=mitch
# # 4) ordering: http://<your-domain>/api/blog/list?ordering=-date_updated
# # 4) search + pagination + ordering: <your-domain>/api/blog/list?search=mitch&page=2&ordering=-date_updated
# # Headers: Authorization: Token <token>
# class ApiBlogListView(ListAPIView):
# queryset = BlogPost.objects.all()
# serializer_class = BlogPostSerializer
# authentication_classes = (TokenAuthentication,)
# permission_classes = (IsAuthenticated,)
# pagination_class = PageNumberPagination
# filter_backends = (SearchFilter, OrderingFilter)
# search_fields = ('title', 'body', 'author__username')
|
[
"[email protected]"
] | |
c7d2e24957a8f5d7a7276553f6d133a9933b2d8a
|
385e00e3d48446baf20cb3d0fbf9db0344cd95da
|
/test/visualization/test_utils.py
|
9e8a593f52ffbe911da59c3806471afc61755eca
|
[
"Apache-2.0"
] |
permissive
|
oliverdial/qiskit-experiments
|
d670f9151116e2e7d9a67f304a23313aa31fc30f
|
a387675a3fe817cef05b968bbf3e05799a09aaae
|
refs/heads/main
| 2023-06-24T08:07:19.505243 | 2023-06-09T21:01:59 | 2023-06-09T21:01:59 | 362,153,676 | 0 | 0 |
Apache-2.0
| 2021-04-27T15:03:52 | 2021-04-27T15:03:51 | null |
UTF-8
|
Python
| false | false | 4,818 |
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test visualization utilities.
"""
import itertools as it
from test.base import QiskitExperimentsTestCase
from typing import List, Tuple
import numpy as np
from ddt import data, ddt
from qiskit.exceptions import QiskitError
from qiskit_experiments.visualization.utils import DataExtentCalculator
from qiskit_experiments.framework.package_deps import numpy_version
@ddt
class TestDataExtentCalculator(QiskitExperimentsTestCase):
"""Test DataExtentCalculator"""
@classmethod
def _dummy_data(
cls,
extent: Tuple[float, float, float, float] = (-1, 1, -5, 0),
n_data: int = 5,
n_points: int = 16,
) -> List[np.ndarray]:
# Create a list of bin edges by which to divide the target extent
bin_edges = [
np.histogram_bin_edges(extent[0:2], bins=n_data).tolist(),
np.histogram_bin_edges(extent[2:], bins=n_data).tolist(),
]
# Iterate over pairs of adjacent bin edges, which define the maximum and minimum for the region.
# This is done by generating sliding windows of bin_edges as follows:
# [[a], [b], [c], [d], [e], [f]], g]
# [a, [[b], [c], [d], [e], [f], [g]]
# The result is a list of pairs representing a moving window of size 2.
# TODO: remove the old code once numpy is above 1.20.
dummy_data = []
if numpy_version() >= (1, 20):
for (x_min, x_max), (y_min, y_max) in it.product(
*np.lib.stride_tricks.sliding_window_view(bin_edges, 2, 1)
):
_dummy_data = np.asarray(
[
np.linspace(x_min, x_max, n_points),
np.linspace(y_min, y_max, n_points),
]
)
dummy_data.append(_dummy_data.swapaxes(-1, -2))
else:
for (x_min, x_max), (y_min, y_max) in it.product(
*tuple(list(zip(b[0:-1], b[1:])) for b in bin_edges)
):
_dummy_data = np.asarray(
[
np.linspace(x_min, x_max, n_points),
np.linspace(y_min, y_max, n_points),
]
)
dummy_data.append(_dummy_data.swapaxes(-1, -2))
return dummy_data
@data(*list(it.product([1.0, 1.1, 2.0], [None, 1.0, np.sqrt(2)])))
def test_end_to_end(self, args):
"""Test end-to-end functionality.
Results that are asserted include the range of the final extent tuple and its midpoint.
"""
# Test args
multiplier, aspect_ratio = args[0], args[1]
# Problem inputs
extent = (-1, 1, -5, 1)
n_data = 6
dummy_data = self._dummy_data(extent, n_data=n_data)
ext_calc = DataExtentCalculator(multiplier=multiplier, aspect_ratio=aspect_ratio)
# Add data as 2D and 1D arrays to test both methods
for d in dummy_data[0 : int(n_data / 2)]:
ext_calc.register_data(d)
for d in dummy_data[int(n_data / 2) :]:
for i_dim in range(2):
ext_calc.register_data(d[:, i_dim], dim=i_dim)
# Check extent
actual_extent = ext_calc.extent()
# Check that range was scaled. Given we also have an aspect ratio, we may have a range that is
# larger than the original scaled by the multiplier. At the minimum, the range should be exactly
# equal to the original scaled by the multiplier
expected_range = multiplier * np.diff(np.asarray(extent).reshape((2, 2)), axis=1).flatten()
actual_range = np.diff(np.reshape(actual_extent, (2, 2)), axis=1).flatten()
for act, exp in zip(actual_range, expected_range):
self.assertTrue(act >= exp)
# Check that the midpoints are the same.
expected_midpoint = np.mean(np.reshape(extent, (2, 2)), axis=1).flatten()
actual_midpoint = np.mean(np.reshape(actual_extent, (2, 2)), axis=1).flatten()
np.testing.assert_almost_equal(
actual_midpoint,
expected_midpoint,
)
def test_no_data_error(self):
"""Test that a QiskitError is raised if no data was set."""
ext_calc = DataExtentCalculator()
with self.assertRaises(QiskitError):
ext_calc.extent()
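# Quick stand-alone illustration of the API exercised in this test (values arbitrary):
#
#   calc = DataExtentCalculator(multiplier=1.1)
#   calc.register_data(np.array([[0.0, 1.0], [2.0, 3.0]]))   # 2D data: columns are x and y
#   calc.register_data(np.array([-1.0, 4.0]), dim=1)         # 1D data for the y dimension
#   calc.extent()   # -> (x_min, x_max, y_min, y_max), padded according to the multiplier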
|
[
"[email protected]"
] | |
696bd35da64e17e822d288f6f255b2ede67b799f
|
f14c198ad1b8f6324956e5bcdb4cd910c67eb3e1
|
/exercises/migrations/0002_auto_20200511_0132.py
|
3bd35216046044ede8444a9f139c160196dd3b7a
|
[
"Unlicense"
] |
permissive
|
rattletat/python-homework-server
|
8150e9059d43cc24677a632fbac503856a66e971
|
abfac831ed45cc567a6a1610edee934200ffada7
|
refs/heads/master
| 2022-12-26T02:25:48.455504 | 2020-10-01T11:08:24 | 2020-10-01T11:08:24 | 258,362,901 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,209 |
py
|
# Generated by Django 3.0.5 on 2020-05-10 23:32
from django.db import migrations, models
import exercises.helper
import exercises.storage
import exercises.validators
class Migration(migrations.Migration):
dependencies = [
('exercises', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='exercise',
name='short_name',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='exercise',
name='description',
field=models.FileField(storage=exercises.storage.OverwriteStorage(), upload_to=exercises.helper.get_description_path, validators=[exercises.validators.FileValidator(allowed_extensions=['md'], allowed_mimetypes=['text/markdown', 'text/plain', 'text/x-python'])]),
),
migrations.AlterField(
model_name='exercise',
name='tests',
field=models.FileField(storage=exercises.storage.OverwriteStorage(), upload_to=exercises.helper.get_tests_path, validators=[exercises.validators.FileValidator(allowed_extensions=['py'], allowed_mimetypes=['text/python', 'text/x-python'])]),
),
]
|
[
"[email protected]"
] | |
ec72f0f7a89df64431f0e586bcb6b77cb150443b
|
42a23fabbf3280340377c777021950ba1351657c
|
/users/views.py
|
f95e13c8d5c672c2e64697090bd44b9d1873c422
|
[] |
no_license
|
adineshshylaja/8210Assignment1
|
9046f09dcdfa9d3c483c2c7a223b9e7ef97c6c8a
|
5faf6af827e426364fca6350802982477ee388fc
|
refs/heads/master
| 2022-12-12T22:03:44.832689 | 2020-09-14T04:53:42 | 2020-09-14T04:53:42 | 293,982,704 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 328 |
py
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm
# Create your views here.
class SignUpView(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
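# Typical URLconf wiring for this view (a sketch; the project's actual urls.py is
# not part of this file and the route name "signup" is an assumption):
#
#   from django.urls import path
#   from .views import SignUpView
#
#   urlpatterns = [
#       path('signup/', SignUpView.as_view(), name='signup'),
#   ]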
|
[
"[email protected]"
] | |
0b9316b1cd3f2f756c659bfeef922ef8efed975a
|
f064b7f41011d14191d994a2f917cbb57da40770
|
/tutorial 4_Variables.py
|
40856496a035c157980fb86ff4812ddf30a47b2e
|
[] |
no_license
|
naveensambandan11/PythonTutorials
|
5980f77def0e4f040852bb16d8fcd58a3f4767ec
|
668ba99dd81a494e1072f5fdc10b8441078835d2
|
refs/heads/master
| 2022-04-20T04:47:09.589115 | 2020-04-20T07:28:21 | 2020-04-20T07:28:21 | 257,203,001 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 950 |
py
|
Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 22:45:29) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> x=6
>>> y=5
>>> x+5
11
>>> x+y
11
>>> print x + ('apples')
SyntaxError: invalid syntax
>>> print(x + 'apples')
Traceback (most recent call last):
File "<pyshell#5>", line 1, in <module>
print(x + 'apples')
TypeError: unsupported operand type(s) for +: 'int' and 'str'
>>> name = 'naveen'
>>> x+name
Traceback (most recent call last):
File "<pyshell#7>", line 1, in <module>
x+name
TypeError: unsupported operand type(s) for +: 'int' and 'str'
>>> name2 = 'mage'
>>> name + name2
'naveenmage'
>>> name2+name
'magenaveen'
>>> x+y
11
>>> _+4
15
>>> name[0:2]
'na'
>>> name2[1:3]
'ag'
>>> name[2:]
'veen'
>>> name2[-3]
'a'
>>> name[3]
'e'
>>> len(name)
6
>>> len(name2)
4
>>> len(name)+len(name2)
10
>>> len(name)-2
4
>>>
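>>> # converting the int to a string first makes the earlier concatenation work:
>>> str(x) + ' apples'
'6 apples'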
|
[
"[email protected]"
] | |
6a1d507ecf64c91e7e343636b91b18096cc98ccf
|
39b9c3c223cb1a48fb2386c2166c2601d0301a2b
|
/qtTest.py
|
5f8ea21f8bf10687645ea16db6f83898879cd3b2
|
[] |
no_license
|
AlexCornish/BLSFormatter
|
98da607fd8fac50d6d468aad2dfa55f5268f898d
|
5de9ec211fda3eee597f1fd1149921210c4a16d5
|
refs/heads/main
| 2023-02-07T06:50:07.042423 | 2020-12-31T19:21:50 | 2020-12-31T19:21:50 | 325,335,500 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,639 |
py
|
import sys
import os
import commodity
import industry
from PyQt5.QtWidgets import QApplication,QLineEdit,QPushButton,QCheckBox,QWidget, QVBoxLayout,QHBoxLayout,QLabel, QRadioButton,QGridLayout, QButtonGroup, QFileDialog
import webbrowser
class GUI(QWidget):
def __init__(self):
super(GUI,self).__init__()
self.win = QWidget()
self.win.setFixedSize(400,340)
self.win.setWindowTitle("BLS Request")
hbox = QHBoxLayout()
label = QLabel(self.win)
label.setText("Which BLS dataset would you like to format? ")
vbox = QVBoxLayout()
hbox1 = QHBoxLayout()
bg1 = QButtonGroup(self.win)
rb1 = QRadioButton("Commodity", self.win)
rb2 = QRadioButton("Industry", self.win)
hbox2 = QHBoxLayout()
bg2 = QButtonGroup(self.win)
label1 = QLabel(self.win)
label1.setText("Time Period Format: ")
label1.adjustSize()
rb4 = QRadioButton("Yearly", self.win)
rb5 = QRadioButton("Quarterly", self.win)
rb6 = QRadioButton("Monthly", self.win)
rb6.setChecked(True)
label3 = QLabel(self.win)
label3.setText("Other Options: ")
label3.adjustSize()
        # c1-c4 must be disabled if the time period is not monthly
c1 = QCheckBox("Drop M13")
c2 = QCheckBox("Format Time Period as YYYY-MM-01")
c3 = QCheckBox("Add Seasonal Codes")
rb4.toggled.connect(lambda:disableMonthly(c1,c2,c3,c4))
rb5.toggled.connect(lambda:disableMonthly(c1,c2,c3,c4))
rb6.toggled.connect(lambda:enableMonthly(c1,c2,c3,c4))
c4 = QCheckBox("Add Year-Over-Year Changes")
c5 = QCheckBox("Add Percentage Changes between Periods")
c6 = QCheckBox("Add Labels for Each Level")
c7 = QCheckBox("Split ID values")
c8 = QCheckBox("Format Dataframe Wide")
hbox3 = QHBoxLayout()
submitButton = QPushButton("Save")
submitButton.setEnabled(False)
submitButton.setCheckable(True)
rb1.toggled.connect(lambda:submitButton.setEnabled(True))
rb2.toggled.connect(lambda:submitButton.setEnabled(True))
submitButton.toggled.connect(lambda:performDataFuncs(rb1, rb2,rb4.isChecked(),rb5.isChecked(),rb6.isChecked(),c1.isChecked(),c2.isChecked(),c3.isChecked(),c4.isChecked(),c5.isChecked(),c6.isChecked(),c7.isChecked(),c8.isChecked()))
hbox3.addWidget(submitButton)
bg1.addButton(rb1)
bg1.addButton(rb2)
bg2.addButton(rb4)
bg2.addButton(rb5)
bg2.addButton(rb6)
hbox.addWidget(label)
helpButton = QPushButton("HELP")
helpButton.clicked.connect(lambda: webbrowser.open('https://github.com/AlexCornish/BLSFormatter/blob/main/readMe.md',new=1))
hbox.addWidget(helpButton)
hbox1.addWidget(rb1)
hbox1.addWidget(rb2)
hbox2.addWidget(rb4)
hbox2.addWidget(rb5)
hbox2.addWidget(rb6)
vbox.addLayout(hbox)
vbox.addLayout(hbox1)
vbox.addWidget(label1)
vbox.addLayout(hbox2)
vbox.addWidget(label3)
vbox.addWidget(c1)
vbox.addWidget(c2)
vbox.addWidget(c3)
vbox.addWidget(c4)
vbox.addWidget(c5)
vbox.addWidget(c6)
vbox.addWidget(c7)
vbox.addWidget(c8)
vbox.addLayout(hbox3)
self.win.setLayout(vbox)
self.win.show()
def disableMonthly(c1,c2,c3,c4):
c1.setEnabled(False)
c1.setChecked(False)
c2.setEnabled(False)
c2.setChecked(False)
c3.setEnabled(False)
c3.setChecked(False)
c4.setEnabled(False)
c4.setChecked(False)
def enableMonthly(c1,c2,c3,c4):
c1.setEnabled(True)
c2.setEnabled(True)
c3.setEnabled(True)
c4.setEnabled(True)
def performDataFuncs(wpRB, pcRB,yearly,quarterly,monthly,c1,c2,c3,c4,c5,c6,c7,c8):
if wpRB.isChecked():
inputArr = [yearly,quarterly,monthly,c1,c2,c3,c4,c5,c6,c7,c8]
data = commodity.wpProcessing(inputArr)
openFileSaveAs(data)
elif pcRB.isChecked():
inputArr = [yearly,quarterly,monthly,c1,c2,c3,c4,c5,c6,c7,c8]
data = industry.pcProcessing(inputArr)
openFileSaveAs(data)
def openFileSaveAs(data):
dialogue = QFileDialog()
result = dialogue.getSaveFileName(dialogue, "Save File",filter="*csv")
writePath = result[0] + ".csv"
data.to_csv(writePath,index=False)
app = QApplication([])
if __name__ == "__main__":
win = GUI()
win.show()
app.exec_()
|
[
"[email protected]"
] | |
3ddb09c649bea4922aeb00e020172bdd08f3f780
|
b2628e2c583928a52fd261a114f608c299796f7e
|
/leetcode/lessons/array/370_range_addition/__init__.py
|
71296b5620b19be26f5802dc631e2d0e2919dde5
|
[
"MIT",
"Python-2.0"
] |
permissive
|
wangkuntian/leetcode
|
22f0326453650898c3680d36bbb3053bba78fa11
|
e8dc9c8032c805a7d071ad19b94841ee8e52e834
|
refs/heads/master
| 2022-02-20T18:08:41.953746 | 2022-02-17T01:35:33 | 2022-02-17T01:35:33 | 221,393,840 | 0 | 0 |
MIT
| 2021-11-19T01:52:30 | 2019-11-13T07:01:28 |
Python
|
UTF-8
|
Python
| false | false | 3,256 |
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'leetcode'
__file__ = '__init__.py.py'
__author__ = 'king'
__time__ = '2022/2/14 19:15'
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
佛祖保佑 永无BUG
"""
from typing import List
from leetcode.utils.timeutils import time_interval
"""
Difficulty: Medium
Assume you have an array of length n with all elements initialized to 0,
and you are given k update operations.
Each operation is a triplet [startIndex, endIndex, inc]: add inc to every element
of the subarray A[startIndex ... endIndex] (both startIndex and endIndex inclusive).
Return the array after the k operations.
Example:
Input: length = 5, updates = [[1,3,2],[2,4,3],[0,2,-2]]
Output: [-2,0,3,5,3]
Explanation:
Initial state:
[0,0,0,0,0]
After operation [1,3,2]:
[0,2,2,2,0]
After operation [2,4,3]:
[0,2,5,5,3]
After operation [0,2,-2]:
[-2,0,3,5,3]
Related Topics: Array, Difference Array
"""
class Difference:
def __init__(self, nums):
self.diff = [0] * len(nums)
self.diff[0] = nums[0]
for i in range(1, len(nums)):
self.diff[i] = nums[i] - nums[i - 1]
def increment(self, i, j, val):
self.diff[i] += val
if j + 1 < len(self.diff):
self.diff[j + 1] -= val
def result(self):
r = [0] * len(self.diff)
r[0] = self.diff[0]
for i in range(1, len(self.diff)):
r[i] = r[i - 1] + self.diff[i]
return r
class Solution:
@staticmethod
@time_interval
def getModifiedArray(length: int, updates: List[List[int]]) -> List[int]:
if length <= 0:
return []
result = [0] * (length + 1)
for start, end, inc in updates:
result[start] += inc
result[end + 1] -= inc
for i in range(1, length):
result[i] += result[i - 1]
return result[:-1]
@staticmethod
@time_interval
def getModifiedArray2(length: int, updates: List[List[int]]) -> List[int]:
nums = [0] * length
d = Difference(nums)
for start, end, inc in updates:
d.increment(start, end, inc)
return d.result()
updates = [[1, 3, 2], [2, 4, 3], [0, 2, -2]]
s = Solution()
print(s.getModifiedArray(5, updates))
print(s.getModifiedArray2(5, updates))
|
[
"[email protected]"
] | |
17cab5b93cd1dec250015db0bddaadb9015ff413
|
c6ed7cf05f46db66fa71ee5c251954f7fcdab67e
|
/object-oriented-program.py
|
d314056817abdf72d28b09c5d6e9ac4d4f9c59d9
|
[] |
no_license
|
KFranciszek/python-tutorial
|
84cfbbe55b3ef13418d79ab323b499a24362aa77
|
575a334a6b4b0c591194c57d104bdd710d5d6478
|
refs/heads/master
| 2020-12-31T07:18:30.196411 | 2016-03-10T15:47:52 | 2016-03-10T15:47:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
class exampleClass:
eyes="blue"
age=22
def thisMethod(self):
return 'hey this method worked'
print exampleClass
exampleObject=exampleClass()
print exampleObject.eyes
print exampleObject.age
print exampleObject.thisMethod()
|
[
"[email protected]"
] | |
7503c66bb83cc8fac576b2e0b803de64e6108c4c
|
d95a9767f5de4f1951ad9faa5b701089f0e7e187
|
/teachers inter2.py
|
ee3150a56515d31f39c6ae89c34566cb7d6d8112
|
[] |
no_license
|
TomSadeh/Teachers_PIAAC
|
4df72e766c400906cc5ef119b5a8674adde0bd6a
|
0f078b8f1cdf11404c0dd359c73b5177f15afb5f
|
refs/heads/main
| 2023-03-12T12:02:08.804731 | 2021-02-21T22:06:34 | 2021-02-21T22:06:34 | 341,014,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,444 |
py
|
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
def invert(string):
"""
    A function which inverts a string.
Parameters
----------
string : string
the string to invert.
Returns
-------
string
An inverted string.
Required libraries
------------------
None.
"""
return string[::-1]
def weighted_median(data, weights, interpolate = False):
"""
A function that calculates the weighted median of a given series of values
by using a series of weights.
Parameters
----------
data : Iterable
The data which the function calculates the median for.
weights : Iterable
        The weights the function uses to calculate a weighted median.
Returns
-------
numpy.float64
The function return the weighted median.
Required libraries
---------
Numpy.
"""
#Forcing the data to a numpy array.
data = np.array(data)
weights = np.array(weights)
#Sorting the data and the weights.
ind_sorted = np.argsort(data)
sorted_data = data[ind_sorted]
sorted_weights = weights[ind_sorted]
#Calculating the cumulative sum of the weights.
sn = np.cumsum(sorted_weights)
#Calculating the threshold.
threshold = sorted_weights.sum()/2
#Interpolating the median and returning it.
if interpolate == True:
return np.interp(0.5, (sn - 0.5 * sorted_weights) / np.sum(sorted_weights), sorted_data)
#Returning the first value that equals or larger than the threshold.
else:
return sorted_data[sn >= threshold][0]
#Importing the countries csv and creating an empty DataFrame to contain the results.
df_countries = pd.read_csv(r'')
results = pd.DataFrame()
for country in df_countries['Country']:
#Excluding countries that don't have profession data.
if country in ['Austria', 'Canada', 'Estonia', 'Finland', 'United Kingdoms']:
continue
#Reading a file and dropping irrelevant rows.
df = pd.read_csv(r''+str(country)+'.csv', low_memory = False)
if country in ['France', 'Italy', 'Spain']:
df.dropna(subset = ['PVLIT1', 'PVNUM1'], inplace = True)
else:
df.dropna(subset = ['PVLIT1', 'PVNUM1', 'PVPSL1'], inplace = True)
#Slicing the teachers.
dfe = df[df['ISCO2C'] == '23'].copy()
dfe = dfe[dfe['B_Q01a'].isin(['12'])]
dfn = df[df['ISCO2C'] != '23'].copy()
dfn = dfn[dfn['B_Q01a'].isin(['12'])]
#If the sample is too small, skip.
if len(dfe) < 50:
continue
#Saving the median of each skill.
results.loc[country, 'Lit Median'] = weighted_median(dfe['PVLIT1'], dfe['SPFWT0'], interpolate = True)
results.loc[country, 'Num Median'] = weighted_median(dfe['PVNUM1'], dfe['SPFWT0'], interpolate = True)
results.loc[country, 'PSL Median'] = weighted_median(dfe['PVPSL1'], dfe['SPFWT0'], interpolate = True)
#Creating the figure.
results.loc[:, 'Color'] = 'tab:blue'
results.loc['Israel', 'Color'] = 'tab:orange'
fig, axes = plt.subplots(3, 1, figsize = (10,20), tight_layout = True)
results.sort_values(by = ['Lit Median'], inplace = True)
axes[0].bar(results.index, results['Lit Median'], color = results['Color'])
axes[0].set_xticklabels(labels = results.index, rotation = 'vertical', fontsize = 14)
results.sort_values(by = ['Num Median'], inplace = True)
axes[1].bar(results.index, results['Num Median'], color = results['Color'])
axes[1].set_xticklabels(labels = results.index, rotation = 'vertical', fontsize = 14)
results.sort_values(by = ['PSL Median'], inplace = True)
results.drop(['Spain', 'Italy', 'France'], inplace = True)
axes[2].bar(results.index, results['PSL Median'], color = results['Color'])
axes[2].set_xticklabels(labels = results.index, rotation = 'vertical', fontsize = 14)
fig.suptitle(invert('ביצועי מורים במבחני CAAIP ביחס לאנשים בעלי השכלה זהה'), x = 0.5, y = 1.02, fontsize = 20)
for i in range(0,3):
axes[i].set_ylabel(invert('יחס הציונים'), fontsize = 14)
axes[0].set_title(invert('אורינות מילולית'), fontsize = 14)
axes[1].set_title(invert('אורינות כמותית'), fontsize = 14)
axes[2].set_title(invert('פתרון בעיות'), fontsize = 14)
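# Tiny worked example for weighted_median above (numbers invented for illustration):
#
#   weighted_median([1, 2, 3, 4], [1, 1, 1, 5])                     # -> 4 (first value at/after half the weight mass)
#   weighted_median([1, 2, 3, 4], [1, 1, 1, 5], interpolate=True)   # -> 3.5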
|
[
"[email protected]"
] | |
464ffd8a81e68c2134ff5fb1d332fa541231862b
|
b85ab4b1199e7b36d50b25269a56cc21acac2722
|
/scripts/demultiplex_casava_fastq.py
|
5f0935d16a0b842aa00cf217045c7db5d75ad1d1
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
SciLifeLab/scilifelab
|
20e2ab7b53200d3d4d98c2f34d21d516c0d9de83
|
79960f7042118f900bd1eaabe4902ee76abd8020
|
refs/heads/master
| 2020-12-29T03:07:03.365483 | 2019-01-22T09:32:43 | 2019-01-22T09:32:43 | 4,002,536 | 1 | 6 |
MIT
| 2020-02-17T12:09:12 | 2012-04-12T07:58:18 |
Python
|
UTF-8
|
Python
| false | false | 1,339 |
py
|
"""Demultiplex a CASAVA 1.8+ FastQ file based on the information in the fastq header
"""
import sys
import argparse
from scilifelab.utils.fastq_utils import FastQParser, is_read_pair
def demultiplex_fastq(index, fastq1, fastq2):
filter = {'index': index}
fp1 = FastQParser(fastq1,filter)
if fastq2 is not None:
fp2 = FastQParser(fastq2,filter)
for r1 in fp1:
if fastq2 is not None:
r2 = fp2.next()
assert is_read_pair(r1,r2), "Mismatching headers for expected read pair"
sys.stderr.write("{}\n".format("\n".join(r2)))
sys.stdout.write("{}\n".format("\n".join(r1)))
def main():
parser = argparse.ArgumentParser(description="Demultiplex a CASAVA 1.8+ FastQ file based on the information in the fastq header")
parser.add_argument('fastq1', action='store',
help="FastQ file to demultiplex")
parser.add_argument('-f','--fastq2', action='store', default=None,
help="Optional paired FastQ file to demultiplex")
parser.add_argument('index', action='store',
help="Index sequence to demultiplex on")
args = parser.parse_args()
demultiplex_fastq(args.index,args.fastq1,args.fastq2)
if __name__ == "__main__":
main()
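# Example invocation (file names are placeholders): matching read-1 records go to
# stdout, matching read-2 records go to stderr.
#
#   python demultiplex_casava_fastq.py lane1_R1.fastq ACGTGGCT \
#       --fastq2 lane1_R2.fastq 1> sample_R1.fastq 2> sample_R2.fastq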
|
[
"[email protected]"
] | |
a5f68b739dca61aab1451a4a8f934959b269946f
|
ced555353fd2ac54654274b3a8aa84680cb78c01
|
/0425/42502.py
|
58843b70bb958b731ed13e9d3dd836ebc57fa463
|
[] |
no_license
|
aw6629/pratice
|
69801dfef5ad98b7da3627f234e1fa4351a41cd2
|
0981e50fd42123391e1a48890cebf638c2e24d6b
|
refs/heads/main
| 2022-12-24T22:02:55.045505 | 2020-10-05T08:17:59 | 2020-10-05T08:17:59 | 301,325,003 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
x=int(input('input len'))
y=int(input('input wid'))
class rect:
    def __init__(self, length, wid):
        self.__len = length
        self.__wid = wid
    def area(self):
        print('area =', self.__len * self.__wid)
    def perimiter(self):
        print('perimeter =', 2 * self.__len + 2 * self.__wid)
class squa(rect):
    def __init__(self, side):
        super().__init__(side, side)
if x == y:
    d = squa(x)
    d.area()
    d.perimiter()
else:
    r = rect(x, y)
    r.area()
    r.perimiter()
|
[
"[email protected]"
] | |
e043ac5e94b702c4a8dff5d511c9e222e9602a5f
|
629a7acf0a16a1a939e59bcf1128340fb724eac0
|
/optimization/Optimizers/LinearModelOptimize/LinearOptimize_scale.py
|
25ab67da7b903798be4b07a67216e6a12a1acf73
|
[] |
no_license
|
qingswu/2020_staj
|
c1f66b26c8c6d71bb167988392b8e5f346c5b64e
|
93365eb461ed8a0897267c71d6935279824b391f
|
refs/heads/master
| 2022-12-16T08:49:35.815702 | 2020-09-09T23:53:54 | 2020-09-09T23:53:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,651 |
py
|
import torch
import Tracker
import torch.optim as optim
import math
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../Data/Scenarios")
sys.path.append("../../Data/Train")
import trainData
dataPacks = trainData.trainDataPacks
dtype_torch = torch.float64
torch.autograd.set_detect_anomaly(True)
#helper functions
def calculateLoss(groundTruth, predictionMeasured):
diff = groundTruth - predictionMeasured
return torch.mm(diff.T, diff)
#to optimize
#########################################
scale = torch.tensor([
7
], dtype= dtype_torch, requires_grad=True)
def getProcessNoiseCov():
return torch.tensor([
[ 0.0251, 0.0219, -0.0072, -0.0054],
[ 0.0219, 0.0199, -0.0064, -0.0049],
[-0.0072, -0.0064, 0.0023, 0.0016],
[-0.0054, -0.0049, 0.0016, 0.0017]
], dtype=dtype_torch) * scale
#########################################
learningRate = 0.5
sequenceLength = 100
optimizer = optim.Adam([scale], lr = learningRate )
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience = 10, factor=0.5, verbose=True)
Tracker.ProcessNoiseCov = getProcessNoiseCov()
dt = 0.1
minimumSliceCount = float("inf")
batchSize = 25
initialThreshold = math.ceil(50 / sequenceLength)
losses_ = []
for dataPack in dataPacks:
sliceCount = int(len(dataPack[0]) / sequenceLength)
if(minimumSliceCount > sliceCount):
minimumSliceCount = sliceCount
for epoch in range(100):
losses = []
trackers = []
for _ in dataPacks:
trackers.append(Tracker.Tracker_SingleTarget_SingleModel_Linear_allMe())
for s in range(minimumSliceCount):
totalLoss = 0
batches = []
for b in range(math.ceil(len(dataPacks)/batchSize)):
            if((b+1)*batchSize > len(dataPacks)):
batches.append(dataPacks[b*batchSize : ])
else:
batches.append(dataPacks[b*batchSize : (b+1)*batchSize])
for b, batch in enumerate(batches):
loss = 0
for l, dataPack in enumerate(batch):
tracker = trackers[b*batchSize + l]
measurementPacks = dataPack[0][s*sequenceLength: (s+1) * sequenceLength]
groundTruthPacks = dataPack[1][s*sequenceLength: (s+1) * sequenceLength]
for i, (measurementPack, groundTruthPack) in enumerate(zip(measurementPacks, groundTruthPacks)):
z = tracker.feedMeasurement(torch.from_numpy(measurementPack[0]), dt)
if(z is not None and s >= initialThreshold):
loss += calculateLoss(torch.from_numpy(groundTruthPack[0]), z) / len(dataPacks) / sequenceLength
if(loss != 0):
loss.backward()
totalLoss += loss.item()
optimizer.step()
optimizer.zero_grad()
for l, _ in enumerate(batch):
tracker = trackers[b*batchSize + l]
tracker.detachTrack()
Tracker.ProcessNoiseCov = getProcessNoiseCov()
if(s == initialThreshold):
print(totalLoss)
print(scale)
scheduler.step(totalLoss)
losses.append(totalLoss)
losses_.append(losses)
lossesPlot = np.array(losses_)
plt.plot(lossesPlot[:,-1])
|
[
"[email protected]"
] | |
5d60feb8769d1485490b5ef335ee7b4911efa151
|
ffc550bca3253e9ec50eb4f9dee2822863414a6d
|
/Lab08/signals.py
|
40336fac9d7d2afa48d381a1fb69e2be454fae05
|
[] |
no_license
|
WangyuanhaoNB/ECE364
|
0841bbd9065c2673182cdd9b8ab49bd7db48ab38
|
0f0690477dbd307ad4dcb7283242ddb1e7c5b462
|
refs/heads/master
| 2020-05-03T07:41:28.000166 | 2017-12-13T01:13:48 | 2017-12-13T01:13:48 | 149,058,137 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,548 |
py
|
import moduleTasks
#Part III
def loadMultiple(signalNames, folderName, maxCount):
d ={}
for signalName in signalNames:
try:
list_vals , num_non_vals = moduleTasks.loadDataFrom(signalName, folderName)
except (OSError,ValueError):
d[signalName] = None
else:
if num_non_vals <= maxCount:
d[signalName] = list_vals
else:
d[signalName] = []
return d
def saveData(signalsDictionary, targetFolder, bounds, threshold):
for k,v in signalsDictionary.items():
try:
bound = moduleTasks.isBounded(v, bounds, threshold)
except ValueError:
pass
else:
if bound:
new_filename = k + ".txt"
path_string = targetFolder + "/" + new_filename
with open(path_string,"w") as myFile:
for val in v[:-1]:
final_str = "{:.3f}\n".format(val)
myFile.write(final_str)
final_str = "{:.3f}".format(v[-1])
myFile.write(final_str)
else:
pass
if __name__ == "__main__":
pass
#d = loadMultiple(["AFW-481","CIG-308","FPT-701"], "Signals", 10)
#for k in d.keys():
# print(len(d[k]))
#d["CIG-308"] = []
#for k in d.keys():
# print(len(d[k]))
#list1 , num1 = moduleTasks.loadDataFrom("FPT-701", "Signals")
#print(num1)
#saveData(d, "Signals2", (0.00,100.00), 1000)
|
[
"[email protected]"
] | |
294ae3a7c890e586c2da0d3b832995c6a1a6de4c
|
9407457c5724316cf7019a87adbde8de5a5ab4c5
|
/reconstruct-original-digits-from-english/solution.py
|
f9cb9ec958df247b2fb4031a761a5b22310748d7
|
[] |
no_license
|
andriitugai/Code_Challenges
|
7f3b7e344341b7ec736b32e93e21eb01f49d698c
|
d4b8e016c0117df4614e715e8b172f4abaa646fc
|
refs/heads/master
| 2023-04-19T03:42:59.807354 | 2021-05-04T13:34:16 | 2021-05-04T13:34:16 | 351,730,724 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 804 |
py
|
class Solution:
def originalDigits(self, s: str) -> str:
def decremental(num, word, control_char):
n = s_counts.get(control_char, 0)
if n:
for c in word:
s_counts[c] -= n
result.extend([str(num)] * n)
s_counts = {}
for c in s:
s_counts[c] = s_counts.get(c, 0) + 1
result = []
decremental(6, "six", "x")
decremental(7, "seven", "s")
decremental(0, "zero", "z")
decremental(2, "two", "w")
decremental(5, "five", "v")
decremental(4, "four", "f")
decremental(8, "eight", "g")
decremental(9, "nine", "i")
decremental(3, "three", "h")
decremental(1, "one", "n")
return ''.join(sorted(result))
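# Quick check against the standard examples for this problem:
#
#   Solution().originalDigits("owoztneoer")   # -> "012"
#   Solution().originalDigits("fviefuro")     # -> "45"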
|
[
"[email protected]"
] | |
7736ed51fe1a1691133e354fb1c1d6372fd47acf
|
4a238068e29a1f6871cc049a0486b20b27e781de
|
/Habana/benchmarks/resnet/implementations/resnet-tf-sys-420gh-tngr/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_optimizer.py
|
4c64f2a9dc780522a575d18bc7c554999bcaf59b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Pixelz-Inc/training_results_v1.0
|
61b4555ad482b189d1966be3edd127858addd628
|
c507130c4e04c1f274a9ae8b7284aac79f26325a
|
refs/heads/master
| 2023-08-18T22:46:13.316503 | 2021-10-22T04:01:57 | 2021-10-22T04:01:57 | 399,047,712 | 0 | 0 |
NOASSERTION
| 2021-08-23T09:37:25 | 2021-08-23T09:37:25 | null |
UTF-8
|
Python
| false | false | 9,194 |
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer-wise Adaptive Rate Scaling optimizer for large-batch training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.ops import state_ops
class LARSOptimizer(optimizer_v2.OptimizerV2):
"""Layer-wise Adaptive Rate Scaling for large batch training.
Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888)
Implements the LARS learning rate scheme presented in the paper above. This
optimizer is useful when scaling the batch size to up to 32K without
significant performance degradation. It is recommended to use the optimizer
in conjunction with:
- Gradual learning rate warm-up
- Linear learning rate scaling
- Poly rule learning rate decay
Note, LARS scaling is currently only enabled for dense tensors. Sparse tensors
use the default momentum optimizer.
"""
def __init__(
self,
learning_rate,
momentum=0.9,
weight_decay=0.0001,
# The LARS coefficient is a hyperparameter
eeta=0.001,
epsilon=0.0,
name="LARSOptimizer",
# Enable skipping variables from LARS scaling.
# TODO(sameerkm): Enable a direct mechanism to pass a
# subset of variables to the optimizer.
skip_list=None,
use_nesterov=False,
**kwargs):
"""Construct a new LARS Optimizer.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
learning rate.
momentum: A floating point value. Momentum hyperparameter.
weight_decay: A floating point value. Weight decay hyperparameter.
      eeta: LARS coefficient as used in the paper. Default set to LARS
coefficient from the paper. (eeta / weight_decay) determines the highest
scaling factor in LARS.
epsilon: Optional epsilon parameter to be set in models that have very
small gradients. Default set to 0.0.
name: Optional name prefix for variables and ops created by LARSOptimizer.
skip_list: List of strings to enable skipping variables from LARS scaling.
If any of the strings in skip_list is a subset of var.name, variable
'var' is skipped from LARS scaling. For a typical classification model
with batch normalization, the skip_list is ['batch_normalization',
'bias']
use_nesterov: when set to True, nesterov momentum will be enabled
**kwargs: keyword arguments.
Raises:
ValueError: If a hyperparameter is set to a non-sensical value.
"""
if momentum < 0.0:
raise ValueError("momentum should be positive: %s" % momentum)
if weight_decay < 0.0:
raise ValueError("weight_decay should be positive: %s" % weight_decay)
super(LARSOptimizer, self).__init__(name=name, **kwargs)
self._set_hyper("learning_rate", learning_rate)
# When directly using class members, instead of
# _set_hyper and _get_hyper (such as learning_rate above),
# the values are fixed after __init(), and not being
# updated during the training process.
# This provides better performance but less flexibility.
self.momentum = momentum
self.weight_decay = weight_decay
self.eeta = eeta
self.epsilon = epsilon or backend_config.epsilon()
self._skip_list = skip_list
self.use_nesterov = use_nesterov
def _prepare_local(self, var_device, var_dtype, apply_state):
lr_t = self._get_hyper("learning_rate", var_dtype)
local_step = math_ops.cast(self.iterations, var_dtype)
lr_t = math_ops.cast(lr_t(local_step), var_dtype)
learning_rate_t = array_ops.identity(lr_t)
apply_state[(var_device, var_dtype)].update(
dict(
learning_rate=learning_rate_t,
))
def _create_slots(self, var_list):
for v in var_list:
self.add_slot(v, "momentum")
def compute_lr(self, grad, var, coefficients):
scaled_lr = coefficients["learning_rate"]
if self._skip_list is None or not any(v in var.name
for v in self._skip_list):
w_norm = linalg_ops.norm(var, ord=2)
g_norm = linalg_ops.norm(grad, ord=2)
trust_ratio = array_ops.where(
math_ops.greater(w_norm, 0),
array_ops.where(
math_ops.greater(g_norm, 0),
(self.eeta * w_norm /
(g_norm + self.weight_decay * w_norm + self.epsilon)), 1.0), 1.0)
scaled_lr = coefficients["learning_rate"] * trust_ratio
# Add the weight regularization gradient
grad = grad + self.weight_decay * var
return scaled_lr, grad
def _apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
scaled_lr, grad = self.compute_lr(grad, var, coefficients)
mom = self.get_slot(var, "momentum")
return training_ops.apply_momentum(
var,
mom,
math_ops.cast(1.0, var.dtype.base_dtype),
grad * scaled_lr,
self.momentum,
use_locking=False,
use_nesterov=self.use_nesterov)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
scaled_lr, grad = self.compute_lr(grad, var, coefficients)
mom = self.get_slot(var, "momentum")
# ============================================================
return training_ops.resource_apply_keras_momentum(
var.handle,
mom.handle,
scaled_lr,
grad,
self.momentum,
use_locking=False,
use_nesterov=self.use_nesterov)
# ============================================================
# ============================================================
# mom_t = mom * self.momentum - grad * scaled_lr
# mom_t = state_ops.assign(mom, mom_t, use_locking=False)
# if self.use_nesterov:
# var_t = var + mom_t * self.momentum - grad * scaled_lr
# else:
# var_t = var + mom_t
# return state_ops.assign(var, var_t, use_locking=False).op
# ============================================================
# Fallback to momentum optimizer for sparse tensors
def _apply_sparse(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
mom = self.get_slot(var, "momentum")
return training_ops.sparse_apply_momentum(
var,
mom,
coefficients["learning_rate"],
grad.values,
grad.indices,
self.momentum,
use_locking=False,
use_nesterov=self.use_nesterov)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
mom = self.get_slot(var, "momentum")
return training_ops.resource_sparse_apply_keras_momentum(
var.handle,
mom.handle,
coefficients["learning_rate"],
grad,
indices,
self.momentum,
use_locking=False,
use_nesterov=self.use_nesterov)
def get_config(self):
config = super(LARSOptimizer, self).get_config()
config.update({
"learning_rate": self._serialize_hyperparameter("learning_rate"),
"momentum": self.momentum,
"weight_decay": self.weight_decay,
"eeta": self.eeta,
"epsilon": self.epsilon,
"use_nesterov": self.use_nesterov,
})
return config
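# Stand-alone numeric sketch of the trust ratio computed in compute_lr above
# (toy numbers; eeta and weight_decay follow the defaults in __init__):
#
#   import numpy as np
#   w, g = np.array([3.0, 4.0]), np.array([0.6, 0.8])
#   eeta, weight_decay, epsilon = 0.001, 0.0001, 0.0
#   trust_ratio = eeta * np.linalg.norm(w) / (
#       np.linalg.norm(g) + weight_decay * np.linalg.norm(w) + epsilon)
#   # -> 0.001 * 5 / (1.0 + 0.0005) ~= 0.005, so the base LR is scaled down ~200x for this layer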
|
[
"[email protected]"
] | |
a51d88e25ec1dc0494fa59f51cddc8b8f646c3ee
|
98df159a4efcbb43a07a3b983f8b30fd5a1d4d4b
|
/test-test.py
|
955b0701cc06da79ee991b87ecbbbb4207bdba08
|
[] |
no_license
|
Lumia720/L-layer-Neural-network
|
e6390020e4f2c67242899f6fefdca1796e8f55ae
|
9d05997d3411e54d4404a655239edef8b5b3fe8e
|
refs/heads/master
| 2020-08-06T22:21:06.760074 | 2019-10-06T14:04:06 | 2019-10-06T14:04:06 | 213,179,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 140 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 17:18:20 2019
@author: ttl80
"""
import numpy as np
A1 = [[3,3],[4,4]]
print(A1)
print(np.array(A1))  # convert the nested list to a NumPy array and print it
|
[
"[email protected]"
] | |
ed266d2bd928f39ccd13797dd06884870914583f
|
54f95342dde22c2efe5e44e2edfecd13bfd98787
|
/ejercicios_obligatorios/ejercicio_2.py
|
0820693f943d403cb626469bbec1702419d07240
|
[] |
no_license
|
LucianoBartomioli/-EDU-POO_IRESM_2021
|
3293e98aada2ae8bd3221991a9ac95200f0541bd
|
dfaca205c31b95cf784cd8c04ae8060c9c3950da
|
refs/heads/main
| 2023-04-14T20:44:10.449823 | 2021-05-01T22:13:44 | 2021-05-01T22:13:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,550 |
py
|
nombre_ciclista = ""
ciclistas = []
while nombre_ciclista != "0":
nombre_ciclista = input("Ingrese el nombre del ciclista: ")
if nombre_ciclista != "0":
tiempo_de_carrera = float(input("Ingrese el tiempo de carrera: "))
if tiempo_de_carrera < 0:
while tiempo_de_carrera < 0:
print("¡El tiempo de carrera no puede ser negativo!")
tiempo_de_carrera = input("Ingrese nuevamente el tiempo de carrera: ")
ciclista_lista = [nombre_ciclista, tiempo_de_carrera]
ciclistas.append(ciclista_lista)
tiempo_record = int(input("Ingrese el tiempo record: "))
cantidad_ciclistas = len(ciclistas)
mejor_tiempo = 999999999
suma_tiempos = 0
for ciclista in ciclistas:
suma_tiempos = suma_tiempos + ciclista[1]
tiempo_ciclista = ciclista[1]
if ciclista[1] < mejor_tiempo:
mejor_tiempo = ciclista[1]
ciclista_ganador = [ciclista[0], mejor_tiempo]
print()
print(f"El ciclista ganador es {ciclista_ganador[0]} con un tiempo de {ciclista_ganador[1]}")
print()
if ciclista_ganador[1]< tiempo_record:
print(f"El ciclista {ciclista_ganador[0]} supero el tiempo record de {tiempo_record}")
elif ciclista_ganador[1]== tiempo_record:
print(f"El ciclista {ciclista_ganador[0]} igualó el tiempo record de {tiempo_record}")
else:
print(f"El ciclista {ciclista_ganador[0]} supero el tiempo record de {tiempo_record}")
tiempo_promedio_ciclistas = suma_tiempos / len(ciclistas)
print(f"El promedio de tiempo de carrera es {tiempo_promedio_ciclistas}")
|
[
"[email protected]"
] | |
b7935778e4af05b4794433f47991deced92fb943
|
d9a469bc9cff39d89e7cb04e4fc537763aee9aca
|
/binance_chain/exceptions.py
|
957d3ed87c3cd1eb28ab1f816979271c6ed5ca5f
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
sammchardy/python-binance-chain
|
d017c0f1e6bd84d28017f87e2d229b21a2ee8b8a
|
19d7d639cc912a27ec86831338c2a2dc96289d50
|
refs/heads/master
| 2023-05-11T19:15:44.912507 | 2021-06-01T03:14:25 | 2021-06-01T03:14:25 | 172,324,144 | 310 | 111 |
MIT
| 2022-06-30T10:55:19 | 2019-02-24T10:29:29 |
Python
|
UTF-8
|
Python
| false | false | 1,626 |
py
|
import ujson as json
class BinanceChainAPIException(Exception):
def __init__(self, response, status_code):
self.code = 0
try:
json_res = json.loads(response.content)
except ValueError:
if not response.content:
self.message = status_code
else:
self.message = 'Invalid JSON error message from Binance Chain: {}'.format(response.text)
else:
self.code = json_res.get('code', None)
self.message = json_res['message']
self.status_code = status_code
self.response = response
self.request = getattr(response, 'request', None)
def __str__(self): # pragma: no cover
return f'APIError(code={self.code}): {self.message}'
class BinanceChainRequestException(Exception):
pass
class BinanceChainBroadcastException(Exception):
pass
class BinanceChainSigningAuthenticationException(Exception):
pass
class BinanceChainRPCException(Exception):
def __init__(self, response):
self.code = 0
try:
json_res = json.loads(response.content)
except ValueError:
self.message = 'Invalid JSON error message from Binance Chain: {}'.format(response.text)
else:
self.code = json_res['error']['code']
self.message = json_res['error']['message']
self.status_code = response.status_code
self.response = response
self.request = getattr(response, 'request', None)
def __str__(self): # pragma: no cover
return f'RPCError(code={self.code}): {self.message}'
|
[
"[email protected]"
] | |
779a661995aadef8a57f6133081f9846279fb570
|
885bb5d50bb5a3433b6a571e934347c61f1427f1
|
/hw5/task1.py
|
6afd468b7668aef541cfa4596dde33b9291f7738
|
[] |
no_license
|
abhi-phoenix/Data-mining
|
824f0c54c77f668599ebf6627e0f223770a53d08
|
601c3a1c236fb02966a3af19b114005ddaa81279
|
refs/heads/main
| 2023-07-25T12:34:07.749911 | 2021-09-09T07:40:47 | 2021-09-09T07:40:47 | 404,628,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,317 |
py
|
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import sys
import json
import re
import collections
import time
import math
import random
from itertools import combinations
import binascii
import json
start = time.time()
def generate_prime(m,number_of_primes):
def prime(count):
end = int(math.sqrt(count))+1
for xx in range(2,end):
if count%xx == 0:
return False
return True
primes = []
count = m+1
while len(primes) != number_of_primes:
if prime(count) == True:
primes.append(count)
count+=1
else:
count+=1
return primes
def task1(sc,first_json, second_json, output_file):
def hash_func_tuples(length_users_all, hash_functions_count):
list_ = list(range(1,length_users_all))
alpha = random.sample(list_,hash_functions_count)
beta = random.sample(list_,hash_functions_count)
peta = generate_prime(length_users_all, hash_functions_count)
## HERE set
tuples = set(zip(alpha,beta,peta))
if len(tuples) != len(alpha):
while True:
a = random.sample(list_,hash_functions_count)
b = random.sample(list_,hash_functions_count)
p = generate_prime(length_users_all, hash_functions_count)
            new_tuples = list(zip(a, b, p))
for i in new_tuples:
tuples.add(i)
if len(tuples) == len(alpha):
return list(tuples)
else:
return list(tuples)
"""
def get_signature(slice_row, tuples, length_users_all):
output = [99999999 for i in range(len(tuples))]
for user in slice_row:
for tup in range(len(tuples)):
hashfunction = ((tuples[tup][0]*user+tuples[tup][1])%tuples[tup][2])%length_users_all
if hashfunction<output[tup]:
output[tup] = hashfunction
return output
"""
def generate_hashes(x, hash_func_tuples,m):
funcs_all = []
for i in hash_func_tuples:
print(i[0], i[1], i[2])
val = ((i[0]*x+i[1])%i[2])%m
print(type(val))
funcs_all.append(val)
return funcs_all
hash_functions_count = 80
m = 1200
filter_arr = [0 for _ in range(m)]
textRDD = sc.textFile(first_json)
RDD_dictionary = textRDD.map(json.loads)
spark = SparkSession(sc)
textRDD2 = sc.textFile(second_json)
RDD_dictionary2 = textRDD2.map(json.loads)
#print(RDD_dictionary.collect())
mapping = RDD_dictionary.map(lambda x: x['name'])
mapping2 = RDD_dictionary2.map(lambda x: x['name'])
hashed_map = mapping.map(lambda x: int(binascii.hexlify(x.encode('utf8')),16))
hashed_map2 = mapping2.map(lambda x: int(binascii.hexlify(x.encode('utf8')),16))
hash_func_tuples = hash_func_tuples(m, hash_functions_count)
    hash_funcs_map = hashed_map.map(lambda x: generate_hashes(x, hash_func_tuples, m))
for arr in hash_funcs_map.collect():
for hash_val_arr in arr:
filter_arr[hash_val_arr] = 1
    hash_funcs_map2 = hashed_map2.map(lambda x: generate_hashes(x, hash_func_tuples, m))
with open(output_file, 'w+') as file:
for arr in hash_funcs_map2.collect():
###
flag = 1
for hash_val_arr in arr:
if filter_arr[hash_val_arr] != 1:
flag = 0
if flag ==1:
file.write('T'+'\t')
else:
file.write('F'+'\t')
file.close()
#print(int(binascii.hexlify(mapping.first().encode('utf8')),16))
"""
#mapping = RDD_dictionary.map(lambda x: (x['business_id'], x['user_id'])).distinct().filter(lambda x: x[0] != None and x[1] != None).map(lambda x: (x[0], {x[1]})) #.filter(lambda x: x[2]!= None)
business_user_group = RDD_dictionary.map(lambda x: (x['business_id'], x['user_id'])).groupByKey()
business_user = business_user_group.mapValues(set)
business_user_dict = business_user.collectAsMap()
#business_user2 = RDD_dictionary.map(lambda x: (x['business_id'], {x['user_id']})).reduceByKey(lambda a,b: a+b)
user = RDD_dictionary.map(lambda x: x['user_id']).distinct()
user_sorted = user.sortBy(lambda user_id: user_id)
users_all = user_sorted.collect()
## 2
dic_user = dict()
length_users_all = len(users_all)
indexes_users = list(range(length_users_all))
for user_index in range(len(users_all)):
dic_user[users_all[user_index]] = indexes_users[user_index]
business = RDD_dictionary.map(lambda x: x['business_id']).distinct()
business_sorted = user.sortBy(lambda business_id: business_id)
business_all = business_sorted.collect()
# 3
dic_business = dict()
length_business_all = len(business_all)
indexes_business = list(range(length_business_all))
for business_index in range(len(business_all)):
dic_business[business_index] = business_index
business_user_transform_char = business_user.map(lambda x: (x[0],[dic_user[i] for i in x[1]]))
tuples = hash_func_tuples(length_users_all, hash_functions_count)
signature_matr = business_user_transform_char.map(lambda x: (x[0],get_signature(x[1],tuples, length_users_all)))
print(signature_matr.collect())
Lsh_bands= signature_matr.flatMap(lambda x: LSH(x,bands, rows))
#print(Lsh_bands.collect())
reduce_lsh = Lsh_bands.groupByKey()
#print(reduce_lsh)
second_map_cleared = reduce_lsh.mapValues(list)
second_map_filtered = second_map_cleared.filter(lambda x:len(x[1])>1) #.= ? hash bands what, key? (band, tuple)
cands = second_map_filtered.flatMap(lambda x: list(combinations(x[1],2))).distinct()
#print(cands.collect())
jaccard_similarity = cands.map(lambda x: jaccard(x, business_user_dict)).filter(lambda x: x[2]>=0.055).collect() #.sort()
#print(jaccard_similarity)
print(jaccard_similarity)
#print(business_user.collect())
return jaccard_similarity
"""
return 1
if __name__ == '__main__':
if len(sys.argv[1:]) == 3:
first_json = sys.argv[1]
second_json = sys.argv[2]
output_file = sys.argv[3]
conf = (SparkConf()
.setMaster("local[*]")
.setAppName("My app")
.set("spark.executor.memory", "1g"))
sc = SparkContext(conf = conf)
sc.setLogLevel("ERROR")
output = task1(sc,first_json, second_json, output_file)
"""
with open(output_file, 'w+') as file:
for row in output:
temp_dict = dict()
temp_dict['b1'] = row[0]
temp_dict['b2'] = row[1]
temp_dict['sim'] = row[2]
line = json.dumps(temp_dict)+"\n"
file.writelines(line)
file.close()
"""
#print(time.time()-start)
else:
print("Not Enough Number of Arguments")
|
[
"[email protected]"
] | |
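The hw5/task1.py entry above builds a Bloom filter over integer-hashed names and then tests a second file against it. As a point of comparison only, here is a minimal self-contained sketch of the same membership-test idea; the bit-array size M, the hash count K, and the example strings are illustrative assumptions, not values taken from the entry.
import binascii
import random

M = 1200                      # assumed bit-array size (not from the entry)
K = 8                         # assumed number of hash functions
random.seed(0)
PARAMS = [(random.randint(1, M), random.randint(1, M)) for _ in range(K)]

def _hashes(name):
    # map the string to a big integer, then derive K bucket positions
    x = int(binascii.hexlify(name.encode('utf8')), 16)
    return [(a * x + b) % M for a, b in PARAMS]

def build_filter(names):
    bits = [0] * M
    for name in names:
        for h in _hashes(name):
            bits[h] = 1
    return bits

def might_contain(bits, name):
    # a Bloom filter can give false positives, never false negatives
    return all(bits[h] == 1 for h in _hashes(name))

bits = build_filter(["alpha cafe", "beta bar"])
print(might_contain(bits, "alpha cafe"))   # True
print(might_contain(bits, "gamma grill"))  # very likely False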
4f0d3727a003f65b28d97e95316cdc9eefd284eb
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_196/ch80_2020_04_13_18_23_05_143280.py
|
f6edda895b2e0e2bcd29788dd3078b902f425c3f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 151 |
py
|
def interseccao_chaves(dic1,dic2):
lista = []
    for a in dic1.keys():
        if a in dic2.keys():
            lista.append(a)
    return lista
|
[
"[email protected]"
] | |
304d0aadd4793582691ecaac867aac3b2e6711df
|
52e73b8542367309610aedc20d99c1669cef3076
|
/algorithm/regression/linear_regression.py
|
0f72e64dec72f46e0ba27c12e879e652536478bf
|
[
"Apache-2.0"
] |
permissive
|
coding-chenkaikai/pytorch
|
ad52b7bb3a0cf7864e8cb7a21839a0127b306491
|
c6201829dcd4556f1a796f0404357223a2233573
|
refs/heads/master
| 2020-05-30T04:52:07.359435 | 2019-06-01T07:37:03 | 2019-06-01T07:37:03 | 189,550,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 227 |
py
|
# -*- coding: UTF-8 -*-
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Variable
print(torch.__version__)
print('gpu: ', torch.cuda.is_available())
|
[
"[email protected]"
] | |
a1f12290c23d8395dcfa64805afb025e05f935ec
|
7575d136a22c0476330646a06f99676a79c7e117
|
/lib/notifications.py
|
9f00d14ddbce9ba041833718e5ca730bd40296c5
|
[] |
no_license
|
ryant71/fitly
|
5d7d87a363e4808647364c8d2f2809d3864d0d9b
|
49070dd43549e1c84d65e5ef8125ab5b61f62efa
|
refs/heads/master
| 2021-02-23T08:55:52.848095 | 2020-03-03T23:28:21 | 2020-03-03T23:28:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,650 |
py
|
from lib.sqlalchemy_declarative import db_connect, withings, stravaSummary, athlete
from sqlalchemy import func
from datetime import datetime, timedelta
import dash_bootstrap_components as dbc
def last_body_measurement_notification():
session, engine = db_connect()
last_measurement_date = session.query(func.max(withings.date_utc))[0][0]
engine.dispose()
session.close()
if last_measurement_date:
days_since_last_measurement = datetime.utcnow().date() - last_measurement_date.date()
if days_since_last_measurement >= timedelta(days=7):
return dbc.Alert(
"It's been {:.0f} days since your last body measurement".format(days_since_last_measurement.days),
color='primary',
style={'borderRadius': '4px'})
def last_ftp_test_notification(ftp_type):
session, engine = db_connect()
last_ftp_test_date = \
session.query(func.max(stravaSummary.start_date_utc)).filter(
(stravaSummary.name.ilike('%ftp test%')) & (stravaSummary.type.ilike(ftp_type))
)[0][0]
ftp_week_threshold = session.query(athlete).filter(
athlete.athlete_id == 1).first().ftp_test_notification_week_threshold
engine.dispose()
session.close()
if last_ftp_test_date:
weeks_since_ftp_test = ((datetime.utcnow() - last_ftp_test_date).days) / 7.0
if weeks_since_ftp_test >= ftp_week_threshold:
return dbc.Alert(
"It's been {:.1f} weeks since your last {} FTP test".format(weeks_since_ftp_test, ftp_type),
color='primary',
style={'borderRadius': '4px'})
|
[
"[email protected]"
] | |
94b8dc6ded544809973f96e4c6c5d7f9a07def5e
|
a4429ad2de2f58fdb5765dbec65378723780a904
|
/Actividades/practica2-1/graph.py
|
af398353fdcf8475c8ec342f24b173f79dd79403
|
[
"MIT"
] |
permissive
|
alfonsoirai/Advanced-Databases-Course
|
ad389921452f44cbb0b4ba3842d5e2fdc503ea4a
|
1bb333e1b40980d0dbcae75db3a7df42be0b3b01
|
refs/heads/master
| 2021-04-27T19:41:44.007238 | 2018-02-28T18:35:56 | 2018-02-28T18:35:56 | 122,362,718 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,561 |
py
|
import pandas as pd
class Node(object):
def __init__(self, id, name):
self.id = id
self.name = name
self.level = 0
self.adjacency = {}
def add_connection(self, node, cost):
self.adjacency[node.id] = [cost, node]
def toString(self):
string = str(self.id) + ", " + self.name + ":"
for i in self.adjacency.values():
string += "\n\t(" + str(i[1].id) + ", " + i[1].name + ")"
return string
class Graph (object):
def __init__(self, name="Vox Graph"):
self.name = name
self.size = 0
self.nodes = {}
def insert_node(self, node):
self.nodes[node.id] = node
self.size += 1
def bfs(self, firstID):
firstNode = self.nodes[firstID]
result = []
resultString = []
Q = []
result.append(firstNode)
Q.append(firstNode)
firstNode.level = 0
resultString.append(str(firstNode.level) + " " + firstNode.name)
while Q:
current = Q.pop(0)
for i in current.adjacency.values():
if i[1] not in result:
i[1].level = current.level + 1
result.append(i[1])
Q.append(i[1])
resultString.append(str(i[1].level) + " " + i[1].name)
return result, resultString
def dfs(self, firstID):
firstNode = self.nodes[firstID]
result = []
resultString = []
stack = []
stack.append(firstNode)
firstNode.level = 0
while stack:
current = stack.pop()
if(current not in result):
result.append(current)
resultString.append(str(current.level) + " " + current.name)
for i in current.adjacency.values():
if i[1] not in result:
i[1].level = current.level + 1
stack.append(i[1])
return result, resultString
def toString(self):
string = ""
for i in self.nodes.values():
string += "\n" + i.toString()
return self.name + " of size: " + str(self.size) + " with Nodes: " + string
def read_csv(self, nodesFile, edgesFile):
fileNodes = pd.read_csv(nodesFile)
for i, row in fileNodes.iterrows():
self.insert_node(Node(row['Id'], row['Label']))
fileEdges = pd.read_csv(edgesFile)
for i, row in fileEdges.iterrows():
self.nodes[row['Source']].add_connection(self.nodes[row['Target']], 1)
|
[
"[email protected]"
] | |
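The graph.py entry above exposes Graph.bfs/Graph.dfs plus a CSV loader. A small hedged usage sketch follows; it assumes the Node and Graph classes from that entry are in scope (for example, pasted into the same module), and the ids and labels are made up for illustration.
# assumes Node and Graph from the graph.py entry above are defined in this module
g = Graph("Demo")
for node_id, label in [(1, "A"), (2, "B"), (3, "C")]:
    g.insert_node(Node(node_id, label))
g.nodes[1].add_connection(g.nodes[2], 1)
g.nodes[2].add_connection(g.nodes[3], 1)
order, lines = g.bfs(1)
print("\n".join(lines))   # each line is "<level> <name>": "0 A", "1 B", "2 C"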
692c8de911e9fb1876f5df1f7ef733905ec6483e
|
6362f80ea7a3b5ccc0f53751271327ecc6852e0c
|
/Topics/Elif statement/Coordinates/main.py
|
a89069c78ab7e7d944e6ffa085dcd50236fe155e
|
[] |
no_license
|
vinaym97/Hangman
|
e592672f6902bad0e8abffb802dc2872c428ef84
|
0ef0961b02d4186a6b1da62419ce2573ffb56f6d
|
refs/heads/main
| 2023-07-11T09:00:56.913046 | 2021-08-17T13:38:57 | 2021-08-17T13:38:57 | 387,756,264 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 471 |
py
|
x = float(input())
y = float(input())
if (x == 0 and y == 0):
print("It's the origin!")
# elif (x == 0 and y == ((-10 <= y < 0) or (0 < y <= 10))) or (x==((-10 <= x < 0) or (0 < x <= 10)) and y == 0 ):
elif (x > 0) and (y > 0):
print("I")
# elif (x < 0) and (y > 0):
elif x < 0 < y:
print("II")
elif (x < 0) and (y < 0):
print("III")
# elif (x > 0) and (y < 0):
elif y < 0 < x:
print("IV")
else:
print("One of the coordinates is equal to zero!")
|
[
"[email protected]"
] | |
288adf9ab5077d9b7d3daaad38895722092bdf56
|
d5c23a2b29e52f2762faaca36f01032d7ceee898
|
/manage.py
|
af6afcf1949521a2c2f363f34a5218526aa6016e
|
[] |
no_license
|
fbwinghsa/musicchat
|
d6a33a319844d3af98b88fa0a901988819983070
|
2ce44c9c2d810e25d4666de581b9e5325a2b80f4
|
refs/heads/master
| 2020-05-18T22:30:08.732180 | 2014-11-03T09:14:41 | 2014-11-03T09:14:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "musicchat.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
0d9d93fefe298f84e13f4389bb039b2ea8cf1f26
|
a760adeac660bcd2b6d32e28ef204b31b3adca34
|
/0x02-python-import_modules/1-calculation.py
|
c1e1c1fa99efa9582b29995735ff28fc790b2e64
|
[] |
no_license
|
eserebry/holbertonschool-higher_level_programming
|
4f694ab1444654cfdb6a708bbbf20ae763334ee1
|
706cdd32371ec5f2971e8f8277d5c95ceadb5fc8
|
refs/heads/master
| 2020-03-09T13:51:52.376133 | 2018-09-08T03:31:19 | 2018-09-08T03:31:19 | 128,821,206 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 343 |
py
|
#!/usr/bin/python3
if __name__ == "__main__":
from calculator_1 import add, sub, mul, div
a = 10
b = 5
print("{:d} + {:d} = {:d}" .format(a, b, add(a, b)))
print("{:d} - {:d} = {:d}" .format(a, b, sub(a, b)))
print("{:d} * {:d} = {:d}" .format(a, b, mul(a, b)))
print("{:d} / {:d} = {:d}" .format(a, b, div(a, b)))
|
[
"[email protected]"
] | |
5fcd80e755545cb1696b792d22db6bfeb15d37fb
|
b502a06d1c9a887c8c991ae9933c8ad9135b9007
|
/runme.py
|
59418c1ff156b1635016bee0c0ac703cf36dd7af
|
[] |
no_license
|
bbargar/ECON498_Final
|
da4638186f14c22b305875a6216715697e869101
|
eeedd3b05ba99331df545a2413a1f8f1b9cc131a
|
refs/heads/main
| 2023-01-30T00:17:47.926700 | 2020-12-12T18:53:15 | 2020-12-12T18:53:15 | 320,752,787 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,287 |
py
|
import kfold_template
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import numpy as np
import json
dataset = pd.read_csv('business_sample_csv.csv')
# print(dataset)
target = dataset.iloc[:,2].values
data = dataset.iloc[:,[5,6,7,11,15]].values
target = target.astype(int)
# print(target)
# print(data)
n_estimators_list = [5,10,20,30,40]
max_depth_list = [5,10,20,40,50]
results_list = []
for i in max_depth_list:
machine = RandomForestClassifier(n_estimators=20, criterion='gini', max_depth=i, n_jobs=4)
accuracy_score, confusion_matrix = kfold_template.run_kfold(4, data, target, machine)
results_list.append(['gini ',accuracy_score,str(i),'n_estimators = 20'])
print(accuracy_score)
for i in confusion_matrix:
print(i)
for j in n_estimators_list:
machine = RandomForestClassifier(n_estimators=j, criterion='gini', max_depth=20, n_jobs=4)
accuracy_score, confusion_matrix = kfold_template.run_kfold(4, data, target, machine)
results_list.append(['gini ',accuracy_score,str(j),'max_depth_list = 20'])
print(accuracy_score)
for i in confusion_matrix:
print(i)
results = pd.DataFrame(results_list)
results.to_csv('runme_prediction_csv.csv')
|
[
"[email protected]"
] | |
4f1ec457cdb2aff59d8558ed5d090e890e081fa7
|
80a689cecd96315e55e6452d201e6531868bdc99
|
/management/commands/pdk_nudge_ios_devices_boto.py
|
c82c6760ca2673b2252cf9062343fe8914127764
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
kamau-edwin/PassiveDataKit-Django
|
d36fad6b366fef32c96941b10455b054abd44f7c
|
95db5701f88c74328b0611124149fdffa079e7b8
|
refs/heads/master
| 2021-01-06T17:25:50.471370 | 2020-02-26T21:17:32 | 2020-02-26T21:17:32 | 241,416,694 | 0 | 0 |
Apache-2.0
| 2020-02-18T16:56:09 | 2020-02-18T16:56:07 | null |
UTF-8
|
Python
| false | false | 5,828 |
py
|
# pylint: disable=no-member,line-too-long
import json
import re
import boto
import boto.exception
import boto.sns
from django.conf import settings
from django.core.management.base import BaseCommand
from ...decorators import handle_lock, log_scheduled_event
from ...models import DataPoint
class Command(BaseCommand):
help = 'Send silent notifications to iOS devices to nudge power management systems for transmission using Boto and Amazon Simple Notification Service.'
def add_arguments(self, parser):
pass
@handle_lock
@log_scheduled_event
def handle(self, *args, **options): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
tokens = {}
for point in DataPoint.objects.filter(generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token').order_by('created'):
properties = point.fetch_properties()
tokens[point.source] = properties['event_details']['token']
region = [r for r in boto.sns.regions() if r.name == settings.PDK_BOTO_REGION][0]
notification = {'aps': {'content-available' : 1}}
message = {'APNS': json.dumps(notification), 'default': 'nil'}
sns = boto.sns.SNSConnection(
aws_access_key_id=settings.PDK_BOTO_ACCESS_KEY,
aws_secret_access_key=settings.PDK_BOTO_ACCESS_SECRET,
region=region,
)
for source, token in tokens.iteritems(): # pylint: disable=unused-variable
try:
endpoint_response = sns.create_platform_endpoint(
platform_application_arn=settings.PDK_BOTO_SNS_ARN,
token=token,
)
endpoint_arn = endpoint_response['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
except boto.exception.BotoServerError, err:
print 'ERR 1: ' + err.message
# Yes, this is actually the official way:
# http://stackoverflow.com/questions/22227262/aws-boto-sns-get-endpoint-arn-by-device-token
result_re = re.compile(r'Endpoint(.*)already', re.IGNORECASE)
result = result_re.search(err.message)
if result:
endpoint_arn = result.group(0).replace('Endpoint ', '').replace(' already', '')
else:
raise
try:
sns.publish(target_arn=endpoint_arn, message_structure='json', message=json.dumps(message))
except boto.exception.BotoServerError, err:
print 'FAILED SENDING TO ' + token
print 'ERR: ' + err.message
result_re = re.compile(r'Endpoint(.*)disabled', re.IGNORECASE)
result = result_re.search(err.message)
if result:
for point in DataPoint.objects.filter(source=source, generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token').order_by('created'):
properties = point.fetch_properties()
if token == properties['event_details']['token']:
print 'RENAMING: ' + token
point.secondary_identifier = 'pdk-ios-device-token-sandbox'
point.save()
else:
raise
tokens = {}
for point in DataPoint.objects.filter(generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token-sandbox').order_by('created'):
properties = point.fetch_properties()
tokens[point.source] = properties['event_details']['token']
message = {'APNS_SANDBOX': json.dumps(notification), 'default': 'nil'}
for source, token in tokens.iteritems(): # pylint: disable=unused-variable
try:
endpoint_response = sns.create_platform_endpoint(
platform_application_arn=settings.PDK_BOTO_SNS_ARN_SANDBOX,
token=token,
)
endpoint_arn = endpoint_response['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']
except boto.exception.BotoServerError, err:
print 'ERR 2: ' + err.message
# Yes, this is actually the official way:
# http://stackoverflow.com/questions/22227262/aws-boto-sns-get-endpoint-arn-by-device-token
result_re = re.compile(r'Endpoint(.*)already', re.IGNORECASE)
result = result_re.search(err.message)
if result:
endpoint_arn = result.group(0).replace('Endpoint ', '').replace(' already', '')
else:
raise
try:
sns.publish(target_arn=endpoint_arn, message_structure='json', message=json.dumps(message))
# print('PUBLISHED DEV: ' + token)
except boto.exception.BotoServerError, err:
print 'FAILED SENDING 2 TO ' + token
print 'ERR: ' + err.message
result_re = re.compile(r'Endpoint(.*)disabled', re.IGNORECASE)
result = result_re.search(err.message)
if result:
for point in DataPoint.objects.filter(source=source, generator_identifier='pdk-app-event', secondary_identifier='pdk-ios-device-token-sandbox').order_by('created'):
properties = point.fetch_properties()
if token == properties['event_details']['token']:
print 'RENAMING 2: ' + token
point.secondary_identifier = 'pdk-ios-device-token-disabled'
point.save()
else:
raise
|
[
"[email protected]"
] | |
a86baf9fdc83809dd1793223124161e35450f92e
|
2372203835f4550af81c2549d18d47ef7fb0b75a
|
/api/utils.py
|
9b59c911aefd9bab6a229261b54b24a06ba0e916
|
[
"MIT"
] |
permissive
|
batpad/go-api
|
7071b7a3ca13bdda283ae717d6cfecdf9dbd4f6b
|
6c187396fddae9ebcb923540824c86c40f8254bb
|
refs/heads/master
| 2022-12-09T22:23:52.772748 | 2019-12-11T16:15:11 | 2019-12-11T16:15:11 | 227,409,969 | 0 | 0 |
MIT
| 2021-12-13T20:28:13 | 2019-12-11T16:22:56 |
Python
|
UTF-8
|
Python
| false | false | 671 |
py
|
def pretty_request(request):
headers = ''
for header, value in request.META.items():
if not header.startswith('HTTP'):
continue
header = '-'.join([h.capitalize() for h in header[5:].lower().split('_')])
headers += '{}: {}\n'.format(header, value)
return (
'{method} HTTP/1.1\n'
'Content-Length: {content_length}\n'
'Content-Type: {content_type}\n'
'{headers}\n\n'
'{body}'
).format(
method=request.method,
content_length=request.META['CONTENT_LENGTH'],
content_type=request.META['CONTENT_TYPE'],
headers=headers,
body=request.body,
)
|
[
"[email protected]"
] | |
825d01fdf3e99cfa53d1e7db9a1f88f76509a82c
|
4b76914d18b2cc59f0c263f668a16080eb987a46
|
/mmlclient.py
|
4ac00bc3c545a2cfa138ed18335f2876fa83aacd
|
[] |
no_license
|
BenLand100/MMLDaemon
|
addfa3390094502ffe890da9610e672078a07ab4
|
ebd371f1d2cfd0f4a1ade7728ed951b0f13bbbff
|
refs/heads/master
| 2016-09-06T09:13:36.584123 | 2011-01-12T20:40:55 | 2011-01-12T20:40:55 | 1,246,381 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,484 |
py
|
#!/usr/bin/python
"""
* Copyright 2010 by Benjamin J. Land (a.k.a. BenLand100)
*
* This file is part of the MMLDaemon project.
*
* MMLDaemon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* MMLDaemon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with MMLDaemon. If not, see <http://www.gnu.org/licenses/>.
"""
from socket import *
from struct import *
from time import sleep
#Client/Daemon protocol: [command] [args]
#commands are a one byte int
#args can be: type, pid, string
#type is a one byte int
#pid is a four byte int
#string is a four byte int length followed by string data
MMLD_SPAWN = 0 #C->D | [type] #client requests daemon for a new worker of a type
MMLD_WORKER = 1 #D->C | [pid] #daemon responds with the new worker's pid
MMLD_START = 2 #C->D | [pid] [string] #client signals worker to start a program with a string arg
MMLD_STOP = 3 #C->D | [pid] #client signals the worker to stop
MMLD_PAUSE = 4 #C->D | [pid] #client signals the worker to pause
MMLD_DISCONNECT = 7 #C->D | #client notifies daemon it is disconnecting
MMLD_ERROR = 8 #D->C | [string] #daemon encountered an error and must terminate the connection
MMLD_KILL = 9 #C->D | [pid] #client requests a worker process be terminated
MMLD_FINISHED =10 #D->C | [pid] #daemon notifies client that a script has terminated
MMLD_DEBUG =11 #D->C | [pid] [string] #daemon sends client the debug from a worker
#type codes for ScriptRunner types
MMLD_PS = 0 #pascalscript
MMLD_PY = 1 #python
MMLD_CPAS = 2 #python
socket = socket(AF_INET,SOCK_STREAM)
socket.connect(('localhost', 8000))
socket.send(pack('=BB',MMLD_SPAWN,MMLD_CPAS))
while True:
try:
(code,) = unpack('=B',socket.recv(1))
if code == MMLD_WORKER:
(pid,) = unpack('=i',socket.recv(4))
#begin test code
print 'Worker Created:',pid
program = 'program new; var x: integer; begin x:= 1 + 2 end.'
print 'Starting a Script:',pid
socket.send(pack('=Bii'+str(len(program))+'s',MMLD_START,pid,len(program),program))
#socket.send(pack('=Bi',MMLD_KILL,pid))
#end test code
elif code == MMLD_DEBUG:
(pid,size) = unpack('=ii',socket.recv(8))
(msg,) = unpack('='+str(size)+'s',socket.recv(size))
print '<'+str(pid)+'>', msg
elif code == MMLD_ERROR:
(size,) = unpack('=i',socket.recv(4))
(why,) = unpack('='+str(size)+'s',socket.recv(size))
raise Exception(why)
elif code == MMLD_FINISHED:
(pid,) = unpack('=i',socket.recv(4))
print 'Script Terminated:',pid
raise Exception('Finished')
else:
raise Exception('Unknown Daemon Command: ' + str(code))
except error:
raise
|
[
"[email protected]"
] | |
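The comment block in the mmlclient.py entry above documents a byte-level framing: a one-byte command, typed arguments, and length-prefixed strings. As an illustration of that description only, here is a minimal sketch of packing one MMLD_START frame with the struct module, independent of any running daemon; the pid and script text are made up.
import struct

MMLD_START = 2  # command code taken from the protocol comments above

def pack_start(pid, program):
    # frame layout: [command: 1 byte][pid: 4 bytes][len: 4 bytes][len bytes of script text]
    data = program.encode('utf8')
    return struct.pack('=Bii', MMLD_START, pid, len(data)) + data

frame = pack_start(1234, 'program new; begin end.')
print(len(frame), repr(frame[:9]))  # 9-byte header followed by the script bytes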
441e60c7846fde6cca41e6cbb3845b685e4f8672
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/cctbx/symmetry_search/boost_python/SConscript
|
be2824dfaa2fdc51694642b708bafd590f93bda6
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 |
BSD-3-Clause
| 2020-01-25T01:41:39 | 2019-10-18T19:03:17 |
Python
|
UTF-8
|
Python
| false | false | 216 |
Import("env_cctbx_boost_python_ext")
env = env_cctbx_boost_python_ext.Clone()
env.Prepend(LIBS=["cctbx", "omptbx"])
env.SharedLibrary(target="#lib/cctbx_symmetry_search_ext", source=[
"symmetry_search_ext.cpp",
])
|
[
"[email protected]"
] | ||
e697e2c61f46b0fc54fa6f465b65c02000ca78d5
|
72cc3e8bb9f3a4e04c445fef5ab447bc01023e24
|
/Baekjoon/if조건문/04.사분면고르기.py
|
34d220ff7ee22010ce61cc2d724adbde9d6f0331
|
[] |
no_license
|
Choe-Yun/Algorithm
|
ace65d5161b876e8e29e6820efac70466d5e290b
|
546c89453d061cc48634abf195b420a494a9528f
|
refs/heads/master
| 2023-05-01T21:51:51.823576 | 2021-05-24T13:57:00 | 2021-05-24T13:57:00 | 365,746,601 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,133 |
py
|
### Problem
# A common math problem is to determine which quadrant a given point lies in. The quadrants are numbered 1 to 4 as in the usual picture ("Quadrant n" means "the n-th quadrant").
#
# For example, point A at (12, 5) lies in quadrant 1 because both its x and y coordinates are positive. Point B lies in quadrant 2 because its x coordinate is negative and its y coordinate is positive.
#
# Write a program that reads the coordinates of a point and reports which quadrant it belongs to. Assume that x and y are always either positive or negative, never zero.
### Input
# The first line contains an integer x (−1000 ≤ x ≤ 1000; x ≠ 0). The next line contains an integer y (−1000 ≤ y ≤ 1000; y ≠ 0).
### Output
# Print the quadrant number (1, 2, 3, or 4) of the point (x, y).
##### code
x = int(input())
y = int(input())
if x > 0 and y > 0: # x, y: both positive
    print('1')
elif x < 0 and y > 0: # x: negative, y: positive
    print('2')
elif x < 0 and y < 0: # x, y: both negative
print('3')
else:
print('4')
|
[
"[email protected]"
] | |
3122241b31020e0b087e3f2e049ab486e87621ba
|
119fd27a34eb3ef48f1075c899790a5b1387ddcf
|
/model.py
|
4115b13b9bca7ac8ff77a127c7a5934e572cac2f
|
[] |
no_license
|
xieyuanhuata/pizza
|
2c158ab031cd90461e478456288069e5d1a2f8b6
|
ad52e8795d213c522c2b70d34135b64fc97fa3bf
|
refs/heads/master
| 2023-03-07T02:16:53.394831 | 2017-07-31T05:48:44 | 2017-07-31T05:48:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,800 |
py
|
"""
A.I. Pizza
CEO Bhban
Imagination Garden
Latest Modification : 6/22, 2017
"""
import tensorflow as tf
import inspect
__author__ = "BHBAN"
decay = 0.9
class BEGAN(object):
def __init__(self, batch_size, is_training, num_keys, input_length, output_length, learning_rate):
self.input_music_seg = tf.placeholder(tf.float32, shape=[batch_size, 8, num_keys/8, input_length], name="input_music_segment")
self.ground_truth_seg = tf.placeholder(tf.float32, shape=[batch_size, 8, num_keys/8, output_length], name="ground_truth")
self.threshold = tf.placeholder(tf.float32, shape=[batch_size, 8, num_keys/8, output_length], name="threshold")
self.GAN = Generator_BEGAN(is_training, input_length, output_length, self.threshold)
self.k_t = tf.Variable(0., trainable=False, name='k_t')
self.lambda_k = learning_rate
with tf.variable_scope("graph"):
self.predict, g_logits = self.GAN.predict(self.input_music_seg, is_training)
with tf.variable_scope("graph", reuse=True):
self.d_out1, d_logits = self.GAN.predict(self.predict, is_training)
self.l_x = tf.reduce_mean(tf.abs(d_logits - self.input_music_seg))
self.l_g = tf.reduce_mean(tf.abs(g_logits - self.input_music_seg))
self.loss_d = self.l_x - self.k_t * self.l_g
self.loss_g = self.l_g
self.train_op_d, self.train_op_g=self.train(learning_rate)
def train(self, learning_rate):
optimizer_d = tf.train.AdamOptimizer(learning_rate)
optimizer_g = tf.train.AdamOptimizer(learning_rate)
train_op_d = optimizer_d.minimize(self.loss_d)
train_op_g = optimizer_g.minimize(self.loss_g)
gamma = self.l_x / self.l_g
self.k_t += self.lambda_k*(gamma * self.l_x - self.l_g)
return train_op_d, train_op_g
def conversation(self, input_tensor, session):
feed_dict={self.input_music_seg : input_tensor}
output = session.run(self.predict, feed_dict=feed_dict)
return output
class Generator_BEGAN(object):
def __init__(self, is_training, input_window, output_window, threshold):
self.window_size = input_window
self.output_window = output_window
self.is_training = is_training
self.CNN_shapes = []
self.CNN_kernels = []
self.threshold=threshold
self.CNN_shapes.append([2, 2, input_window, 64])
self.CNN_shapes.append([2, 2, 64, 128])
self.CNN_shapes.append([2, 2, 128, 256])
self.CNN_shapes.append([2, 2, 256, 256])
self.CNN_shapes.append([2, 2, 256, 256])
self.CNN_shapes.append([1, 1, 256, 256])
for i, el in enumerate(self.CNN_shapes):
self.CNN_kernels.append(tf.get_variable("G_CNN_" + str(i), initializer=tf.truncated_normal(el, stddev=0.02)))
def predict(self, input_music, is_training):
net = []
net.append(input_music)
dcnn_kernels = []
# Encoder Layers
for i, el in enumerate(self.CNN_kernels):
C = tf.nn.conv2d(net[-1], el, strides=[1, 2, 2, 1], padding="SAME")
N = tf.contrib.layers.batch_norm(C, decay=decay, is_training=is_training, updates_collections=None)
R = tf.nn.relu(N)
net.append(R)
# Decoder Layers
deconv_shape1 = net[6].shape.as_list()
dcnn1_shape = [1, 1, deconv_shape1[3], net[-1].get_shape().as_list()[3]]
dcnn_kernels.append(tf.get_variable("DCNN_1_W", initializer=tf.truncated_normal(dcnn1_shape, stddev=0.02)))
deconv_shape2 = net[5].shape.as_list()
dcnn2_shape = [2, 2, deconv_shape2[3], deconv_shape1[3]]
dcnn_kernels.append(tf.get_variable("DCNN_2_W", initializer=tf.truncated_normal(dcnn2_shape, stddev=0.02)))
deconv_shape3 = net[4].shape.as_list()
dcnn3_shape = [2, 2, deconv_shape3[3], deconv_shape2[3]]
dcnn_kernels.append(tf.get_variable("DCNN_3_W", initializer=tf.truncated_normal(dcnn3_shape, stddev=0.02)))
deconv_shape4 = net[3].shape.as_list()
dcnn4_shape = [2, 2, deconv_shape4[3], deconv_shape3[3]]
dcnn_kernels.append(tf.get_variable("DCNN_4_W", initializer=tf.truncated_normal(dcnn4_shape, stddev=0.02)))
deconv_shape5 = net[2].shape.as_list()
dcnn5_shape = [2, 2, deconv_shape5[3], deconv_shape4[3]]
dcnn_kernels.append(tf.get_variable("DCNN_5_W", initializer=tf.truncated_normal(dcnn5_shape, stddev=0.02)))
deconv_shape6 = net[1].shape.as_list()
dcnn6_shape = [2, 2, deconv_shape6[3], deconv_shape5[3]]
dcnn_kernels.append(tf.get_variable("DCNN_6_W", initializer=tf.truncated_normal(dcnn6_shape, stddev=0.02)))
deconv_shape7 = net[0].shape.as_list()
deconv_shape7[3] = self.output_window
dcnn7_shape = [2, 2, self.output_window, deconv_shape6[3]]
dcnn_kernels.append(tf.get_variable("DCNN_7_W", initializer=tf.truncated_normal(dcnn7_shape, stddev=0.02)))
DC1 = tf.nn.conv2d_transpose(net[-1], dcnn_kernels[0], deconv_shape1, strides=[1,2,2,1], padding="SAME")
DC1 = tf.contrib.layers.batch_norm(DC1, decay=decay, is_training=is_training, updates_collections=None)
DC1 = tf.nn.relu(DC1)
DC2 = tf.nn.conv2d_transpose(DC1, dcnn_kernels[1], deconv_shape2, strides=[1,2,2,1], padding="SAME")
DC2 = tf.contrib.layers.batch_norm(DC2, decay=decay, is_training=is_training, updates_collections=None)
DC2 = tf.nn.relu(DC2)
DC3 = tf.nn.conv2d_transpose(DC2, dcnn_kernels[2], deconv_shape3, strides=[1,2,2,1], padding="SAME")
DC3 = tf.contrib.layers.batch_norm(DC3, decay=decay, is_training=is_training, updates_collections=None)
DC3 = tf.nn.relu(DC3)
DC4 = tf.nn.conv2d_transpose(DC3, dcnn_kernels[3], deconv_shape4, strides=[1,2,2,1], padding="SAME")
DC4 = tf.contrib.layers.batch_norm(DC4, decay=decay, is_training=is_training, updates_collections=None)
DC4 = tf.nn.relu(DC4)
DC5 = tf.nn.conv2d_transpose(DC4, dcnn_kernels[4], deconv_shape5, strides=[1,2,2,1], padding="SAME")
DC5 = tf.contrib.layers.batch_norm(DC5, decay=decay, is_training=is_training, updates_collections=None)
DC5 = tf.nn.relu(DC5)
DC6 = tf.nn.conv2d_transpose(DC5, dcnn_kernels[5], deconv_shape6, strides=[1,2,2,1], padding="SAME")
DC6 = tf.contrib.layers.batch_norm(DC6, decay=decay, is_training=is_training, updates_collections=None)
DC6 = tf.nn.relu(DC6)
DC7 = tf.nn.conv2d_transpose(DC6, dcnn_kernels[6], deconv_shape7, strides=[1,2,2,1], padding="SAME")
DC7 = tf.contrib.layers.batch_norm(DC7, decay=decay, is_training=is_training, updates_collections=None)
DC7 = tf.nn.relu(DC7)
logits = DC7
#predict = tf.greater(logits, self.threshold)
predict = tf.nn.elu(DC7)
return predict, logits
class VAE(object):
def __init__(self, batch_size, is_training, num_keys, input_length, output_length, learning_rate):
self.keep_probability = tf.placeholder(tf.float32, name="keep_probability")
self.input_music_seg = tf.placeholder(tf.float32, shape=[batch_size, num_keys, input_length, 1], name="input_music_segment")
self.ground_truth_seg = tf.placeholder(tf.float32, shape=[batch_size, num_keys, output_length, 1], name="ground_truth")
self.Generator = Generator(is_training)
self.predict, logits = self.Generator.predict(self.input_music_seg, is_training, self.keep_probability, num_keys, output_length)
self.loss = tf.reduce_mean(tf.squared_difference(self.ground_truth_seg, logits))
trainable_var = tf.trainable_variables()
self.train_op = self.train(trainable_var, learning_rate)
def train(self, trainable_var, learning_rate):
optimizer = tf.train.AdamOptimizer(learning_rate)
grads = optimizer.compute_gradients(self.loss, var_list=trainable_var)
return optimizer.apply_gradients(grads)
class GAN(object):
def __init__(self, batch_size, is_training, num_keys, input_length, output_length, learning_rate, use_began_loss=False):
self.keep_probability = tf.placeholder(tf.float32, name="keep_probability")
self.input_music_seg = tf.placeholder(tf.float32, shape=[batch_size, num_keys, input_length, 1], name="input_music_segment")
self.ground_truth_seg = tf.placeholder(tf.float32, shape=[batch_size, num_keys, output_length, 1], name="ground_truth")
self.Generator = Generator(is_training)
self.Discriminator = Discriminator(is_training)
with tf.variable_scope("G"):
self.predict, logits = self.Generator.predict(self.input_music_seg, is_training, self.keep_probability, num_keys, output_length)
with tf.variable_scope("D") as discriminator_scope:
self.d_out1, d_logits1 = self.Discriminator.discriminate(self.ground_truth_seg, is_training, self.keep_probability)
#discriminator_scope.reuse_variables()
self.d_out2, d_logits2 = self.Discriminator.discriminate(self.predict, is_training, self.keep_probability)
# basic loss
self.loss = tf.reduce_mean(-tf.log(d_logits1) - tf.log(1-d_logits2))
# began loss
if use_began_loss:
self.loss_g = tf.reduce_mean(tf.squared_difference(self.ground_truth_seg, logits))
trainable_var = tf.trainable_variables()
if use_began_loss:
self.train_op, self.train_op_g = self.train_with_began_loss(trainable_var, learning_rate)
else:
self.train_op = self.train_without_began_loss(trainable_var, learning_rate)
def train_with_began_loss(self, trainable_var, learning_rate):
optimizer1 = tf.train.AdamOptimizer(learning_rate)
optimizer2 = tf.train.AdamOptimizer(learning_rate)
grads = optimizer1.compute_gradients(self.loss, var_list = trainable_var)
grads_g = optimizer2.compute_gradients(self.loss_g, var_list = trainable_var)
return optimizer1.apply_gradients(grads), optimizer2.apply_gradients(grads_g)
def train_without_began_loss(self, trainable_var, learning_rate):
optimizer = tf.train.AdamOptimizer(learning_rate)
grads = optimizer.compute_gradients(self.loss, var_list = trainable_var)
return optimizer.apply_gradients(grads)
def discrimination(self, itr):
if self.d_out1 == self.d_out2:
print("EPOCH : " + str(itr) + " >>> Discriminator Failed!!!!!! Sibbal!!")
else:
print("EPOCH : " + str(itr) + " >>> Discriminator successed!!!!!!")
class Generator(object):
def __init__(self, is_training):
self.is_training = is_training
self.CNN_shapes = []
self.CNN_kernels = []
self.CNN_shapes.append([16, 16, 1, 32])
self.CNN_shapes.append([8, 8, 32, 64])
self.CNN_shapes.append([8, 8, 64, 64])
self.CNN_shapes.append([1, 1, 64, 1024])
for i, el in enumerate(self.CNN_shapes):
self.CNN_kernels.append(tf.get_variable("E_CNN_" + str(i), initializer=tf.truncated_normal(el, stddev=0.02)))
def predict(self, input_music, is_training, keep_prob, num_keys, output_length):
net = []
net.append(input_music)
dcnn_kernels = []
# Encoder Layers
for i, el in enumerate(self.CNN_kernels):
C = tf.nn.conv2d(net[-1], el, strides=[1, 2, 2, 1], padding="VALID")
N = tf.contrib.layers.batch_norm(C, decay=decay, is_training=is_training, updates_collections=None)
R = tf.nn.relu(N)
net.append(R)
# Decoder Layers
deconv_shape1 = net[3].shape.as_list()
dcnn1_shape = [1, 1, deconv_shape1[3], net[-1].get_shape().as_list()[3]]
dcnn_kernels.append(tf.get_variable("D_DCNN_1_W", initializer=tf.truncated_normal(dcnn1_shape, stddev=0.02)))
deconv_shape2 = net[2].shape.as_list()
dcnn2_shape = [8, 8, deconv_shape2[3], deconv_shape1[3]]
dcnn_kernels.append(tf.get_variable("D_DCNN_2_W", initializer=tf.truncated_normal(dcnn2_shape, stddev=0.02)))
deconv_shape3 = net[1].shape.as_list()
dcnn3_shape = [8, 8, deconv_shape3[3], deconv_shape2[3]]
dcnn_kernels.append(tf.get_variable("D_DCNN_3_W", initializer=tf.truncated_normal(dcnn3_shape, stddev=0.02)))
deconv_shape4 = net[0].shape.as_list()
dcnn4_shape = [16, 16, deconv_shape4[3], deconv_shape3[3]]
dcnn_kernels.append(tf.get_variable("D_DCNN_4_W", initializer=tf.truncated_normal(dcnn4_shape, stddev=0.02)))
DC1 = tf.nn.conv2d_transpose(net[-1], dcnn_kernels[0], deconv_shape1, strides=[1,2,2,1], padding="VALID")
DC1 = tf.contrib.layers.batch_norm(DC1, decay=decay, is_training=is_training, updates_collections=None)
# F1 = tf.add(DC1, net[3])
DC2 = tf.nn.conv2d_transpose(DC1, dcnn_kernels[1], deconv_shape2, strides=[1,2,2,1], padding="VALID")
DC2 = tf.contrib.layers.batch_norm(DC2, decay=decay, is_training=is_training, updates_collections=None)
# F2 = tf.add(DC2, net[2])
DC3 = tf.nn.conv2d_transpose(DC2, dcnn_kernels[2], deconv_shape3, strides=[1,2,2,1], padding="VALID")
DC3 = tf.contrib.layers.batch_norm(DC3, decay=decay, is_training=is_training, updates_collections=None)
# F3 = tf.add(DC3, net[1])
DC4 = tf.nn.conv2d_transpose(DC3, dcnn_kernels[3], deconv_shape4, strides=[1,2,2,1], padding="VALID")
DC4 = tf.contrib.layers.batch_norm(DC4, decay=decay, is_training=is_training, updates_collections=None)
# F4 = tf.add(DC4, net[0])
logits = DC4
predict = tf.round(logits)
return predict, logits
class Discriminator(object):
def __init__(self, is_training):
self.is_training = is_training
self.CNN_shapes = []
self.CNN_kernels = []
self.FNN_shapes = []
self.FNN_kernels = []
self.FNN_biases = []
self.CNN_shapes.append([2, 2, 1, 64])
self.CNN_shapes.append([2, 2, 64, 128])
self.CNN_shapes.append([2, 2, 128, 256])
self.CNN_shapes.append([2, 2, 256, 512])
self.CNN_shapes.append([2, 2, 512, 512])
self.FNN_shapes.append([512, 4096])
self.FNN_shapes.append([4096, 4096])
self.FNN_shapes.append([4096, 1024])
self.FNN_shapes.append([1024, 2])
for i, el in enumerate(self.CNN_shapes):
self.CNN_kernels.append(tf.get_variable("D_CNN_" + str(i), initializer=tf.truncated_normal(el, stddev=0.02)))
for i, el in enumerate(self.FNN_shapes):
self.FNN_kernels.append(tf.get_variable("D_FNN_" + str(i), initializer = tf.truncated_normal(el, stddev=0.02)))
def discriminate(self, input_music, is_training, keep_prob):
net = []
net.append(input_music)
for el in self.CNN_kernels:
C = tf.nn.conv2d(net[-1], el, strides=[1,1,1,1], padding="SAME")
N = tf.contrib.layers.batch_norm(C, decay=decay, is_training=is_training, updates_collections=None)
R = tf.nn.relu(N)
P = tf.nn.max_pool(R, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding="SAME")
net.append(P)
net[-1] = tf.reshape(net[-1], [-1, self.FNN_shapes[0][0]])
for i, el in enumerate(self.FNN_kernels[:-1]):
W = tf.matmul(net[-1], el)
N = tf.contrib.layers.batch_norm(W, is_training=is_training, updates_collections=None)
R = tf.nn.relu(N)
net.append(R)
logits = tf.nn.softmax(net[-1])
discrimination = tf.argmax(logits)
return discrimination, logits
|
[
"[email protected]"
] | |
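The model.py entry above ties its BEGAN balance term k_t to a gamma computed from the running losses. For reference only, here is a minimal plain-Python sketch of the standard BEGAN balance update (Berthelot et al., 2017), in which gamma is a fixed diversity-ratio hyperparameter; the gamma, lambda_k, and loss values below are illustrative assumptions, not values from the entry.
# standard BEGAN balance update: k_{t+1} = clip(k_t + lambda_k * (gamma * L_real - L_gen), 0, 1)
def update_k(k_t, loss_real, loss_gen, gamma=0.5, lambda_k=0.001):
    # k stays in [0, 1] and controls how strongly the discriminator
    # weighs reconstructing real inputs versus penalizing generated ones
    k_next = k_t + lambda_k * (gamma * loss_real - loss_gen)
    return min(max(k_next, 0.0), 1.0)

k = 0.0
for loss_real, loss_gen in [(0.9, 0.7), (0.8, 0.6), (0.7, 0.6)]:
    k = update_k(k, loss_real, loss_gen)
    print(round(k, 6))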
05cdd6e0b5aadfcd1453901287e445578f2b8e29
|
6ba38fe94e7ea5146c633f56f59c0c3278d695a7
|
/build/build_for_ios
|
d6425614eecc82e40f167b7e162c91cecd846058
|
[
"MIT"
] |
permissive
|
mworks/mworks
|
b49b721c2c5c0471180516892649fe3bd753a326
|
abf78fc91a44b99a97cf0eafb29e68ca3b7a08c7
|
refs/heads/master
| 2023-09-05T20:04:58.434227 | 2023-08-30T01:08:09 | 2023-08-30T01:08:09 | 2,356,013 | 14 | 11 | null | 2012-10-03T17:48:45 | 2011-09-09T14:55:57 |
C++
|
UTF-8
|
Python
| false | false | 941 |
#!/usr/bin/env python3
import argparse
from subprocess import check_call
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--archive', action='store_true',
help='create an archive for distribution')
parser.add_argument('version', nargs='?', help='version number')
args = parser.parse_args()
cmd = [
'/usr/bin/xcrun',
'xcodebuild',
'-workspace', 'MWorks-iOS.xcworkspace',
'-scheme', 'MWorks',
'-destination', 'generic/platform=iOS',
]
if args.archive:
cmd.append('archive')
else:
cmd.extend(['clean', 'build'])
cmd.extend(['GCC_TREAT_WARNINGS_AS_ERRORS=YES',
'MTL_TREAT_WARNINGS_AS_ERRORS=YES',
'SWIFT_TREAT_WARNINGS_AS_ERRORS=YES'])
if args.version:
cmd.append('MW_VERSION=' + args.version)
check_call(cmd)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | ||
36ed25a8f241d3d99be52f87083b2f2bd2e59b8c
|
6fddbb2ddc1c2447619a6b7cbdf9052b2a357bb5
|
/preprocessing/processers/__init__.py
|
4408c397b7aca40a4e6c4c531431d962284dcd21
|
[
"Apache-2.0"
] |
permissive
|
fagan2888/floods-gans
|
d896eeb923d4850697c7e230f6e3d5b66837f919
|
787dc2a3c08483c68a687b4355c0f0f6f2711ab9
|
refs/heads/master
| 2022-04-09T20:07:29.471949 | 2020-02-24T14:06:40 | 2020-02-24T14:06:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 81 |
py
|
from .flipper import Flipper
from .saver import Saver
from .loader import Loader
|
[
"[email protected]"
] | |
cebcfbab3351bb97acf855a4e8a6a0e12ecff3e0
|
d88f9acfe09d79f06cf251b8cbbb012b55d99f39
|
/Scraping/test_scraping/create_sqlite_db.py
|
95e1b304f3fc724c65ddf601e4224bbe7e44b3ed
|
[] |
no_license
|
Twishar/DataAnalysis
|
535beb795e30b8ac07767a61f1ebfbc60546271f
|
e5d5ba9ba0b9a51031e8f1f4225bc35d848159dd
|
refs/heads/master
| 2022-03-04T19:02:30.917729 | 2019-11-15T14:18:53 | 2019-11-15T14:18:53 | 98,515,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 192 |
py
|
import sqlite3
conn = sqlite3.connect('allo_db.sqlite')
c = conn.cursor()
c.execute('''CREATE TABLE allo_parse
(search_param text, results text);''')
conn.commit()
conn.close()
|
[
"[email protected]"
] | |
9858f8027cfdcb40a2b8916164d29d4d43e252c8
|
9f43b7e689c04b23b47b09a15be121a564db5020
|
/tests/unit/test_helpers.py
|
e55b5ab9488a7547bc8aa30b9a71f2406ef21019
|
[] |
no_license
|
Beartime234/slack-quizdata
|
63fea58953154b440e26d38cdb6c7eb07f296cac
|
ae1a0561e8dc4a5da4ed36071389ffcf131cb8ac
|
refs/heads/master
| 2021-06-07T11:58:00.716572 | 2019-12-27T04:41:22 | 2019-12-27T04:41:22 | 152,212,713 | 0 | 0 | null | 2021-04-30T21:58:31 | 2018-10-09T08:08:45 |
Python
|
UTF-8
|
Python
| false | false | 1,400 |
py
|
import pytest
from uploader import helpers
def test_generating_unique_id():
ret = helpers.generate_unique_id()
assert type(ret) == str
def test_check_if_id_in_question():
question_without_id = {"question": "blah"}
ret_without = helpers.check_if_id_in_question(question_without_id)
assert ret_without is False
question_with_id = {"question_id": "somerandomid"}
ret_with = helpers.check_if_id_in_question(question_with_id)
assert ret_with is True
def test_add_id_to_question():
question = {
"question": "What is a dog?",
"incorrect_answers": [],
"correct_answer": ""
}
ret = helpers.add_id_to_question(question)
assert "question_id" in ret.keys()
assert type(question["question_id"]) is str
def test_key_exists():
test_dict = {
"key": "value"
}
ret = helpers.key_exists(test_dict, "key")
assert ret is True
ret = helpers.key_exists(test_dict, "random")
assert ret is False
def test_value_is_string():
value_str = "random"
value_int = 0
value_bool = False
value_list = []
value_dict = {}
assert helpers.value_is_string(value_str) is True
assert helpers.value_is_string(value_int) is False
assert helpers.value_is_string(value_bool) is False
assert helpers.value_is_string(value_list) is False
assert helpers.value_is_string(value_dict) is False
|
[
"[email protected]"
] | |
3f39b4c11c3aa082d210897c4b788bb31b2e0551
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/carbonui/control/windowDropDownMenu.py
|
6c26d7806b20cec4ebb3158345c97b472461b7f6
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null |
UTF-8
|
Python
| false | false | 1,453 |
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\carbonui\control\windowDropDownMenu.py
import carbonui.const as uiconst
from carbonui.primitives.container import Container
from carbonui.primitives.fill import Fill
from carbonui.primitives.line import Line
from carbonui.control.label import LabelOverride as Label
class WindowDropDownMenuCore(Container):
__guid__ = 'uicls.WindowDropDownMenuCore'
default_height = 10
default_align = uiconst.TOLEFT
default_state = uiconst.UI_NORMAL
def Setup(self, name, GetMenu):
self.name = name
self.expandOnLeft = 1
self.PrepareLayout()
self.GetMenu = GetMenu
def PrepareLayout(self):
Line(parent=self, align=uiconst.TORIGHT)
self.label = Label(text=self.name, parent=self, align=uiconst.CENTER, fontsize=9, letterspace=1, top=1, state=uiconst.UI_DISABLED, uppercase=1)
self.hilite = Fill(parent=self, state=uiconst.UI_HIDDEN, padding=1)
self.width = self.label.width + 10
self.cursor = uiconst.UICURSOR_SELECT
def OnMouseEnter(self):
self.hilite.state = uiconst.UI_DISABLED
def OnMouseExit(self):
self.hilite.state = uiconst.UI_HIDDEN
def GetMenuPosition(self, *args):
return (self.absoluteLeft, self.absoluteBottom + 2)
class WindowDropDownMenuCoreOverride(WindowDropDownMenuCore):
pass
|
[
"[email protected]"
] | |
f6de9c1334547fde5a62cc5def9e69351be99975
|
a01e7f87a0088965e2e0a02476d2df12a49a1a18
|
/cmds/network/client_server/transferFile_server.py
|
0b3fe2d70b42942ea32fd3e3528228e3ff446eb0
|
[] |
no_license
|
gsrr/IFT_jerry
|
0456a8a1fb98f84ad5c26dc36bdf32e2d85c750c
|
4c2f6900dfd7ae7f6b3cc2150b1c1be236b4c95c
|
refs/heads/master
| 2020-04-04T05:30:10.544252 | 2019-08-22T09:12:03 | 2019-08-22T09:12:03 | 48,145,836 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 385 |
py
|
import socket
import sys
HOST = '127.0.0.1'
PORT = 8888
s = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
try:
s.bind((HOST , PORT))
except socket.error , msg:
sys.exit()
s.listen(10)
conn,addr = s.accept()
print addr
path = conn.recv(1024)
print path
fd = open(path , 'wb')
data = conn.recv(1024)
while data:
fd.write(data)
data = conn.recv(1024)
fd.close()
s.close()
|
[
"root@"
] |
root@
|
08cc518875bd409711acadb43c70cbc56a0cee66
|
2c1fc9f7d510232c85a6e4e4b7da5d1c59f1096f
|
/linkedlist/pairwise-swap.py
|
7a049c372c9808dae115060b1f7ddfb803221419
|
[] |
no_license
|
rohitjain994/must_do
|
eed4dc28aeb4972548be1b2890c3afaefb7e5b20
|
c279fe2dfdecc8d8d0ae7af053002d059ecf259b
|
refs/heads/main
| 2023-08-16T20:39:58.046112 | 2021-10-03T16:41:16 | 2021-10-03T16:41:16 | 390,668,029 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
if not head or not head.next: return head
start = head.next.next
head, head.next = head.next, head
head.next.next = self.swapPairs(start)
return head
|
[
"[email protected]"
] | |
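The pairwise-swap entry above solves the problem recursively. For comparison, a hedged iterative sketch of the same swap using a dummy head node; the ListNode definition is repeated here so the snippet stands alone, and the sample list is made up.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def swap_pairs_iterative(head):
    dummy = ListNode(0, head)
    prev = dummy
    while prev.next and prev.next.next:
        a, b = prev.next, prev.next.next
        # re-link prev -> b -> a -> rest of the list
        a.next, b.next, prev.next = b.next, a, b
        prev = a
    return dummy.next

# build 1->2->3->4 and expect 2->1->4->3
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
out = swap_pairs_iterative(head)
vals = []
while out:
    vals.append(out.val)
    out = out.next
print(vals)  # [2, 1, 4, 3]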
b86320a322d8f942de143704a086974303d1bcca
|
6bf2c00376d7a76909d4d85e0fac39962855dcfb
|
/bookmark/models.py
|
384cd387100bcb7502287a067725cd575530cd40
|
[] |
no_license
|
young84/django_project
|
2f0b6d808d338d5636b296b7110d767a5595b3de
|
26e15097d19615146fa8b84736aeb62e8d0360c0
|
refs/heads/master
| 2023-01-31T23:33:45.106578 | 2020-12-21T07:42:50 | 2020-12-21T07:42:50 | 323,263,847 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Bookmark(models.Model):
site_name = models.CharField(max_length=100)
url = models.URLField('Site URL')
def __str__(self):
return "이름 : "+self.site_name + ", 주소 : "+self.url
def get_absolute_url(self):
return reverse('bookmark:detail', args=[str(self.id)])
|
[
"[email protected]"
] | |
271487705d8947bbf1c2301040973dd2bc9272d9
|
5f56a24a1514cd1c5790a7141ad35145ff906b5b
|
/katakana/model.py
|
700443cdece16abfa484d7852bcae0567ca63b67
|
[] |
no_license
|
hoangcuong2011/katakana
|
822a06f8f39f6efe62aa571f783f3eb3e30dec0b
|
d20e267454fb4b5342c175cc4eb4d735dc20f1c7
|
refs/heads/master
| 2020-08-06T10:27:18.061814 | 2019-07-21T06:46:33 | 2019-07-21T06:46:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,589 |
py
|
import json
import os
import shutil
import numpy as np
from tensorflow._api.v1.keras.layers import Input, Embedding, LSTM, TimeDistributed, Dense
from tensorflow._api.v1.keras.models import Model, load_model
from . import encoding
DEFAULT_INPUT_LENGTH = encoding.DEFAULT_VECTOR_LENGTH
DEFAULT_OUTPUT_LENGTH = encoding.DEFAULT_VECTOR_LENGTH
def load(save_dir='trained_models'):
input_encoding = json.load(open(save_dir + '/input_encoding.json'))
input_decoding = json.load(open(save_dir + '/input_decoding.json'))
input_decoding = {int(k): v for k, v in input_decoding.items()}
output_encoding = json.load(open(save_dir + '/output_encoding.json'))
output_decoding = json.load(open(save_dir + '/output_decoding.json'))
output_decoding = {int(k): v for k, v in output_decoding.items()}
model = load_model(save_dir + '/model.h5')
return model, input_encoding, input_decoding, output_encoding, output_decoding
def save(model, input_encoding, input_decoding, output_encoding, output_decoding,
save_dir='trained_models'):
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.mkdir(save_dir)
with open(save_dir + '/input_encoding.json', 'w') as f:
json.dump(input_encoding, f)
with open(save_dir + '/input_decoding.json', 'w') as f:
json.dump(input_decoding, f)
with open(save_dir + '/output_encoding.json', 'w') as f:
json.dump(output_encoding, f)
with open(save_dir + '/output_decoding.json', 'w') as f:
json.dump(output_decoding, f)
model.save(save_dir + '/model.h5')
def create_model(
input_dict_size,
output_dict_size,
input_length=DEFAULT_INPUT_LENGTH,
output_length=DEFAULT_OUTPUT_LENGTH):
encoder_input = Input(shape=(input_length,))
decoder_input = Input(shape=(output_length,))
encoder = Embedding(input_dict_size, 64, input_length=input_length, mask_zero=True)(encoder_input)
encoder = LSTM(64, return_sequences=False)(encoder)
decoder = Embedding(output_dict_size, 64, input_length=output_length, mask_zero=True)(decoder_input)
decoder = LSTM(64, return_sequences=True)(decoder, initial_state=[encoder, encoder])
decoder = TimeDistributed(Dense(output_dict_size, activation="softmax"))(decoder)
model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder])
model.compile(optimizer='adam', loss='categorical_crossentropy')
return model
def create_model_data(
encoded_input,
encoded_output,
output_dict_size):
encoder_input = encoded_input
decoder_input = np.zeros_like(encoded_output)
decoder_input[:, 1:] = encoded_output[:, :-1]
decoder_input[:, 0] = encoding.CHAR_CODE_START
decoder_output = np.eye(output_dict_size)[encoded_output.astype('int')]
return encoder_input, decoder_input, decoder_output
# =====================================================================
def to_katakana(text, model, input_encoding, output_decoding,
input_length=DEFAULT_INPUT_LENGTH,
output_length=DEFAULT_OUTPUT_LENGTH):
encoder_input = encoding.transform(input_encoding, [text.lower()], input_length)
decoder_input = np.zeros(shape=(len(encoder_input), output_length))
decoder_input[:, 0] = encoding.CHAR_CODE_START
for i in range(1, output_length):
output = model.predict([encoder_input, decoder_input]).argmax(axis=2)
decoder_input[:, i] = output[:, i]
decoder_output = decoder_input
return encoding.decode(output_decoding, decoder_output[0][1:])
|
[
"[email protected]"
] | |
28dcbf981159120bc9be52143d078899e8a57677
|
02ed5777f4090c3ac38ba42b7dd26798de22af71
|
/src/handler/push_service_handler.py
|
099247d32fb73e089ac5bc7cec160a45771731c4
|
[] |
no_license
|
tigerinsky/push-server
|
5125df4a640ea106bce3774f4d474d50a8abf53b
|
b2fff90dc235575efd12b621842fc9175e8f5e2f
|
refs/heads/master
| 2021-01-22T17:58:03.912766 | 2015-08-23T07:47:27 | 2015-08-23T07:47:27 | 35,807,022 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,111 |
py
|
#!/usr/bin/env python
#coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('./gen-py')
import time
import httplib
import urllib
import json
import random
import itertools
from third import xinge
import gevent
import gevent.monkey
from push import PushService
from push.ttypes import *
from util.log import logger
from util.timer import timer
from util import http
from config.const import SUCCESS, PARAM_NOTIFY_ERROR, PARAM_LIST_ERROR, \
MSG_ERROR, BROADCAST_ERROR, RET_UNKNOWN_ERROR
from model.user_push import UserPush
from model.user_detail import UserDetail
NOTIFY_EXPIRE_TIME = 86400 #TODO move this to the config file
ANDROID_ACCESS_ID = 2100106617
ANDROID_ACCESS_TOKEN = 'a797bf2b660b362736ea220a8e9f4b4e'#secret key
IOS_ACCESS_ID = 2200118927
IOS_ACCESS_TOKEN = '662a91c3bf96cc9e18111339764f22d2'
SCHEMA_PREFIX = 'meiyuan://'
IOS_ENV = 2
gevent.monkey.patch_all(ssl=False)
SCHEMA = {
LandingType.INDEX: '%s%s' % (SCHEMA_PREFIX, 'index'),
LandingType.WAP: '%s%s' % (SCHEMA_PREFIX, 'wap'),
LandingType.COMMUNITY_DETAIL: '%s%s' % (SCHEMA_PREFIX, 'tweet'),
LandingType.FRIEND: '%s%s' % (SCHEMA_PREFIX, 'friend'),
LandingType.PRIVATE_MSG: '%s%s' % (SCHEMA_PREFIX, 'pmsg'),
LandingType.SYSTEM_MSG: '%s%s' % (SCHEMA_PREFIX, 'smsg'),
LandingType.USER: '%s%s' % (SCHEMA_PREFIX, 'user'),
}
class PushServiceHandler:
def __init__(self):
self.android_push_app = xinge.XingeApp(ANDROID_ACCESS_ID, ANDROID_ACCESS_TOKEN)
self.ios_push_app = xinge.XingeApp(IOS_ACCESS_ID, IOS_ACCESS_TOKEN)
def _build_schema(self, notify):
ltype = notify.ltype
url = SCHEMA.get(ltype, '')
param = {}
if ltype == LandingType.WAP:
param['url'] = notify.url
elif ltype == LandingType.COMMUNITY_DETAIL:
param['tid'] = notify.tid
elif ltype == LandingType.PRIVATE_MSG or ltype == LandingType.USER:
param['uid'] = notify.uid
if url and param:
params = urllib.urlencode(param)
url = '%s?%s' % (url, params)
return url
def _get_msg_custom(self, notify):
custom = {}
custom['t'] = notify.mtype
param = {}
schema = ''
if notify.mtype == MessageType.NOTIFY:  # notification type: a landing schema is needed
schema = self._build_schema(notify)
elif notify.mtype == MessageType.EMAILRED:  # private-message red-dot: an extra uid tells the client whose red dot it is
param['uid'] = notify.uid
param['j'] = schema
custom['p'] = param
return custom
def _build_android_msg(self, notify):
'''Define the notification message.'''
msg = xinge.Message()
msg.type = xinge.Message.TYPE_MESSAGE
msg.title = notify.title
msg.content = notify.content
msg.expireTime = NOTIFY_EXPIRE_TIME  # TODO: does the red-dot message need an expire time?
msg.custom = self._get_msg_custom(notify)
return msg
def _build_ios_msg(self, notify):
msg = xinge.MessageIOS()
msg.alert = notify.content
if notify.mtype == MessageType.NOTIFY:
msg.sound = 'default'
msg.badge = 1
msg.expireTime = NOTIFY_EXPIRE_TIME
msg.custom = self._get_msg_custom(notify)
return msg
@timer('push end')
def _push_single_device(self, device_id, msg, device_type, env=IOS_ENV):  # xinge.XingeApp.ENV_DEV; TODO: change to 1 (production) before the iOS release
result = None
if device_type == DeviceType.ANDROID:
logger.info('push start:device_id[%s] device_type[%s] msg_type[%s] title[%s] content[%s] expire[%s] custom[%s]' % (device_id, device_type, msg.type, msg.title, msg.content, msg.expireTime, msg.custom))
result = self.android_push_app.PushSingleDevice(device_id, msg, env)
elif device_type == DeviceType.IOS:
logger.info('push start:device_id[%s] device_type[%s] sound[%s] title[%s] content[%s] expire[%s] custom[%s]' % (device_id, device_type, msg.sound, '', msg.alert, msg.expireTime, msg.custom))
result = self.ios_push_app.PushSingleDevice(device_id, msg, env)
logger.info('result[%s] device_id[%s]' % (result, device_id))
def ping(self):
return 'hello, world'
def _build_msg(self, notify, device_type):
msg = None
if device_type == DeviceType.IOS:
msg = self._build_ios_msg(notify)
elif device_type == DeviceType.ANDROID:
msg = self._build_android_msg(notify)
else:
logger.warning('msg[device type error] device_type[%s] notify[%s]' % (device_type, notify))
return msg
#@timer('batch notify over')
def batch_notify(self, request):
logger.info('batch notify request begin, request[%s]' % request)
notify = request.notify;
if not isinstance(notify, Notify):
logger.warning('param notify is not instance of notify')
return PARAM_NOTIFY_ERROR
device_id_list = request.device_id_list;
if not isinstance(device_id_list, list):
logger.warning('param device_id_list is invalid')
return PARAM_LIST_ERROR
if len(device_id_list) <= 0:
return SUCCESS
device_type = request.device_type
msg = self._build_msg(notify, device_type)
if not msg:
logger.warning('msg[msg generate error]')
return MSG_ERROR
send_time = None
if hasattr(request, 'send_time'):
t = request.send_time
send_time = '1970-01-01 00:00:00'
try:
x = time.localtime(t)
send_time = time.strftime("%Y-%m-%d %H:%M:%S", x)
except Exception, e:
logger.warning('time trans exception,e[%s]' % e)
if send_time:
msg.sendTime = send_time
threads = [gevent.spawn(self._push_single_device, device_id, msg, request.device_type) for device_id in device_id_list]
gevent.joinall(threads)
return SUCCESS
#@timer('single push called cost')
def single_notify(self, request):
device_id = request.device_id
notify = request.notify
device_type = request.device_type
if not isinstance(notify, Notify):
logger.warning('msg[param error] param[%s]' % request)
return PARAM_NOTIFY_ERROR
msg = self._build_msg(notify, device_type)
if not msg:
logger.warning('msg[msg generate error]')
return MSG_ERROR
gevent.spawn(self._push_single_device, device_id, msg, device_type)
logger.info('single push called over')
return SUCCESS
#@timer('broadcast end')
def broadcast(self, request):
logger.info('broadcast begin request[%s]' % request)
notify = request.notify
send_time = None
if hasattr(request, 'send_time'):
t = request.send_time
send_time = '1970-01-01 00:00:00'
try:
x = time.localtime(t)
send_time = time.strftime("%Y-%m-%d %H:%M:%S", x)
except Exception, e:
logger.warning('time trans exception,e[%s]' % e)
if not isinstance(notify, Notify):
logger.warning('param notify is invalid')
return PARAM_NOTIFY_ERROR
device_type = request.device_type
ios_msg = self._build_msg(notify, DeviceType.IOS)
android_msg = self._build_msg(notify, DeviceType.ANDROID)
if not ios_msg or not android_msg:
logger.warning('msg[msg generate error]')
return MSG_ERROR
if send_time:
ios_msg.sendTime = send_time
android_msg.sendTime = send_time
ret = None
if device_type == DeviceType.ANDROID:
ret = self.android_push_app.PushAllDevices(0, android_msg)
logger.info('ret[%s]' % ret)
if device_type == DeviceType.IOS:
ret = self.ios_push_app.PushAllDevices(0, ios_msg, IOS_ENV)
logger.info('ret[%s]' % ret)
else:
ret1 = self.android_push_app.PushAllDevices(0, android_msg)
ret2 = self.ios_push_app.PushAllDevices(0, ios_msg, IOS_ENV)
logger.info('ret1[%s], ret2[%s]' % (ret1, ret2))
#ret = ret1 and ret2
if ret:
if ret[0] == 0:
return SUCCESS
else:
return BROADCAST_ERROR
return RET_UNKNOWN_ERROR
def _del_tag(self, token_group):
for dtype in token_group:
for token in token_group[dtype]:
result = ()
if dtype == DeviceType.ANDROID:
result = self.android_push_app.QueryTokenTags(token)
if dtype == DeviceType.IOS:
result = self.ios_push_app.QueryTokenTags(token)
logger.info('msg[query tag] token[%s] result[%s]' % (token, result))
tag_list = []
if result[0] == 0:
tag_list = result[2]
if tag_list:
tag_pair = [xinge.TagTokenPair(i, token) for i in tag_list]
if dtype == DeviceType.ANDROID:
result = self.android_push_app.BatchDelTag(tag_pair)
if dtype == DeviceType.IOS:
result = self.ios_push_app.BatchDelTag(tag_pair)
logger.info('msg[delete tag] token[%s] tag_pair[%s] result[%s]' % (token, tag_pair, result))
def _get_condition_push_device(self, city, school, ukind_verify):
limit = 5000
offset = 0
while True:
users = UserDetail.get_user(city, school, ukind_verify, offset, limit)
if users == None:
yield []
break
uid_list = [u.uid for u in users]
logger.info("msg[get condition push] city[%s] school[%s] ukind_verify[%s] uid%s offset[%s] limit[%s]" % (city, school, ukind_verify, uid_list, offset, limit))
device_infos = UserPush.get_device_list(uid_list)
yield [(d.device_type, d.xg_device_token) for d in device_infos]
if len(uid_list) < limit:
break
offset += limit
#@timer('op tag end')
def optag(self, request):
logger.info('msg[optag begin] request[%s]' % request)
if request.uid == 0 or not isinstance(request.uid, int):
logger.warning('msg[invalid uid] uid[%s]' % request.uid)
return PARAM_UID_ERROR
token_group = {}
if request.xg_device_token == '':
r = UserPush.get_device_info(request.uid)
for k, g in itertools.groupby(r, lambda x:x.device_type):
token_group[k] = [i.xg_device_token for i in g]
else:
r = UserPush.get_device_type(request.xg_device_token)
if not r:
logger.warning('msg[no this token in table] token[%s]' % request.xg_device_token)
return SUCCESS
token_group.setdefault(r, []).append(request.xg_device_token)
if request.op == 1:
self._del_tag(token_group)
tag_list = request.tag_list
tag_list.append('all_city')
tag_list.append('all_school')
for dtype in token_group:
tag_token_list = []
xg_device_token = token_group[dtype]
for token in xg_device_token:
if dtype == DeviceType.ANDROID:
UserPush.update_tags(token, ','.join(tag_list+['android']))
l = [xinge.TagTokenPair(x, token) for x in tag_list+['android']]
if dtype == DeviceType.IOS:
UserPush.update_tags(token, ','.join(tag_list+['ios']))
l = [xinge.TagTokenPair(x, token) for x in tag_list+['ios']]
tag_token_list += l
size = len(tag_token_list)
num = 19
slice_tag_list = [tag_token_list[i:i+num] for i in range(0, size, num)]
for l in slice_tag_list:
result = (0, '')
if dtype == DeviceType.ANDROID:
if request.op == 1:
result = self.android_push_app.BatchSetTag(l)
elif request.op == 2:
result = self.android_push_app.BatchDelTag(l)
if dtype == DeviceType.IOS:
if request.op == 1:
result = self.ios_push_app.BatchSetTag(l)
elif request.op == 2:
result = self.ios_push_app.BatchDelTag(l)
if result[0] != 0:
logger.warning('msg[set tag error] tags[%s] uid[%s] tagpair[%s] op[%s] ret[%s]' % (tag_list, request.uid, l, request.op, result))
return SUCCESS
def _tag_push(self, tag_list, msg, device_type, push_task_id):  # currently unused
retry = 2
ret = None
for i in range(retry):
if device_type == DeviceType.IOS:
ret = self.ios_push_app.PushTags(0, tag_list, 'AND', msg, IOS_ENV)
if device_type == DeviceType.ANDROID:
ret = self.android_push_app.PushTags(0, tag_list, 'AND', msg)
logger.info('msg[condition push result] retry[%s] device_type[%s] tags[%s] msg[%s] ret[%s] push_task_id[%s]' % (i, device_type, tag_list, msg, ret, push_task_id))
if ret[0] == 0:
return ret[2]
return -1
def condition_push(self, request):
gevent.spawn(self._condition_push, request)
def _condition_push(self, request):
logger.info('msg[condition push begin] request[%s]' % request)
verify_map = {'unverify': 0, 'verify': 1}
notify = request.notify
push_task_id = request.push_task_id
city = request.city.split(',')
school = request.school.split(',')
ukind_verify = request.ukind_verify.split(',')
task_list = []
push_id_list = []
i = 1
for tag_list in itertools.product(city, school, ukind_verify):
for device_list in self._get_condition_push_device(tag_list[0], tag_list[1], verify_map.get(tag_list[2], 0)):
for device in device_list:
if request.device_type == device[0] or request.device_type == 0:
if device[0] == DeviceType.IOS:
ios_msg = self._build_msg(notify, DeviceType.IOS)
task_list.append(gevent.spawn(self._push_single_device, device[1], ios_msg, device[0]))
if device[0] == DeviceType.ANDROID:
android_msg = self._build_msg(notify, DeviceType.ANDROID)
task_list.append(gevent.spawn(self._push_single_device, device[1], android_msg, device[0]))
i+=1
if i % 700 == 0:
gevent.joinall(task_list, timeout=5)
task_list = []
|
[
"[email protected]"
] | |
bc9fb2afed22a652d7a229f920fb725987c8015a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/7DrvnMeY2Ebzk2mfH_8.py
|
cdf4c6f5d8fb4f7a25817718499599ad9938b579
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
import re
body_insert = '(?<=<body>\n)'
body_append = '(?=\n</body>)'
body_rewrite = '(?<=<body>\n)(?:\n|.)+(?=\n</body>)'
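# Illustrative usage sketch (added for clarity; the sample_html string below is an
# assumption, not part of the original snippet). The three lookaround patterns above
# respectively insert after <body>, append before </body>, and replace the whole body.
sample_html = '<html>\n<body>\nold content\n</body>\n</html>'
prepended = re.sub(body_insert, 'injected first line\n', sample_html, count=1)
appended = re.sub(body_append, '\ninjected last line', sample_html, count=1)
rewritten = re.sub(body_rewrite, 'entirely new body', sample_html, count=1)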
|
[
"[email protected]"
] | |
2c4c70d9f94ee4beff9ab3f311d1fba80ecd5ff8
|
1bdf2523289cd9207c177702c1e36dadd6f847b5
|
/Project5-classification/perceptron_pacman.py
|
a47c906ce8ce505245601e225c8a27fac320c98b
|
[] |
no_license
|
xelarock/artificial-intelligence
|
6e7e449a7b5c9dafd19d8beae16997896bb867d9
|
3a4ae46ceac4a15834f4587dc26eac5d58648f6d
|
refs/heads/main
| 2023-01-01T02:27:02.404143 | 2020-10-26T15:30:27 | 2020-10-26T15:30:27 | 307,408,784 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,805 |
py
|
"""
THIS CODE WAS MY OWN WORK , IT WAS WRITTEN WITHOUT CONSULTING ANY
SOURCES OUTSIDE OF THOSE APPROVED BY THE INSTRUCTOR. Alex Welsh
"""
# perceptron_pacman.py
# --------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
# Perceptron implementation for apprenticeship learning
import util
from perceptron import PerceptronClassifier
from pacman import GameState
PRINT = True
class PerceptronClassifierPacman(PerceptronClassifier):
def __init__(self, legalLabels, maxIterations):
PerceptronClassifier.__init__(self, legalLabels, maxIterations)
self.weights = util.Counter()
def classify(self, data ):
"""
Data contains a list of (datum, legal moves)
Datum is a Counter representing the features of each GameState.
legalMoves is a list of legal moves for that GameState.
"""
guesses = []
for datum, legalMoves in data:
vectors = util.Counter()
for l in legalMoves:
vectors[l] = self.weights * datum[l] #changed from datum to datum[l]
guesses.append(vectors.argMax())
return guesses
def train( self, trainingData, trainingLabels, validationData, validationLabels ):
self.features = trainingData[0][0]['Stop'].keys() # could be useful later
# DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR
# THE AUTOGRADER WILL LIKELY DEDUCT POINTS.
for iteration in range(self.max_iterations):
print "Starting iteration ", iteration, "..."
for i in range(len(trainingData)):
"*** YOUR CODE HERE ***"
score = util.Counter()
for label in trainingData[i][1]: # find score for label = w * f(s,a)
score[label] = self.weights * trainingData[i][0][label] # w * f(s, a)
highestLabel = score.argMax() # find the max label (a')
self.weights += trainingData[i][0][trainingLabels[i]] # f(s, a) with correct action
self.weights -= trainingData[i][0][highestLabel] # f(s, a') with incorrect action
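# Note (added for clarity): when highestLabel equals the true label, the two weight
# updates above cancel exactly, so the weights only change on misclassified samples --
# the standard structured-perceptron update w <- w + f(s, a*) - f(s, a').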
|
[
"[email protected]"
] | |
26c41c4d14a5a00568d2e7b0def3c41e9006bae4
|
9e172019f6bb85bf95a7c49f96d6f21cdf0232a2
|
/20th/towerOfHanoi.py
|
b820c9e3569d84c4ff306498ce5d360aa504872d
|
[] |
no_license
|
razakadam74/Interview-Preparation
|
249628295f37ee6b5a023ac5aa8b195041f83573
|
43d4eef94e51ddf24085b4a82ba56e8cc2857091
|
refs/heads/master
| 2020-04-01T11:19:06.165423 | 2018-10-23T14:54:07 | 2018-10-23T14:54:07 | 153,156,869 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 378 |
py
|
def solveTowerOfHanio(height,fromPole,toPole, auxPole):
if height >=1:
solveTowerOfHanio(height - 1,fromPole, auxPole, toPole)
moveDisk(toPole,fromPole)
solveTowerOfHanio(height -1, auxPole, toPole, fromPole)
def moveDisk(toPole, fromPole):
print('Moving from Pole {} to Pole {}'.format(fromPole, toPole))
solveTowerOfHanio(3, 'A', 'B', 'C')
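# For illustration (added, not part of the original file): the call above prints the
# 2**3 - 1 = 7 moves needed for three disks, starting with
#   Moving from Pole A to Pole B
#   Moving from Pole A to Pole C
#   Moving from Pole B to Pole C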
|
[
"[email protected]"
] | |
25f56a9c5358e117323c6a83feb396401feb9da6
|
405e9b1635112285adecaf655e4fde82541eb656
|
/src/main/python/parse/diskio/trace2list.py
|
3e1ad478da9a4be2e3d0ef35f5af0ac4ba0616b8
|
[] |
no_license
|
AlexJuarez/tracery-work
|
a019867dc42f786f0ae7de0e7e801268ab5977d7
|
ed0bffb70451aea4b7dcee88795ddfd2a256aedb
|
refs/heads/master
| 2021-06-03T18:57:31.498480 | 2016-08-03T03:54:42 | 2016-08-03T03:54:42 | 64,812,730 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,428 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from trace_parser import TraceParser
from collections import defaultdict
import argparse
import re
# IO trace format:
# Timestamp Op Count Thread CPU Filename Pg-Count Duration Pages Sectors
class IOTraceItem:
def __init__(self, ts, type, cnt, thread, cpu, file, pg_cnt, dur, pages, sectors):
self.timestamp = ts
self.type = type
self.count = cnt
self.thread_name = thread
self.cpu = cpu
self.file_name = file
self.page_count = pg_cnt
self.duration = dur
self.pages = pages
self.sectors = sectors
@staticmethod
def null_obj():
return IOTraceItem(0, '', 0, '', 0, '', 0, 0, [], [])
@staticmethod
def all_col_names():
return IOTraceItem.null_obj()._to_str([], True)
@staticmethod
def col_names(hidden_cols):
return IOTraceItem.null_obj()._to_str(hidden_cols, True)
def to_str(self, hidden_cols):
return self._to_str(hidden_cols, False)
def _to_str(self, hidden_cols, print_hdr):
SEP = ' | '
output = SEP
if 'timestamp' not in hidden_cols:
ts = 'timestamp' if print_hdr else str(self.timestamp)
output += (ts + SEP)
if 'type' not in hidden_cols:
type = 'type' if print_hdr else self.type
output += (type + SEP)
if 'count' not in hidden_cols:
count = 'count' if print_hdr else str(self.count)
output += (count + SEP)
if 'thread' not in hidden_cols:
thread = 'thread' if print_hdr else self.thread_name
output += (thread + SEP)
if 'cpu' not in hidden_cols:
cpu = 'cpu' if print_hdr else str(self.cpu)
output += (cpu + SEP)
if 'file' not in hidden_cols:
file = 'file' if print_hdr else self.file_name
output += (file + SEP)
if 'page_count' not in hidden_cols:
pc = 'page-count' if print_hdr else str(self.page_count)
output += (pc + SEP)
if 'duration' not in hidden_cols:
duration = 'duration' if print_hdr else str(self.duration)
output += (duration + SEP)
if 'pages' not in hidden_cols:
pages = 'pages' if print_hdr \
else ','.join(str(page) for page in self.pages)
output += (pages + SEP)
if 'sectors' not in hidden_cols:
sectors = 'sectors' if print_hdr \
else ','.join(str(sector) for sector in self.sectors)
output += (sectors + SEP)
return output
def matches(event, filter):
if not filter:
return True
key, value = filter.split(':')
if not key or not value:
return False
if key == 'file':
if re.match(value, event.file_name):
return True
return False
if key == 'thread':
if re.match(value, event.thread_name):
return True
return False
def page_count(event):
if event.type == 'R':
return event.page_count
if event.type == 'W':
return 1
return 0
def pages(event):
if event.type == 'R' or event.type == 'W':
return event.pages
return []
def sectors(event):
# TBD
return []
def group_key(group_by, event):
if group_by == 'file':
return event.file_name
if group_by == 'thread':
return event.thread_name
if group_by == 'type':
return event.type
def group_item(group_by, event):
file_name = thread_name = type = '-'
if group_by == 'file':
file_name = event.file_name
elif group_by == 'thread':
thread_name = event.thread_name
elif group_by == 'type':
type = event.type
return IOTraceItem(0, type, 1, thread_name, '-', file_name,
event.page_count, event.duration, event.pages, event.sectors)
def accumulate(a, b):
a.count += b.count
a.page_count += b.page_count
a.duration += b.duration
a.pages += b.pages
a.sectors += b.sectors
def group_list(group_by, list):
if not group_by:
return list
groups = {}
for item in list:
key = group_key(group_by, item)
if key not in groups:
groups[key] = group_item(group_by, item)
continue
accumulate(groups[key], item)
ret = []
for key, val in groups.iteritems():
ret.append(val)
return ret
# type_filter: [R|W|S|B]
# filter: [file:<filename> | thread:<threadname>]
# group_by: [file | thread | type]
def trace2list(path, type_filter, filter, group_by):
list = []
events, files = TraceParser().parse_trace(path)
data = defaultdict(lambda: defaultdict(int))
for event in events:
type = event.type
if type_filter and type != type_filter:
continue
if not matches(event, filter):
continue
item = IOTraceItem(
event.ts,
event.type,
1,
event.thread_name,
event.cpu,
event.file_name,
page_count(event),
event.duration,
pages(event),
sectors(event)
)
list.append(item)
return (group_list(group_by, list), files)
def parse_args():
parser = argparse.ArgumentParser(description="I/O profile raw trace to list")
parser.add_argument(
'--input',
required=True,
dest='path',
help='path to input file')
parser.add_argument(
'--type-filter',
dest='type_filter',
default=None,
help='show only events of this type. possible values are R|W|S|B')
parser.add_argument(
'--filter',
dest='filter',
default=None,
help='type+value to filter on. possible values are ' +
'file:<filename>, thread:<thread-name>, type:<type>')
parser.add_argument(
'--group-by',
dest='group_by',
default=None,
help='group by field. possible values are file | thread | type')
parser.add_argument(
'--hide-cols',
dest='hidden_cols',
default=['pages', 'sectors'],
help='comma-separated list of column names to hide')
parser.add_argument(
'--list-cols',
dest='list_cols',
action='store_true',
help='show all available column names')
parser.add_argument(
'--sort-col',
dest='sort_col',
default='timestamp',
choices=['timestamp', 'count', 'page_count', 'duration'],
help='column to sort by')
return parser.parse_args()
def main():
args = parse_args()
if args.list_cols:
print(IOTraceItem.all_col_names())
return
list, files = trace2list(args.path, args.type_filter, args.filter, args.group_by)
list = sorted(list, key=lambda item: getattr(item, args.sort_col))
# print legend
print(IOTraceItem.col_names(args.hidden_cols))
print('------------------------------------------------------------------')
# print items
for item in list:
print(item.to_str(args.hidden_cols))
if __name__ == "__main__":
main()
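# Illustrative invocation (added for clarity; the trace file path is an assumption):
#   python trace2list.py --input /tmp/io_trace.txt --type-filter R \
#       --filter file:.*\.db --group-by file --sort-col page_count
# This lists read events whose file name matches ".*\.db", grouped per file and sorted
# by total page count; --hide-cols and --list-cols control which columns are printed.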
|
[
"[email protected]"
] | |
4c6a44c71806a1a72dd99124ec9b6b5efd0ec15f
|
e7cd755171976f00cca3fb9747573c8b0ff06b10
|
/trophy/migrations/0001_initial.py
|
108d94d225f6bc74ef4f6d7d9df1b2585dd6a1a6
|
[] |
no_license
|
derriqo/Awardsdw
|
fa3b44a98ac37bcf79859737979e3cf55e66e28a
|
1e2ef581a616b723e0f412f3d7231576967c1460
|
refs/heads/master
| 2020-04-29T04:13:42.944553 | 2019-03-21T17:25:58 | 2019-03-21T17:25:58 | 175,831,723 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,727 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-18 10:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='profile-pic/')),
('bio', models.TextField()),
('phone_number', models.IntegerField()),
],
),
migrations.CreateModel(
name='Projects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('projo_pic', models.ImageField(upload_to='projo-pic/')),
('title', models.CharField(max_length=50)),
('link', models.URLField(max_length=250)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('design', models.IntegerField()),
('usability', models.IntegerField()),
('content', models.IntegerField()),
],
),
]
|
[
"[email protected]"
] | |
bceb62f49e71304ad6adf8db18b2019cd7b907f3
|
428a970ee7aca21d1a7556f8937ebc8e4f06f394
|
/tests/test.py
|
e0caeb8c9491e0743cb807e704e2038d2bd0a962
|
[
"MIT"
] |
permissive
|
SriramRamesh/ImageScraper
|
42f5615ffe8e590a1e9f5a0838d2ce6636474513
|
41d5f328a74bea01e067d7deb219de5251b77789
|
refs/heads/master
| 2020-12-25T00:20:10.408740 | 2015-02-09T18:38:58 | 2015-02-09T18:38:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170 |
py
|
#testset.py
#yet to write proper tests.
# TEST 1: Check if 3 images are downloaded from ananth.co.in/test.html
from nose.tools import eq_
def test_sum():
eq_(2+2,4)
|
[
"[email protected]"
] | |
0c297e81d5ab99eb43ea0c1b3d9a817d82935b03
|
7aebf21ea5e46697804d395b1a32f8f97b9acc5c
|
/models/Bert-deepwide/bert_attention.py
|
2578d791610fca342abbd1d5c9c7d341597c467f
|
[] |
no_license
|
chenxingqiang/RecSys-CTR-Model-2020
|
d37b4a5b336bcdcf908780c116b6407c998e772c
|
3407657dc71427daf33b4a962173f36467378c1e
|
refs/heads/main
| 2023-07-04T17:56:26.926269 | 2021-08-27T08:45:05 | 2021-08-27T08:45:05 | 399,761,150 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,369 |
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=128,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=1,
from_seq_length=None,
to_seq_length=None,
sum_reduce_last_dim=False):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
sum_reduce_last_dim: (Optional) bool. If True, the returned context tensor is
summed over its last dimension.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
if sum_reduce_last_dim:
context_layer = tf.reduce_sum(context_layer,axis=-1)
return context_layer
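# Illustrative shape walk-through (added for clarity; the input tensor is an assumption):
#   from_tensor = tf.ones([8, 128, 512])                    # [batch, seq_len, width]
#   context = attention_layer(from_tensor, from_tensor,     # self-attention
#                             num_attention_heads=8, size_per_head=64)
#   # context has shape [8, 128, 8 * 64]; with do_return_2d_tensor=True it would be
#   # [8 * 128, 8 * 64] instead.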
|
[
"[email protected]"
] | |
f2f030f5726a06e38038e9244e679fbeaed5496b
|
cb4a07ed613c3d97391d92cf5a3411c0a42ff973
|
/leetcode/122.py
|
bc03cc73056deb2481491d8a5fe0f998e49011a6
|
[] |
no_license
|
ZMbiubiubiu/For_Test
|
27a2c3d432294c4eb2bd9a9e532dcfdf3c148068
|
6aada343a11890fc0078c3769aed2196ef7186b2
|
refs/heads/master
| 2020-06-14T01:34:57.853361 | 2019-07-20T14:52:15 | 2019-07-20T14:52:15 | 194,852,634 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 816 |
py
|
"""
买卖股票的最佳时机 II
给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。
设计一个算法来计算你所能获取的最大利润。你可以尽可能地完成更多的交易(多次买卖一支股票)。
注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
"""
class Solution:
def maxProfit(self, prices: 'List[int]') -> int:
# Whenever the next day's price is higher than today's, buy today and sell tomorrow.
# This collects the gain of every profitable day and skips every losing day,
# which naturally maximises the total profit.
bonus = 0
for i in range(len(prices) - 1):
if prices[i + 1] > prices[i]:
bonus += prices[i + 1] - prices[i]
return bonus
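# Worked example (added for clarity): for prices = [7, 1, 5, 3, 6, 4] the rising days
# contribute (5 - 1) + (6 - 3) = 7, so Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 7.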
|
[
"[email protected]"
] | |
18dc511ca7e964ffb86151143fd018120be351dd
|
482d7d5770dfc17db5b1a0e780b634d3a9f5572a
|
/Project3/code/Flipped counties.py
|
9c909942d9dfbf7bca813ffccfd5049540d6eb76
|
[] |
no_license
|
fmsilver89/FYS_STK_4155
|
5b9a878330f06a29ec6416aff92a06ebf0ba8dd8
|
189b7ef0d18cd9395eeab82702376ae91ad24d17
|
refs/heads/master
| 2020-09-11T13:24:15.963157 | 2019-11-16T10:18:21 | 2019-11-16T10:18:21 | 222,078,923 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 733 |
py
|
import numpy as np
import pandas as pd
# Import CSV-file
data = pd.read_table('Data/US_Election.csv', sep = ';', header = 0, encoding = 'latin1')
# 2012 Election
DEM_2012 = np.array(data.iloc[:, -2])
DEM_2012 = np.where(DEM_2012 > 0.5, 1, 0)
# 2016 Election
DEM_2016 = np.array(data.iloc[:, -4])
DEM_2016 = np.where(DEM_2016 > 0.5, 1, 0)
# Counties indices
flipped_counties_indices = np.where(DEM_2012 != DEM_2016)[0]
# Binary classification of the counties
n = len(DEM_2012)
flipped_counties = np.zeros((n,))
flipped_counties[flipped_counties_indices] = 1
# Write to file
#f = open("Data/flipped_counties.txt", "w+")
#for i in range(n):
# f.write('%d \n' % flipped_counties[i])
#f.close()
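# For illustration (added, not part of the original script): the number of counties that
# flipped between 2012 and 2016 is int(flipped_counties.sum()), and their row indices
# are available in flipped_counties_indices.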
|
[
"[email protected]"
] | |
fa3e6a6040101a1c6605f060bbdfcd97858cad90
|
14344ea4c081bbfb9639cc33dcf843b9134b7d5b
|
/mindspore/nn/probability/infer/variational/svi.py
|
8aca1221ac80f0405c43c22d853a591e87c1a5fc
|
[
"Apache-2.0",
"Libpng",
"LGPL-2.1-only",
"MIT",
"IJG",
"Zlib",
"BSD-3-Clause-Open-MPI",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"MPL-1.0",
"MPL-1.1",
"MPL-2.0",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Unlicense",
"LicenseRef-scancode-proprietary-license",
"BSL-1.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
Joejiong/mindspore
|
d06318e456c000f6368dbe648daa39d82ba6a2da
|
083fd6565cab1aa1d3114feeacccf1cba0d55e80
|
refs/heads/master
| 2022-12-05T07:17:28.737741 | 2020-08-18T07:51:33 | 2020-08-18T07:51:33 | 288,415,456 | 0 | 0 |
Apache-2.0
| 2020-08-18T09:43:35 | 2020-08-18T09:43:34 | null |
UTF-8
|
Python
| false | false | 2,819 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Stochastic Variational Inference(SVI)."""
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from ....wrap.cell_wrapper import TrainOneStepCell
class SVI:
r"""
Stochastic Variational Inference(SVI).
Variational inference casts the inference problem as an optimization: it posits a family of distributions
over the hidden variables, indexed by a set of free parameters, and then optimizes those parameters to make
the approximation closest to the posterior of interest.
See `Variational Inference: A Review for Statisticians <https://arxiv.org/abs/1601.00670>`_ for more details.
Args:
net_with_loss(Cell): Cell with loss function.
optimizer (Cell): Optimizer for updating the weights.
"""
def __init__(self, net_with_loss, optimizer):
self.net_with_loss = net_with_loss
self.optimizer = optimizer
self._loss = 0.0
def run(self, train_dataset, epochs=10):
"""
Optimize the parameters by training the probability network, and return the trained network.
Args:
epochs (int): Total number of iterations on the data. Default: 10.
train_dataset (Dataset): A training dataset iterator.
Outputs:
Cell, the trained probability network.
"""
train_net = TrainOneStepCell(self.net_with_loss, self.optimizer)
train_net.set_train()
for _ in range(1, epochs+1):
train_loss = 0
dataset_size = 0
for data in train_dataset.create_dict_iterator():
x = Tensor(data['image'], dtype=mstype.float32)
y = Tensor(data['label'], dtype=mstype.int32)
dataset_size += len(x)
loss = train_net(x, y).asnumpy()
train_loss += loss
self._loss = train_loss / dataset_size
model = self.net_with_loss.backbone_network
return model
def get_train_loss(self):
"""
Returns:
numpy.dtype, the loss after training.
"""
return self._loss
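# Illustrative usage sketch (added for clarity; the network, optimizer and dataset
# names below are assumptions, not part of this module):
#   net_with_loss = nn.WithLossCell(bnn_backbone, nn.SoftmaxCrossEntropyWithLogits())
#   optimizer = nn.AdamWeightDecay(params=bnn_backbone.trainable_params(), learning_rate=1e-3)
#   vi = SVI(net_with_loss=net_with_loss, optimizer=optimizer)
#   trained_net = vi.run(train_dataset=ds_train, epochs=10)
#   print(vi.get_train_loss())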
|
[
"[email protected]"
] | |
83b50545d5766335a90a8efd425ce27ac02e7134
|
2ed3f72d9db845400c7e5ef65bece43ac211279f
|
/landreg/wsgi.py
|
305e7e2bdd7e9e316a7369e4f8664cd406f2efb7
|
[] |
no_license
|
tech-cent/landreg
|
43e51f3576d1a490d7780d8407cffdd72bf0ee24
|
5c41128ea2a6712c12aaf51fc9dc90320e187212
|
refs/heads/dev
| 2022-12-10T10:10:14.129543 | 2019-12-05T16:40:12 | 2019-12-05T16:40:12 | 196,998,810 | 0 | 0 | null | 2022-05-25T02:22:29 | 2019-07-15T12:56:04 |
Python
|
UTF-8
|
Python
| false | false | 391 |
py
|
"""
WSGI config for landreg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'landreg.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
42144426a8e54f20974cf27336604f14ea52c22b
|
16b1265dbd22ed99eae50fca99fbfd5c36db20be
|
/stringÖvningar/module16.py
|
31f974459ef3ce759cedbfb2279a4f385cc16623
|
[] |
no_license
|
athina-rm/string-vningar
|
320297d15b883e62e401f22dcacc3a8965a18e58
|
dcac23f178984fd757be82acef17d249a5f64b4d
|
refs/heads/master
| 2022-12-30T15:57:28.792511 | 2020-10-15T11:15:09 | 2020-10-15T11:15:09 | 304,301,354 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 75 |
py
|
def incre(x):
x=x+1
print(x)
x=3
print(x)
incre(x)
print(x)
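# Note (added for clarity): this prints 3, then 4, then 3 again -- rebinding x inside
# incre() does not change the caller's x, because the assignment only rebinds the
# local name.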
|
[
"[email protected]"
] | |
ecee65f1859c9f82e0e68407c6063b9f18799178
|
eda523352682b87a6b750d970875d4647b86e5ea
|
/raw_data.py
|
10e81810818fca15c6d1693d0659dbd5c780751b
|
[] |
no_license
|
flyleave/ZingSemi
|
f3d94b6dbc918f8da7e1784b322f4684982f48c3
|
124be0594a25cf4de19eac7d717a7a2f59703b5d
|
refs/heads/master
| 2021-08-31T17:21:56.253836 | 2017-12-22T07:03:06 | 2017-12-22T07:03:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,066 |
py
|
import cx_Oracle
import pandas as pd
import numpy as np
import time
import scipy.stats as st
time_range = '201711'
conf_df = pd.read_excel('CPK/Collection Character.xlsx')
block_list = "'702'" #'711'
now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))
def connect2DB():
username = "frdata"
userpwd = "frdata2017"
host = "10.10.17.66"
port = 1521
dbname = "rptdb"
dsn = cx_Oracle.makedsn(host, port, dbname)
connection = cx_Oracle.connect(username, userpwd, dsn)
return connection
# def get_last_lot(block_list, time_range, start_spec):
#
#
# spec_list = "'XXXX'"
# for spec in conf_df['SPEC_ID']:
# spec_list += ",'%s'" % spec
# sql = "select spec_id, mat_id, substr(usr_cmf_07, 1, 10) as lot_id, substr(usr_cmf_07, 1, 8) as block_id, usr_cmf_07 as wafer_id, sys_time, ext_average " \
# "from tqs_summary_data@Mesarcdb " \
# "where substr(to_char(sys_time,'yyyymmdd hh24miss'), 1, 6) = '%s' " \
# "and spec_id in (%s) and substr(usr_cmf_07, 1, 3) = %s and factory in ('WE1', 'GR1')" % (
# time_range, spec_list, block_list)
# # sql = "select spec_id, mat_id, lot_id, substr(LOT_ID, 1, 8) as block_id, usr_cmf_07 as wafer_id , sys_time, ext_average " \
# # "from tqs_summary_data@Mesarcdb " \
# # "where substr(to_char(sys_time,'yyyymmdd hh24miss'), 1, 6) = '%s' " \
# # "and substr(LOT_ID, 1, 8) in (%s)" % (time_range, block_list)
# print(sql)
#
# connection = connect2DB()
# or_df = pd.read_sql(sql, connection)
# connection.close()
# print('Read from database sucessfully')
#
# temp_dict = {}
#
# index = conf_df[conf_df['SPEC_ID'] == start_spec].index.tolist()[0]
# print(index)
# specs = conf_df['SPEC_ID'].iloc[index: ]
#
# for i, spec in enumerate(specs):
# print("Current spec is : %s" % spec)
# if i == 0:
# temp_dict[spec] = or_df[or_df['SPEC_ID'] == spec]['LOT_ID'].unique()
# else:
# print("last spec: %s" % specs[index + i - 1])
# temp_dict[spec] = list(set(or_df[or_df['SPEC_ID'] == spec]['LOT_ID'].unique()) & set(temp_dict[specs[index + i - 1]]))
# print(temp_dict)
# return temp_dict
#
#
#
#
#
# temp_dict = get_last_lot(block_list, '201711', '3110-A1')
#
# writer = pd.ExcelWriter('CPK/Result/last_lot.xlsx')
# temp_df = pd.DataFrame.from_dict(temp_dict, orient='index')
# temp_df.to_excel(writer, index=True)
#
#
#
# def get_raw_data(block_list, time_range):
#
# writer = pd.ExcelWriter('CPK/Result/%s %s.xlsx' % (block_list, time_range))
# spec_list = "'XXXX'"
# for spec in conf_df['SPEC_ID']:
# spec_list += ",'%s'" % spec
# sql = "select spec_id, mat_id, lot_id, substr(LOT_ID, 1, 8) as block_id, usr_cmf_07 as wafer_id , sys_time, ext_average " \
# "from tqs_summary_data@Mesarcdb " \
# "where substr(to_char(sys_time,'yyyymmdd hh24miss'), 1, 6) = '%s' " \
# "and spec_id in (%s) and substr(usr_cmf_07, 1, 3) = %s and factory in ('WE1', 'GR1')" % (time_range, spec_list, block_list)
# # sql = "select spec_id, mat_id, lot_id, substr(LOT_ID, 1, 8) as block_id, usr_cmf_07 as wafer_id , sys_time, ext_average " \
# # "from tqs_summary_data@Mesarcdb " \
# # "where substr(to_char(sys_time,'yyyymmdd hh24miss'), 1, 6) = '%s' " \
# # "and substr(LOT_ID, 1, 8) in (%s)" % (time_range, block_list)
# print(sql)
#
# connection = connect2DB()
# or_df = pd.read_sql(sql, connection)
# print('Read from database sucessfully')
#
# sta_df = pd.DataFrame()
# col = 0
# null_list = []
# for spec, t in zip(conf_df['SPEC_ID'], conf_df['TYPE']):
#
# new_df = or_df[or_df['SPEC_ID'] == spec]
# sta_dict = {}
# # print(new_df['SPEC_ID'].iloc[0])
# qualified_num_df = pd.DataFrame()
# if len(new_df) != 0:
# sta_dict = {'SPEC': [spec], 'Average': [new_df['EXT_AVERAGE'].mean()], 'Standard Deviation': [new_df['EXT_AVERAGE'].std()],
# '0%': [float(np.percentile(new_df['EXT_AVERAGE'], 0))], '25%': [float(np.percentile(new_df['EXT_AVERAGE'], 25))],
# '50%': [float(np.percentile(new_df['EXT_AVERAGE'], 50))], '75%': [float(np.percentile(new_df['EXT_AVERAGE'], 75))],
# '100': [float(np.percentile(new_df['EXT_AVERAGE'], 100))]}
# else:
# null_list.append(spec)
# sta_df = pd.DataFrame(sta_dict).T
#
# if t == 'None':
# qualified_num = len(new_df[new_df['EXT_AVERAGE'] == 0])
# unqualified_num = len(new_df[new_df['EXT_AVERAGE'] != 0])
# qualified_num_dict = {'日期': [time_range], '合格数量': [qualified_num], '不合格数量': [unqualified_num]}
# qualified_num_df = pd.DataFrame(qualified_num_dict).T
# # print(sta_df)
# qualified_num_df.to_excel(writer, startrow=0, startcol=col, header=False)
# sta_df.to_excel(writer, startrow=len(qualified_num_df), startcol=col, header=False)
# new_df.to_excel(writer, startrow=len(qualified_num_df) + len(sta_df), startcol=col, index=False)
# col += new_df.shape[1] + 1
# print(null_list)
# connection.close()
#
# get_raw_data(block_list, time_range)
spec_list = "'XXXX'"
for spec in conf_df['SPEC_ID']:
spec_list += ",'%s'" % spec
sql = "select b.oper, b.recipe, a.spec_id, a.mat_id, substr(a.usr_cmf_07, 1, 10) as lot_id, substr(a.usr_cmf_07, 1, 8) as block_id, a.usr_cmf_07 as wafer_id, a.sys_time, a.ext_average " \
"from tqs_summary_data@Mesarcdb a, MRCPMFODEF@MESARCDB b where a.mat_id = b.mat_id and a.process = b.flow and a.step_id = b.oper " \
"and substr(to_char(a.sys_time,'yyyymmdd hh24miss'), 1, 6) = '%s' " \
"and a.spec_id in (%s) and substr(a.usr_cmf_07, 1, 3) = %s and a.factory in ('WE1', 'GR1')" % (
time_range, spec_list, block_list)
print(sql)
connection = connect2DB()
or_df = pd.read_sql(sql, connection)
connection.close()
print('Read from database sucessfully')
a = or_df[['WAFER_ID', 'RECIPE', 'OPER']] # TODO RECIPE
distinct = a.drop_duplicates(['WAFER_ID', 'OPER'])
recipe_df = distinct.pivot(index='WAFER_ID', columns='OPER', values='RECIPE')
# recipe_df = pd.DataFrame(index=a['WAFER_ID'].unique(), columns=a['OPER'].unique())
# for oper in a['OPER'].unique():
# for index in recipe_df.index.tolist():
# recipe_df[oper].loc[index] = a[(a['OPER'] == oper) & (a['WAFER_ID'] == index)]['RECIPE'].iloc[0] if \
# len(a[(a['OPER'] == oper) & (a['WAFER_ID'] == index)]['RECIPE']) != 0 else None
b = or_df.drop_duplicates(subset=['WAFER_ID', 'SPEC_ID'], keep='first').pivot_table(index=['MAT_ID', 'LOT_ID', 'BLOCK_ID', 'WAFER_ID', 'SYS_TIME'],
columns=['SPEC_ID'],
values='EXT_AVERAGE')
main_df = pd.merge(b.reset_index(), recipe_df.reset_index(), how='left', on='WAFER_ID')
statistic_df = pd.DataFrame(index=['PPK',
'Distribution',
'Process Range',
'Percent Defective',
'Sample Size',
'Average',
'Standard Deviation',
'0.00%',
'25.00%',
'50.00%',
'75.00%',
'100.00%'], columns=b.columns.values.tolist())
def get_best_distribution(data):
    # Fit candidate distributions and return the name of the best fit by log-likelihood.
    fitted_params_norm = st.norm.fit(data)
    fitted_params_cauchy = st.cauchy.fit(data)
    fitted_params_expon = st.expon.fit(data)
    log_likelihoods = {
        'norm': np.sum(st.norm.logpdf(data, *fitted_params_norm)),
        'cauchy': np.sum(st.cauchy.logpdf(data, *fitted_params_cauchy)),
        'expon': np.sum(st.expon.logpdf(data, *fitted_params_expon)),
    }
    return max(log_likelihoods, key=log_likelihoods.get)
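# Illustrative call (added for clarity; the column choice is an assumption):
#   get_best_distribution(or_df['EXT_AVERAGE'].dropna())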
for col in b.columns.values.tolist():
statistic_df[col].loc['PPK'] = ''
statistic_df[col].loc['Distribution'] = ''
statistic_df[col].loc['Process Range'] = ''
statistic_df[col].loc['Percent Defective'] = ''
statistic_df[col].loc['Sample Size'] = ''
statistic_df[col].loc['Average'] = b[col].mean()
statistic_df[col].loc['Standard Deviation'] = b[col].std()
statistic_df[col].loc['0.00%'] = np.percentile(b[col].dropna(), 0)
statistic_df[col].loc['25.00%'] = np.percentile(b[col].dropna(), 25)
statistic_df[col].loc['50.00%'] = np.percentile(b[col].dropna(), 50)
statistic_df[col].loc['75.00%'] = np.percentile(b[col].dropna(), 75)
statistic_df[col].loc['100.00%'] = np.percentile(b[col].dropna(), 100)
writer = pd.ExcelWriter('CPK/%s Collection Data.xlsx' % now_time)
statistic_df.to_excel(writer, startcol=4)
main_df.to_excel(writer, startrow=len(statistic_df) + 1)
################
# or_df[(or_df['WAFER_ID'] == '702360380607') & (or_df['SPEC_ID'] == '6700-HazeMax')].drop_duplicates(subset='WAFER_ID', keep='first')
import scipy.stats as st
st.anderson([1,2,3,4,5,6,5,3,4,2,31,34,5,23,3,52,342,424234,52,234,2,342,34,1])
|
[
"[email protected]"
] | |
f67de883a6752ffbaab01bd20e984e4ddb2a51eb
|
7a40213ccfe36a16c803cf37111b96148a0a69a6
|
/tests/unit/async_/io/test_class_bolt5x1.py
|
2ee26b130a08e1ce24775282c0adad56c15af6cc
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
neo4j/neo4j-python-driver
|
6bc0ae1acf63e65e8b6db8bc890e1d8ae45d7b7d
|
1bd382f48e00c748c83cb910401a74336fbf2feb
|
refs/heads/5.0
| 2023-09-06T05:19:09.892773 | 2023-09-05T15:04:40 | 2023-09-05T15:04:40 | 35,100,117 | 873 | 214 |
NOASSERTION
| 2023-09-05T15:04:42 | 2015-05-05T13:08:20 |
Python
|
UTF-8
|
Python
| false | false | 22,452 |
py
|
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import pytest
import neo4j
import neo4j.exceptions
from neo4j._async.io._bolt5 import AsyncBolt5x1
from neo4j._conf import PoolConfig
from neo4j._meta import USER_AGENT
from neo4j.exceptions import ConfigurationError
from ...._async_compat import mark_async_test
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_stale(fake_socket, set_stale):
address = neo4j.Address(("127.0.0.1", 7687))
max_connection_lifetime = 0
connection = AsyncBolt5x1(address, fake_socket(address),
max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is True
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale_if_not_enabled(fake_socket, set_stale):
address = neo4j.Address(("127.0.0.1", 7687))
max_connection_lifetime = -1
connection = AsyncBolt5x1(address, fake_socket(address),
max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale(fake_socket, set_stale):
address = neo4j.Address(("127.0.0.1", 7687))
max_connection_lifetime = 999999999
connection = AsyncBolt5x1(address, fake_socket(address),
max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@pytest.mark.parametrize(("args", "kwargs", "expected_fields"), (
(("", {}), {"db": "something"}, ({"db": "something"},)),
(("", {}), {"imp_user": "imposter"}, ({"imp_user": "imposter"},)),
(
("", {}),
{"db": "something", "imp_user": "imposter"},
({"db": "something", "imp_user": "imposter"},)
),
))
@mark_async_test
async def test_extra_in_begin(fake_socket, args, kwargs, expected_fields):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.begin(*args, **kwargs)
await connection.send_all()
tag, is_fields = await socket.pop_message()
assert tag == b"\x11"
assert tuple(is_fields) == expected_fields
@pytest.mark.parametrize(("args", "kwargs", "expected_fields"), (
(("", {}), {"db": "something"}, ("", {}, {"db": "something"})),
(("", {}), {"imp_user": "imposter"}, ("", {}, {"imp_user": "imposter"})),
(
("", {}),
{"db": "something", "imp_user": "imposter"},
("", {}, {"db": "something", "imp_user": "imposter"})
),
))
@mark_async_test
async def test_extra_in_run(fake_socket, args, kwargs, expected_fields):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.run(*args, **kwargs)
await connection.send_all()
tag, is_fields = await socket.pop_message()
assert tag == b"\x10"
assert tuple(is_fields) == expected_fields
@mark_async_test
async def test_n_extra_in_discard(fake_socket):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.discard(n=666)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == {"n": 666}
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": -1, "qid": 666}),
(-1, {"n": -1}),
]
)
@mark_async_test
async def test_qid_extra_in_discard(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.discard(qid=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": 666, "qid": 777}),
(-1, {"n": 666}),
]
)
@mark_async_test
async def test_n_and_qid_extras_in_discard(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.discard(n=666, qid=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": 666}),
(-1, {"n": -1}),
]
)
@mark_async_test
async def test_n_extra_in_pull(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.pull(n=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": -1, "qid": 777}),
(-1, {"n": -1}),
]
)
@mark_async_test
async def test_qid_extra_in_pull(fake_socket, test_input, expected):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.pull(qid=test_input)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@mark_async_test
async def test_n_and_qid_extras_in_pull(fake_socket):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
connection.pull(n=666, qid=777)
await connection.send_all()
tag, fields = await socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == {"n": 666, "qid": 777}
@mark_async_test
async def test_hello_passes_routing_metadata(fake_socket_pair):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/4.4.0"})
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime,
routing_context={"foo": "bar"}
)
await connection.hello()
tag, fields = await sockets.server.pop_message()
assert tag == b"\x01"
assert len(fields) == 1
assert fields[0]["routing"] == {"foo": "bar"}
async def _assert_logon_message(sockets, auth):
tag, fields = await sockets.server.pop_message()
assert tag == b"\x6A" # LOGON
assert len(fields) == 1
keys = ["scheme", "principal", "credentials"]
assert list(fields[0].keys()) == keys
for key in keys:
assert fields[0][key] == getattr(auth, key)
@mark_async_test
async def test_hello_pipelines_logon(fake_socket_pair):
auth = neo4j.Auth("basic", "alice123", "supersecret123")
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(
b"\x7F", {"code": "Neo.DatabaseError.General.MadeUpError",
"message": "kthxbye"}
)
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime, auth=auth
)
with pytest.raises(neo4j.exceptions.Neo4jError):
await connection.hello()
tag, fields = await sockets.server.pop_message()
assert tag == b"\x01" # HELLO
assert len(fields) == 1
assert list(fields[0].keys()) == ["user_agent"]
assert auth.credentials not in repr(fields)
await _assert_logon_message(sockets, auth)
@mark_async_test
async def test_logon(fake_socket_pair):
auth = neo4j.Auth("basic", "alice123", "supersecret123")
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, sockets.client,
PoolConfig.max_connection_lifetime, auth=auth)
connection.logon()
await connection.send_all()
await _assert_logon_message(sockets, auth)
@mark_async_test
async def test_re_auth(fake_socket_pair, mocker, static_auth):
auth = neo4j.Auth("basic", "alice123", "supersecret123")
auth_manager = static_auth(auth)
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(
b"\x7F", {"code": "Neo.DatabaseError.General.MadeUpError",
"message": "kthxbye"}
)
connection = AsyncBolt5x1(address, sockets.client,
PoolConfig.max_connection_lifetime)
connection.pool = mocker.AsyncMock()
connection.re_auth(auth, auth_manager)
await connection.send_all()
with pytest.raises(neo4j.exceptions.Neo4jError):
await connection.fetch_all()
tag, fields = await sockets.server.pop_message()
assert tag == b"\x6B" # LOGOFF
assert len(fields) == 0
await _assert_logon_message(sockets, auth)
assert connection.auth is auth
assert connection.auth_manager is auth_manager
@mark_async_test
async def test_logoff(fake_socket_pair):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(address, sockets.client,
PoolConfig.max_connection_lifetime)
connection.logoff()
assert not sockets.server.recv_buffer # pipelined, so no response yet
await connection.send_all()
assert sockets.server.recv_buffer # now!
tag, fields = await sockets.server.pop_message()
assert tag == b"\x6B" # LOGOFF
assert len(fields) == 0
@pytest.mark.parametrize(("hints", "valid"), (
({"connection.recv_timeout_seconds": 1}, True),
({"connection.recv_timeout_seconds": 42}, True),
({}, True),
({"whatever_this_is": "ignore me!"}, True),
({"connection.recv_timeout_seconds": -1}, False),
({"connection.recv_timeout_seconds": 0}, False),
({"connection.recv_timeout_seconds": 2.5}, False),
({"connection.recv_timeout_seconds": None}, False),
({"connection.recv_timeout_seconds": False}, False),
({"connection.recv_timeout_seconds": "1"}, False),
))
@mark_async_test
async def test_hint_recv_timeout_seconds(
fake_socket_pair, hints, valid, caplog, mocker
):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
sockets.client.settimeout = mocker.Mock()
await sockets.server.send_message(
b"\x70", {"server": "Neo4j/4.3.4", "hints": hints}
)
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime
)
with caplog.at_level(logging.INFO):
await connection.hello()
if valid:
if "connection.recv_timeout_seconds" in hints:
sockets.client.settimeout.assert_called_once_with(
hints["connection.recv_timeout_seconds"]
)
else:
sockets.client.settimeout.assert_not_called()
assert not any("recv_timeout_seconds" in msg
and "invalid" in msg
for msg in caplog.messages)
else:
sockets.client.settimeout.assert_not_called()
assert any(repr(hints["connection.recv_timeout_seconds"]) in msg
and "recv_timeout_seconds" in msg
and "invalid" in msg
for msg in caplog.messages)
CREDENTIALS = "+++super-secret-sauce+++"
@pytest.mark.parametrize("auth", (
("user", CREDENTIALS),
neo4j.basic_auth("user", CREDENTIALS),
neo4j.kerberos_auth(CREDENTIALS),
neo4j.bearer_auth(CREDENTIALS),
neo4j.custom_auth("user", CREDENTIALS, "realm", "scheme"),
neo4j.Auth("scheme", "principal", CREDENTIALS, "realm", foo="bar"),
))
@mark_async_test
async def test_credentials_are_not_logged(auth, fake_socket_pair, caplog):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/4.3.4"})
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(
address, sockets.client, PoolConfig.max_connection_lifetime, auth=auth
)
with caplog.at_level(logging.DEBUG):
await connection.hello()
if isinstance(auth, tuple):
auth = neo4j.basic_auth(*auth)
for field in ("scheme", "principal", "realm", "parameters"):
value = getattr(auth, field, None)
if value:
assert repr(value) in caplog.text
assert CREDENTIALS not in caplog.text
@pytest.mark.parametrize(("method", "args"), (
("run", ("RETURN 1",)),
("begin", ()),
))
@pytest.mark.parametrize("kwargs", (
{"notifications_min_severity": "WARNING"},
{"notifications_disabled_categories": ["HINT"]},
{"notifications_disabled_categories": []},
{
"notifications_min_severity": "WARNING",
"notifications_disabled_categories": ["HINT"]
},
))
def test_does_not_support_notification_filters(fake_socket, method,
args, kwargs):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, socket,
PoolConfig.max_connection_lifetime)
method = getattr(connection, method)
with pytest.raises(ConfigurationError, match="Notification filtering"):
method(*args, **kwargs)
@mark_async_test
@pytest.mark.parametrize("kwargs", (
{"notifications_min_severity": "WARNING"},
{"notifications_disabled_categories": ["HINT"]},
{"notifications_disabled_categories": []},
{
"notifications_min_severity": "WARNING",
"notifications_disabled_categories": ["HINT"]
},
))
async def test_hello_does_not_support_notification_filters(
fake_socket, kwargs
):
address = neo4j.Address(("127.0.0.1", 7687))
socket = fake_socket(address, AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(
address, socket, PoolConfig.max_connection_lifetime,
**kwargs
)
with pytest.raises(ConfigurationError, match="Notification filtering"):
await connection.hello()
@mark_async_test
@pytest.mark.parametrize(
"user_agent", (None, "test user agent", "", USER_AGENT)
)
async def test_user_agent(fake_socket_pair, user_agent):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/1.2.3"})
await sockets.server.send_message(b"\x70", {})
max_connection_lifetime = 0
connection = AsyncBolt5x1(
address, sockets.client, max_connection_lifetime, user_agent=user_agent
)
await connection.hello()
tag, fields = await sockets.server.pop_message()
extra = fields[0]
if not user_agent:
assert extra["user_agent"] == USER_AGENT
else:
assert extra["user_agent"] == user_agent
@mark_async_test
@pytest.mark.parametrize(
"user_agent", (None, "test user agent", "", USER_AGENT)
)
async def test_does_not_send_bolt_agent(fake_socket_pair, user_agent):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/1.2.3"})
await sockets.server.send_message(b"\x70", {})
max_connection_lifetime = 0
connection = AsyncBolt5x1(
address, sockets.client, max_connection_lifetime, user_agent=user_agent
)
await connection.hello()
tag, fields = await sockets.server.pop_message()
extra = fields[0]
assert "bolt_agent" not in extra
@mark_async_test
@pytest.mark.parametrize(
("func", "args", "extra_idx"),
(
("run", ("RETURN 1",), 2),
("begin", (), 0),
)
)
@pytest.mark.parametrize(
("timeout", "res"),
(
(None, None),
(0, 0),
(0.1, 100),
(0.001, 1),
(1e-15, 1),
(0.0005, 1),
(0.0001, 1),
(1.0015, 1002),
(1.000499, 1000),
(1.0025, 1002),
(3.0005, 3000),
(3.456, 3456),
(1, 1000),
(
-1e-15,
ValueError("Timeout must be a positive number or 0")
),
(
"foo",
ValueError("Timeout must be specified as a number of seconds")
),
(
[1, 2],
TypeError("Timeout must be specified as a number of seconds")
)
)
)
async def test_tx_timeout(
fake_socket_pair, func, args, extra_idx, timeout, res
):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
await sockets.server.send_message(b"\x70", {})
connection = AsyncBolt5x1(address, sockets.client, 0)
func = getattr(connection, func)
if isinstance(res, Exception):
with pytest.raises(type(res), match=str(res)):
func(*args, timeout=timeout)
else:
func(*args, timeout=timeout)
await connection.send_all()
tag, fields = await sockets.server.pop_message()
extra = fields[extra_idx]
if timeout is None:
assert "tx_timeout" not in extra
else:
assert extra["tx_timeout"] == res
@pytest.mark.parametrize(
"actions",
itertools.combinations_with_replacement(
itertools.product(
("run", "begin", "begin_run"),
("reset", "commit", "rollback"),
(None, "some_db", "another_db"),
),
2
)
)
@mark_async_test
async def test_tracks_last_database(fake_socket_pair, actions):
address = neo4j.Address(("127.0.0.1", 7687))
sockets = fake_socket_pair(address,
packer_cls=AsyncBolt5x1.PACKER_CLS,
unpacker_cls=AsyncBolt5x1.UNPACKER_CLS)
connection = AsyncBolt5x1(address, sockets.client, 0)
await sockets.server.send_message(b"\x70", {"server": "Neo4j/1.2.3"})
await sockets.server.send_message(b"\x70", {})
await connection.hello()
assert connection.last_database is None
for action, finish, db in actions:
await sockets.server.send_message(b"\x70", {})
if action == "run":
connection.run("RETURN 1", db=db)
elif action == "begin":
connection.begin(db=db)
elif action == "begin_run":
connection.begin(db=db)
assert connection.last_database == db
await sockets.server.send_message(b"\x70", {})
connection.run("RETURN 1")
else:
raise ValueError(action)
assert connection.last_database == db
await connection.send_all()
await connection.fetch_all()
assert connection.last_database == db
await sockets.server.send_message(b"\x70", {})
if finish == "reset":
await connection.reset()
elif finish == "commit":
if action == "run":
connection.pull()
else:
connection.commit()
elif finish == "rollback":
if action == "run":
connection.pull()
else:
connection.rollback()
else:
raise ValueError(finish)
await connection.send_all()
await connection.fetch_all()
assert connection.last_database == db
|
[
"[email protected]"
] | |
73001255a8f7dbbfd61f269fe5197b9756ba3082
|
f15b0cc981a7484bae48cd5bde82c9f6ab510f3a
|
/mysite/settings.py
|
1cc9346f7b86bc5e6e5f75c3a1b2eee5758f1c33
|
[] |
no_license
|
SabryCedra/my-first-blog
|
8e851764d505663331726e53490bff1d37cd15ad
|
4ca8955a3db785d23eb10f0a9e17c90590b69ebb
|
refs/heads/master
| 2021-01-17T17:15:23.809818 | 2016-07-02T15:34:19 | 2016-07-02T15:34:19 | 62,449,388 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,702 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r(g_*&*c%f5_(d&qhe=%hks(6seo1lwdwu)2rkx+17bcb&1*s6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Rome'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
|
[
"[email protected]"
] | |
528ccbf57f0de4800085949f0d1914babcafee23
|
6eb165eb8e6012574230604aa2960cb1cf5cac4c
|
/evaluation/evaluate_stats.py
|
18c3e7463e99aac3979826f82429fa376c363309
|
[] |
no_license
|
Jakil83/Humanware-block2-b2phut3
|
60666889f7ea84c6ac2b785b148ff906e2c983cd
|
959b4da6c6aebab48f7e41a222bd05b5258b80f4
|
refs/heads/master
| 2020-04-23T13:57:41.059799 | 2019-03-16T02:05:08 | 2019-03-16T02:05:08 | 171,215,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,234 |
py
|
import argparse
import numpy as np
from scipy.stats import wilcoxon
def evaluate_stats(y_true, y_pred1, y_pred2):
"""
    Statistical evaluation. Compare the per-sample correctness of the
    predictions of two different models using the Wilcoxon signed-rank test
    and print the one-sided p-value.
Parameters
----------
y_true: ndarray
The ground truth labels.
y_pred1: ndarray
The prediction of the first model.
y_pred2: ndarray
The prediction of the second model.
"""
assert len(y_true) == len(y_pred1), "# of samples differ"
assert len(y_true) == len(y_pred2), "# of samples differ"
acc_pred1 = (y_pred1 == y_true) * 1
acc_pred2 = (y_pred2 == y_true) * 1
def acc(y):
return np.sum(y) / len(y)
print("Accuracy of first model", acc(acc_pred1))
print("Accuracy of second model", acc(acc_pred2))
stat, p_value = wilcoxon(acc_pred1, acc_pred2, zero_method='zsplit')
# One-sided p_value
p_value = p_value / 2
print('''\nP-value score for the
Wilcoxon signed-rank test : {:.4f}'''.format(p_value))
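# Hedged usage sketch (not part of the original script): evaluate_stats can be
# called directly on in-memory arrays instead of the files loaded via argparse
# below. The label and prediction values here are invented for illustration.
def _demo_evaluate_stats():
    y_true = np.array([0, 1, 1, 0, 1, 1, 0, 0])
    y_pred_model_a = np.array([0, 1, 0, 0, 1, 1, 0, 1])  # 6 of 8 correct
    y_pred_model_b = np.array([0, 1, 1, 0, 1, 0, 0, 0])  # 7 of 8 correct
    evaluate_stats(y_true, y_pred_model_a, y_pred_model_b)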
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--y_true", type=str,
default='/results/ground_truth.txt',
help='''y_true is the absolute path to the
file where the ground truth is
saved.''')
parser.add_argument("--y_pred1", type=str,
default='/results/eval_pred1.txt',
help='''y_pred1 is the absolute path to the
file where the output of model 1
inference is saved.''')
parser.add_argument("--y_pred2", type=str,
default='/results/eval_pred2.txt',
help='''y_pred2 is the absolute path to the
file where the output of model 2
inference is saved.''')
args = parser.parse_args()
y_true = np.loadtxt(args.y_true)
y_pred1 = np.loadtxt(args.y_pred1)
y_pred2 = np.loadtxt(args.y_pred2)
evaluate_stats(y_true, y_pred1, y_pred2)
|
[
"[email protected]"
] | |
a317a9e4f4f5d6e738556b77ccdf5ca54c22337f
|
d8ef155d2b931642e448263d43fbf856b3a466c0
|
/certificates/__main__.py
|
ac85092b9df679740502289f380cc93e8e0a251c
|
[
"Apache-2.0"
] |
permissive
|
diemesleno/certificates
|
a34632bc97a175fd739cdaa6d78f880316176a3c
|
7aedf80903304216c6d9a8c99efd4df5aa7f8049
|
refs/heads/master
| 2022-02-15T17:44:43.132433 | 2019-08-16T05:44:26 | 2019-08-16T05:44:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
import argparse
from .certificates import make_certificates
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"participants", help="csv filaname containing participants"
)
parser.add_argument(
"template", help="certificate template in svg format used to build"
)
parser.add_argument(
"--output",
"-o",
default="./output",
help="destination of the generated certificates",
)
args = parser.parse_args()
make_certificates(args.participants, args.template, args.output)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
1f70f4fccc266bf1e88c3b6567166559faf77759
|
093c78da7ed780049cbec1f3d8a0f9f2a334ccc1
|
/Day02/scripts/test01_testcase.py
|
4765c79510ad1d92471f5f1f96c7155c4b06f54c
|
[] |
no_license
|
jason-girl/Study
|
d5191216c39b6b765630a99dfc1d3079f30384dc
|
a6574a74f0d3cd3ff44ddf8fe4d5884f118ea78c
|
refs/heads/master
| 2023-04-04T02:41:32.650966 | 2021-04-06T03:02:11 | 2021-04-06T03:02:11 | 322,759,930 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 693 |
py
|
# _*_ coding:utf-8 _*_
# File Name: test01_testcase
# Author: Emily
# Date: 2021/3/2 16:45
# Description: basic usage of unittest.TestCase
# Import the unittest package
import unittest
# A simple addition function to be tested
def add(x, y):
return x+y
# Define a test class that inherits from unittest.TestCase
class Test01(unittest.TestCase):
    # Define test methods; note: method names must start with "test"
    def test_add01(self):
        # Call the function under test
        result = add(1, 1)
        print("Result:", result)
    def test_add02(self):
        # Call the function under test
        result = add(1, 2)
        print("Result:", result)
    def test_add03(self):
        # Call the function under test
        result = add(1, 3)
        print("Result:", result)
|
[
"[email protected]"
] | |
5a3675690263bd6753e2de539da4af61e2c1edf3
|
4f4635caac8e8dd5d1ef727f9945a1c3bbc61650
|
/python/hackerearth/Arrays/StrangeGame.py
|
989322470a387ebd6d7c074cf75481dc6a278ea0
|
[
"MIT"
] |
permissive
|
ramo/competitive-programming-solutions
|
99cdc4a66d241d435b2e9be1056dff229acb48f9
|
3d857f8e492debbf229b35d50c6d2bebb83b57c4
|
refs/heads/master
| 2021-07-12T18:10:02.998097 | 2019-01-09T17:44:54 | 2019-01-09T17:44:54 | 136,951,958 | 3 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
for _ in range(int(input())):
n, k = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
mx = max(b) + 1
ans = 0
for i in range(n):
d = mx - a[i]
if d > 0:
ans += d * k
print(ans)
|
[
"[email protected]"
] | |
bd5d4faa17a341677a9d9f49921f040b2ffa8302
|
0391b9a73193e36156cd17c2f778eb03b96f575e
|
/seism_to_csv_Z.py
|
00a5f6552ee8c47efe3c0ac2d693281b17f5f51c
|
[] |
no_license
|
xian-ran/anisotropy_nn
|
0349565eafef87c2ab32ff59859409520e5c8bc7
|
65bee9be0673a48bfd87774c1d1b8b2a90ec8541
|
refs/heads/master
| 2022-04-05T17:07:33.910285 | 2020-02-18T17:05:21 | 2020-02-18T17:05:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 574 |
py
|
# Z-component
# column -- timesample, row -- trace
import obspy
import numpy as np
import pandas as pd
st = obspy.read('model_1\\model+GathNP-Z.sgy')
N_traces = len(st)
N_timesamples = len(st[0])
for i in range(1,1101):
print('\t',i,'\t/\t1100')
st = obspy.read('model_{}/model+GathNP-Z.sgy'.format(i))
data = np.empty([N_timesamples])
for n in range(N_traces):
data = np.vstack((data,st[n]))
df = pd.DataFrame(data[1:])
df.to_csv('csv_models_2frac_Thomsen_saturated_full_formulae_Z\\model_{}.csv'.format(i),index=None)
|
[
"[email protected]"
] | |
79c9a8a19c7a2c262c379b8827e57615fb0863a1
|
228fc2d62fe49b75849dfd0ee2e58e1db2dff3a1
|
/app/extensions.py
|
6e1b0fdf409d98f52a4a5d55f998fe32683d3080
|
[] |
no_license
|
wggglggg/wggglggg-todoism
|
de59b846c4ad52550e7972760047b99deea42ade
|
f52a18e3b3754bb86144a520a1578de22bfe8d54
|
refs/heads/master
| 2023-07-07T01:32:37.194325 | 2021-08-14T08:25:43 | 2021-08-14T08:25:43 | 387,105,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,529 |
py
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from faker import Faker
from flask_babel import Babel
from flask_login import current_user
from flask import request, current_app
db = SQLAlchemy()
csrf = CSRFProtect()
login_manager = LoginManager()
fake = Faker()
babel = Babel()
@login_manager.user_loader
def load_user(user_id):
from app.models import User
    return User.query.get(int(user_id))  # return the User object if found, otherwise None
# Determine the user's preferred locale (language/region)
@babel.localeselector
def get_locale():
if current_user.is_authenticated and current_user.locale is not None:
        return current_user.locale  # if the user is logged in and has a locale set, use it
locale = request.cookies.get('locale')
    if locale is not None:  # otherwise fall back to the locale stored in the request cookie
return locale
    '''
    If the cookie has no locale value either, match the Accept-Language header
    sent with the request (e.g. "Accept-Language: zh-CN,zh;q=0.9") against the
    TODOISM_LOCALES list in the config and return the closest match.
    '''
return request.accept_languages.best_match(current_app.config['TODOISM_LOCALES'])
@babel.timezoneselector
def get_timezone():
if current_user.is_authenticated and current_user.timezone is not None:
return current_user.timezone
timezone = request.cookies.get('timezone')
if timezone is not None:
return timezone
return None
|
[
"[email protected]"
] | |
4d756080e3866e9ce0f11af949a141e29330102a
|
14ed9961ba4a685b262c52d7234ac1cb76b9972c
|
/Codes/MPC/MPC-ball_balancer/controller.py
|
c8173b9bdeff170021ca87920d53e756bbd403dd
|
[] |
no_license
|
gongchenooo/CS339-Quanser-Robots
|
5fbfc3bb2b8f5596b91f001127d86633fd2c6d06
|
e6208a2d48c85594dca6a115934ca9accb47db9d
|
refs/heads/master
| 2023-03-04T03:18:55.559660 | 2021-02-18T08:02:04 | 2021-02-18T08:02:04 | 339,973,882 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,607 |
py
|
import numpy as np
from Hive import Hive
from Hive import Utilities
from cannon import *
class MPC(object):
def __init__(self, env, config):
self.env = env
mpc_config = config["mpc_config"]
self.horizon = mpc_config["horizon"]
self.numb_bees = mpc_config["numb_bees"]
self.max_itrs = mpc_config["max_itrs"]
self.gamma = mpc_config["gamma"]
self.action_low = mpc_config["action_low"]
self.action_high = mpc_config["action_high"]
self.evaluator = Evaluator(self.gamma)
def act(self, state, dynamic_model):
'''
        Optimize the action sequence for the current state (the Artificial Bee
        Colony optimizer is kept only as commented-out code; the Cannon
        optimizer below is what actually runs).
        :param state: (numpy array) current state
        :param dynamic_model: learned system dynamics model
        :return: the first action of the optimized action sequence
'''
self.evaluator.update(state, dynamic_model)
'''
optimizer = Hive.BeeHive( lower = [float(self.action_low)] * self.horizon,
upper = [float(self.action_high)] * self.horizon,
fun = self.evaluator.evaluate,
numb_bees = self.numb_bees,
max_itrs = self.max_itrs,
verbose=False)
'''
optimizer = Cannon(lower=[[float(self.action_low)]*2]*self.horizon, upper=[[float(self.action_high)]*2]*self.horizon,fun=self.evaluator.evaluate)
cost = optimizer.run()
#print("Solution: ",optimizer.solution[0])
#print("Fitness Value ABC: {0}".format(optimizer.best))
# Uncomment this if you want to see the performance of the optimizer
#Utilities.ConvergencePlot(cost)
return optimizer.solution[0]
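# Hedged sketch (not part of the original module): the configuration shape that
# MPC.__init__ above reads from config["mpc_config"]. Every numeric value below
# is an invented placeholder; env, state and dynamic_model would come from the
# surrounding project.
_EXAMPLE_CONFIG = {
    "mpc_config": {
        "horizon": 20,       # length of the planned action sequence
        "numb_bees": 8,      # population size for the optimizer
        "max_itrs": 20,      # optimizer iterations per action query
        "gamma": 0.8,        # discount factor used by Evaluator
        "action_low": -5.0,  # lower bound of each action dimension
        "action_high": 5.0,  # upper bound of each action dimension
    }
}
# mpc = MPC(env, _EXAMPLE_CONFIG)
# action = mpc.act(state, dynamic_model)  # first action of the optimized plan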
class Evaluator(object):
def __init__(self, gamma=0.8):
self.gamma = gamma
self.Q = np.diag([1e-2, 1e-2, 1e-0, 1e-0, 1e-4, 1e-4, 1e-2, 1e-2]) # see dim of state space
self.R = np.diag([1e-4, 1e-4]) # see dim of action space
self.min_rew = 1e-4
def update(self, state, dynamic_model):
self.state = state
self.dynamic_model = dynamic_model
def evaluate(self, actions):
actions = np.array(actions)
horizon = actions.shape[0]
rewards = 0
state_tmp = self.state.copy()
for j in range(horizon):
            # concatenate the current state with the two action components
            input_data = np.concatenate((state_tmp, [actions[j][0], actions[j][1]]))
state_dt = self.dynamic_model.predict(input_data)
state_tmp = state_tmp + state_dt[0]
rewards -= (self.gamma ** j) * self.get_reward(state_tmp, actions[j])
return rewards
def get_reward(self,obs, action):
err_s = (self.state - obs).reshape(-1,) # or self._state
err_a = action.reshape(-1,)
quadr_cost = err_s.dot(self.Q.dot(err_s)) + err_a.dot(self.R.dot(err_a))
state_max = np.array([np.pi/4., np.pi/4., 0.15, 0.15, 4.*np.pi, 4.*np.pi, 0.5, 0.5])
obs_max = state_max.reshape(-1, )
actmax = np.array([5.0, 5.0])
act_max = actmax.reshape(-1, )
max_cost = obs_max.dot(self.Q.dot(obs_max)) + act_max.dot(self.R.dot(act_max))
# Compute a scaling factor that sets the current state and action in relation to the worst case
self.c_max = -1.0 * np.log(self.min_rew) / max_cost
# Calculate the scaled exponential
        rew = np.exp(-self.c_max * quadr_cost)  # c_max > 0, quadr_cost >= 0
return float(rew)
|
[
"[email protected]"
] | |
47705667e33f6a7904b1f49b64c31ef0d425499d
|
613bac317b6094b6d055e1d2e83576611a47b535
|
/Lab-2/filters.py
|
6d18959c8ef3b325fdd875054cb09dcaf57c87d2
|
[] |
no_license
|
Bylundc/ME-499-Python
|
2b082b9608613e35b5fc046d3e4d74dbb498a68d
|
30fedc6855268bc61a2a4f5cf25eeaee442fa502
|
refs/heads/master
| 2021-01-19T03:52:24.929976 | 2017-07-26T20:11:39 | 2017-07-26T20:11:39 | 87,340,116 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,861 |
py
|
#!/usr/bin/env python
#import Gnuplot, Gnuplot.funcutils
import numpy
from sensor import *
# load raw data in
rawme = numpy.loadtxt("raw",usecols=(1,))
print rawme
# load raw data in
rawmed = numpy.loadtxt("raw",usecols=(1,))
# define a MEAN filter (uses the module-level window width w entered below)
def filtme(data):
k = 0
filtered = []
while True:
filtered += [sum(data[k:k+w])/w]
if k < len(data)-w:
k +=1
else:
break
return filtered
# define a MEDIAN filter
def filtmed(data, w = 3):
k = 0
filteredm = []
while True:
        filteredm += [numpy.median(data[k:k+w])]
        if k < len(data)-w:
k +=1
else:
break
return filteredm
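# Hedged usage sketch (not part of the original lab script): applying the
# median filter to a short, made-up signal. The numbers below are invented
# purely for illustration; the helper is defined but never called here.
def _demo_filtmed():
    demo_signal = [3, 1, 4, 1, 5, 9, 2, 6]
    # each output sample is the median of a 3-sample window of the input
    return filtmed(demo_signal, w=3)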
# Ask for a filter width
w = int(raw_input('Enter a filter width: ')) #width
if w % 2 == 0:
print "Width is even, choose an odd number."
elif w < 0:
print "Width is negative, choose a positive number."
else:
print filtme(rawme)
"""
# save filtered data
print_sensor_data(filtme(rawme), 'filtered')
# load in filtered data
filtered = numpy.loadtxt("filtered",usecols=(1,))
# plot raw data vs filtered data - MEAN
gplot = Gnuplot.Gnuplot(persist=1)
gplot.title("Filtered Data vs Raw - Mean")
rawme = Gnuplot.PlotItems.Data(rawme, with_="linespoints lt rgb 'blue' lw 1 pt 1", title="raw")
filteredme = Gnuplot.PlotItems.Data(filtered, with_="linespoints lt rgb 'black' lw 1 pt 1", title="filtered")
gplot.plot(rawme,filteredme)
# save filtered data
print_sensor_data(filtmed(rawmed), 'filteredm')
# load in filtered data
filteredm = numpy.loadtxt("filteredm",usecols=(1,))
# plot raw data vs filtered data - MEDIAN
g = Gnuplot.Gnuplot(persist=1)
g.title("Filtered Data vs Raw - Median")
rawmed = Gnuplot.PlotItems.Data(rawmed, with_="linespoints lt rgb 'blue' lw 1 pt 1", title="raw")
filteredmed = Gnuplot.PlotItems.Data(filteredm, with_="linespoints lt rgb 'red' lw 1 pt 1", title="filtered")
g.plot(rawmed,filteredmed)
"""
|
[
"[email protected]"
] | |
9a1f313eb6e673c676202754084889113cd6fc4f
|
9f682c3b6880c1683b5b7d2fa16b7f2f82398da9
|
/03_Django/나의 프로젝트/articles/migrations/0001_initial.py
|
d7027bf90ed0499d6386d586aedc9616eebf1c68
|
[] |
no_license
|
nsk324/TIL
|
f5c132ed377cbed4e40889a1668d8e91c1ec44cc
|
55438d5db02ea7b080ef621bb17b0ba9a75fe27a
|
refs/heads/master
| 2023-01-11T21:42:24.899763 | 2019-11-29T05:29:29 | 2019-11-29T05:29:29 | 195,937,996 | 1 | 1 | null | 2023-01-07T21:53:36 | 2019-07-09T05:22:45 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,415 |
py
|
# Generated by Django 2.2.6 on 2019-10-22 08:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ('-pk',),
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='articles.Article')),
],
options={
'ordering': ('-pk',),
},
),
]
|
[
"[email protected]"
] | |
f01c131352814fb648562b0c77b9f73fbbc0514c
|
96f5c1c61566a1f65a64838043d36eaaa6f27864
|
/lhalo_to_hdf5/__init__.py
|
043a4accd5ba90bcb2cf9d11c39cf9bfb9a2eb8c
|
[
"MIT"
] |
permissive
|
jacobseiler/short_scripts
|
3ac19880c820d8b4a3e681760dceee53ae43c56d
|
a52637dce09407059b45fd4d5c32af5f8a87c3f7
|
refs/heads/master
| 2021-04-15T11:37:04.509829 | 2019-10-03T06:09:39 | 2019-10-03T06:09:39 | 126,785,952 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 29 |
py
|
__all__ = ("lhalo_to_hdf5",)
|
[
"[email protected]"
] | |
cdbbb2e4e9b0b6b4cbd73db33338923d64f8d519
|
a862bf03863411c3bdeb34a16af0f61647384172
|
/utils/tests/test_utils.py
|
f5a4c2bbf2442581d0e25bb4e075ea662b86ed88
|
[
"MIT"
] |
permissive
|
braun-steven/torch-utils
|
87765e542dc855c2a107b7c048899262a93d22be
|
e4a7057217de3daf7eeef144ca7b12bc909b8473
|
refs/heads/master
| 2022-11-28T16:14:12.072970 | 2019-09-27T11:12:02 | 2019-09-27T11:12:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,437 |
py
|
import numpy as np
import torch
from torch import nn
import datetime
import time
import os
import tempfile
import unittest
from unittest import TestCase
from utils import (
ensure_dir,
generate_run_base_dir,
count_params,
load_args,
save_args,
set_cuda_device,
clone_args,
set_seed,
)
import argparse
class Test(TestCase):
def test_ensure_dir(self):
d = os.path.join(TMP, "a", "b", "c") + "/"
ensure_dir(d)
self.assertTrue(os.path.isdir(d))
def test_generate_run_base_dir(self):
res_dir = os.path.join(TMP, "res")
t0 = time.time()
tag = "tag"
sub_dirs = ["a", "b", "c"]
generate_run_base_dir(
result_dir=res_dir, timestamp=t0, tag=tag, sub_dirs=sub_dirs
)
date_str = datetime.datetime.fromtimestamp(t0).strftime("%y%m%d_%H%M")
self.assertTrue(
os.path.isdir(os.path.join(res_dir, date_str + "_" + tag, *sub_dirs))
)
def test_count_params(self):
linear = nn.Linear(123, 42)
n_weights = 123 * 42
n_bias = 42
n_total = n_weights + n_bias
self.assertEqual(n_total, count_params(linear))
def test_load_save_args(self):
parser = argparse.ArgumentParser()
args = parser.parse_args(args=[])
args.__dict__ = {"name": "test", "foo": "bar"}
path = os.path.join(TMP, "args") + "/"
ensure_dir(path)
save_args(args, path)
args_loaded = load_args(path)
self.assertEqual(args, args_loaded)
def test_clone_args(self):
parser = argparse.ArgumentParser()
args = parser.parse_args(args=[])
args.__dict__ = {"name": "test", "foo": "bar"}
cloned = clone_args(args)
self.assertEqual(args.__dict__, cloned.__dict__)
def test_set_cuda_device(self):
set_cuda_device([0, 1, 2])
self.assertEqual(os.environ["CUDA_VISIBLE_DEVICES"], "0,1,2")
def test_set_seed(self):
seed = 42
set_seed(seed)
np_samples_a = np.random.randn(10)
torch_samples_a = torch.randn(10)
set_seed(seed)
np_samples_b = np.random.randn(10)
torch_samples_b = torch.randn(10)
self.assertTrue(np.all(np_samples_a == np_samples_b))
self.assertTrue(torch.all(torch_samples_a == torch_samples_b))
if __name__ == "__main__":
TMP = tempfile.gettempdir()
unittest.main()
|
[
"[email protected]"
] | |
eee490dcf526ffb10b67a1324f01736b974f8ce9
|
89f8a2e609c2b2a7e4ca10be3830200c7e8e438e
|
/ftp_homework/ftp_1/bin/start_server.py
|
e0741d5f538a88369aa9ea5194dab97ea4334bde
|
[] |
no_license
|
boundshunter/s5-study
|
b8265ccc0d09f19624002b5919c5fb6104bf65d3
|
528eda7435a14a2a79c88af02695efec13972f25
|
refs/heads/master
| 2018-09-27T17:40:28.352951 | 2018-06-11T15:38:13 | 2018-06-11T15:38:13 | 111,669,896 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'jfsu'
import sys
import os
BaseDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BaseDir)
from core import ftpserver
if __name__ == '__main__':
sv = ftpserver.FtpServer()
|
[
"[email protected]"
] | |
812988adf18876c0cce0bafb504a64050e1ff7f7
|
ddcf878cca43d49f73fd673279a97e82ced521e8
|
/peyotl/nexson_proxy.py
|
0695ac59e11bb24cc41859e401f46be707764742
|
[
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
OpenTreeOfLife/peyotl
|
ca5fcbc4f1754c3da7a25c93d89cfeaaad17057f
|
b50f7217966c18195c9b52be42454513ffa3e7f3
|
refs/heads/master
| 2023-08-03T14:35:46.793662 | 2023-07-26T20:30:08 | 2023-07-26T20:30:08 | 16,637,087 | 6 | 4 |
BSD-2-Clause
| 2023-07-24T20:02:30 | 2014-02-08T05:52:12 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 13,589 |
py
|
#!/usr/bin/env python
"""Provides high level wrappers around a Nexson data model blob to
let it be treated as if it were a list of OTUs and a list of trees
Accessors return either references to part of the NexSON or wrappers around
those entities (not copies!)
Weakrefs are used, so the more inclusive containers must be kept in scope
while you are accessing constituents.
Currently converts any NexSON blob to HBF v1.2 data model!
"""
from peyotl.nexson_syntax import (BY_ID_HONEY_BADGERFISH,
convert_nexson_format,
detect_nexson_version,
get_nexml_el,
read_as_json)
from peyotl.utility.str_util import is_str_type
from peyotl.utility import get_logger
import weakref
_LOG = get_logger(__name__)
def otu_iter_nexson_proxy(nexson_proxy, otu_sort=None):
"""otu_sort can be None (not sorted or stable), True (sorted by ID lexigraphically)
or a key function for a sort function on list of otuIDs
Note that if there are multiple OTU groups, the NexSON specifies the order of sorting
of the groups (so the sort argument here only refers to the sorting of OTUs within
a group)
"""
nexml_el = nexson_proxy._nexml_el
og_order = nexml_el['^ot:otusElementOrder']
ogd = nexml_el['otusById']
for og_id in og_order:
        og = ogd[og_id]['otuById']
        if otu_sort is None:
            for k, v in og.items():
yield nexson_proxy._create_otu_proxy(k, v)
else:
key_list = list(og.keys())
if otu_sort is True:
key_list.sort()
else:
key_list.sort(key=otu_sort)
for k in key_list:
v = og[k]
yield nexson_proxy._create_otu_proxy(k, v)
def tree_iter_nexson_proxy(nexson_proxy):
"""Iterates over NexsonTreeProxy objects in order determined by the nexson blob"""
nexml_el = nexson_proxy._nexml_el
tg_order = nexml_el['^ot:treesElementOrder']
tgd = nexml_el['treesById']
for tg_id in tg_order:
tg = tgd[tg_id]
tree_order = tg['^ot:treeElementOrder']
tbid = tg['treeById']
otus = tg['@otus']
for k in tree_order:
v = tbid[k]
yield nexson_proxy._create_tree_proxy(tree_id=k, tree=v, otus=otus)
def reverse_edge_by_source_dict(ebs, root_id):
d = {}
for edge_by_id in ebs.values():
for edge_id, edge in edge_by_id.items():
t = edge['@target']
assert t not in d
d[t] = (edge_id, edge)
assert root_id in ebs
assert root_id not in d
d[root_id] = (None, None)
return d
class NexsonProxy(object):
class NexsonOTUProxy(object):
def __init__(self, nexson_proxy, otu_id, otu):
self._nexson_proxy = nexson_proxy
self._otu_id = otu_id
self._otu = otu
def __getitem__(self, key):
return self.otu[key]
def __setitem__(self, key, value):
self.otu[key] = value
@property
def ott_id(self):
return self._otu.get('^ot:ottId')
@property
def otu(self):
return self._otu
@property
def _id(self):
return self._otu_id
def get(self, key, default=None):
return self._otu.get(key, default)
        def keys(self):
            return self._otu.keys()
def __init__(self, filepath='', nexson=None):
self.filepath = filepath
if nexson is None:
if not filepath:
raise ValueError('Either a filepath or nexson argument must be provided')
self._nexson = read_as_json(self.filepath)
else:
self._nexson = nexson
v = detect_nexson_version(self._nexson)
if v != BY_ID_HONEY_BADGERFISH:
_LOG.debug('NexsonProxy converting to hbf1.2')
convert_nexson_format(self._nexson, BY_ID_HONEY_BADGERFISH)
self._nexml_el = get_nexml_el(self._nexson)
self._otu_cache = {}
self._tree_cache = {}
self._wr = None
def otu_iter(self):
return iter(otu_iter_nexson_proxy(self))
def tree_iter(self):
return iter(tree_iter_nexson_proxy(self))
def _create_otu_proxy(self, otu_id, otu):
np = self._otu_cache.get(otu_id)
if np is None:
if self._wr is None:
self._wr = weakref.proxy(self)
np = NexsonProxy.NexsonOTUProxy(self._wr, otu_id, otu)
self._otu_cache[otu_id] = np
return np
def _create_tree_proxy(self, tree_id, tree, otus):
np = self._tree_cache.get(tree_id)
if np is None:
if self._wr is None:
self._wr = weakref.proxy(self)
np = NexsonTreeProxy(tree_id=tree_id, tree=tree, otus=otus, nexson_proxy=self._wr)
self._tree_cache[tree_id] = np
return np
def get_tree(self, tree_id):
np = self._tree_cache.get(tree_id)
if np is not None:
return np
tgd = self._nexml_el['treesById']
for tg in tgd.values():
tbid = tg['treeById']
if tree_id in tbid:
otus = tg['@otus']
tree = tbid[tree_id]
return self._create_tree_proxy(tree_id=tree_id, tree=tree, otus=otus)
return None
def get_otu(self, otu_id):
np = self._otu_cache.get(otu_id)
if np is not None:
return np
ogd = self._nexml_el['otusById']
for og in ogd.values():
o = og['otuById'].get(otu_id)
if o is not None:
return self._create_otu_proxy(otu_id=otu_id, otu=o)
return None
class NexsonTreeProxy(object):
"""Provide more natural operations by wrapping a NexSON 1.2 tree blob and its otus"""
class NexsonNodeProxy(object):
def __init__(self, tree, edge_id, edge, node_id=None, node=None):
self._tree = tree
self._node_id = node_id
self._node = node
self._edge_id = edge_id
self._edge = edge
self._otu = None
def get(self, key, default=None):
return self.node.get(key, default)
def __getitem__(self, key):
return self.node[key]
def __setitem__(self, key, value):
self.node[key] = value
def keys(self):
return self.node.keys()
@property
def is_leaf(self):
return self._tree.is_leaf(self.node_id)
def child_iter(self):
return self._tree.child_iter(self.node_id)
@property
def ott_id(self):
return self._tree.get_ott_id(self.node)
@property
def edge_id(self):
return self._edge_id
@property
def edge(self):
return self._edge
@property
def _id(self):
return self.node_id
@property
def parent(self):
if self._edge_id is None:
return None
par_node_id = self.edge['@source']
par_edge_id, par_edge = self._tree._find_edge_from_child(par_node_id)
return self._tree._create_node_proxy_from_edge(edge_id=par_edge_id, edge=par_edge, node_id=par_node_id)
@property
def node_id(self):
if self._node_id is None:
self._node_id = self._edge['@target']
return self._node_id
@property
def otu(self):
if self._otu is None:
otu_id, otu = self._tree._raw_otu_for_node(self.node)
self._otu = self._tree._nexson_proxy._create_otu_proxy(otu_id=otu_id, otu=otu)
return self._otu
@property
def node(self):
if self._node is None:
self._node = self._tree.get_nexson_node(self.node_id)
return self._node
def __iter__(self):
return iter(nexson_tree_preorder_iter(self._tree,
node=self.node,
node_id=self.node_id,
edge_id=self.edge_id,
edge=self.edge))
def preorder_iter(self):
return iter(nexson_tree_preorder_iter(self))
def __init__(self, tree, tree_id=None, otus=None, nexson_proxy=None):
self._nexson_proxy = nexson_proxy
self._nexson_tree = tree
self._edge_by_source_id = tree['edgeBySourceId']
self._node_by_source_id = tree['nodeById']
if is_str_type(otus):
self._otus_group_id = otus
self._otus = nexson_proxy._nexml_el['otusById'][otus]['otuById']
else:
self._otus = otus
self._tree_id = tree_id
# not part of nexson, filled on demand. will be dict of node_id -> (edge_id, edge) pair
self._edge_by_target = None
self._wr = None
self._node_cache = {}
def get_nexson_node(self, node_id):
return self._node_by_source_id[node_id]
def get_node(self, node_id):
np = self._node_cache.get(node_id)
if np is not None:
return np
edge_id, edge = self._find_edge_from_child(node_id)
node = self._node_by_source_id[node_id]
return self._create_node_proxy_from_edge(edge_id, edge, node_id, node)
def get(self, key, default=None):
return self._nexson_tree.get(key, default)
def __getitem__(self, key):
return self._nexson_tree[key]
def __setitem__(self, key, value):
self._nexson_tree[key] = value
@property
def edge_by_target(self):
"""Returns a reference to the dict of target node id to (edge_id, edge)"""
if self._edge_by_target is None:
self._edge_by_target = reverse_edge_by_source_dict(self._edge_by_source_id,
self._nexson_tree['^ot:rootNodeId'])
return self._edge_by_target
def _find_edge_from_child(self, node_id):
"""Returns (edge_id, edge)"""
return self.edge_by_target[node_id]
def _create_node_proxy_from_edge(self, edge_id, edge, node_id=None, node=None):
np = self._node_cache.get(edge_id)
if np is None:
if self._wr is None:
self._wr = weakref.proxy(self)
np = NexsonTreeProxy.NexsonNodeProxy(self._wr, edge_id=edge_id, edge=edge, node_id=node_id, node=node)
self._node_cache[edge_id] = np
if node_id is not None:
self._node_cache[node_id] = np
return np
def child_iter(self, node_id):
return nexson_child_iter(self._edge_by_source_id.get(node_id, {}), self)
def is_leaf(self, node_id):
return node_id not in self._edge_by_source_id
def get_ott_id(self, node):
return self._raw_otu_for_node(node)[1].get('^ot:ottId')
def _raw_otu_for_node(self, node):
otu_id = node['@otu']
# _LOG.debug('otu_id = {}'.format(otu_id))
return otu_id, self._otus[otu_id]
def annotate(self, obj, key, value):
obj[key] = value
def __iter__(self):
return iter(nexson_tree_preorder_iter(self))
def preorder_iter(self):
return iter(nexson_tree_preorder_iter(self))
def nodes(self):
return [i for i in iter(self)]
def nexson_child_iter(edict, nexson_tree_proxy):
for edge_id, edge in edict.items():
yield nexson_tree_proxy._create_node_proxy_from_edge(edge_id, edge)
def nexson_tree_preorder_iter(tree_proxy, node_id=None, node=None, edge_id=None, edge=None):
"""Takes a tree in "By ID" NexSON (v1.2). provides and iterator over:
NexsonNodeProxy object
where the edge of the object is the edge connectin the node to the parent.
The first node will be the root and will have None as it's edge
"""
tree = tree_proxy._nexson_tree
ebsid = tree['edgeBySourceId']
nbid = tree['nodeById']
if edge_id is not None:
assert edge is not None
if node_id is None:
node_id = edge['@target']
else:
assert node_id == edge['@target']
if node is None:
node = nbid[node_id]
else:
assert node == nbid[node_id]
yield tree_proxy._create_node_proxy_from_edge(edge_id, edge, node_id=node_id, node=node)
root_id = node_id
elif node_id is not None:
if node is None:
node = nbid[node_id]
else:
assert node == nbid[node_id]
yield tree_proxy._create_node_proxy_from_edge(None, None, node_id=node_id, node=node)
root_id = node_id
else:
root_id = tree['^ot:rootNodeId']
root = nbid[root_id]
yield tree_proxy._create_node_proxy_from_edge(None, None, node_id=root_id, node=root)
stack = []
new_stack = [(i['@target'], edge_id, i) for edge_id, i in ebsid[root_id].items()]
stack.extend(new_stack)
while stack:
target_node_id, edge_id, edge = stack.pop()
node = nbid[target_node_id]
yield tree_proxy._create_node_proxy_from_edge(edge_id=edge_id, edge=edge, node_id=target_node_id)
daughter_edges = ebsid.get(target_node_id)
if daughter_edges is not None:
new_stack = [(i['@target'], edge_id, i) for edge_id, i in daughter_edges.items()]
stack.extend(new_stack)
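# Hedged usage sketch (not part of the original module): walking every tree of a
# study with the proxies defined above. "study.json" is a hypothetical path; the
# method and attribute names are the ones implemented in this file.
def _demo_walk_study(path="study.json"):
    proxy = NexsonProxy(filepath=path)  # keep the proxy in scope (weakrefs are used internally)
    for tree in proxy.tree_iter():
        for nd in tree.preorder_iter():
            if nd.is_leaf:
                _ = nd.otu.ott_id  # OTT id mapped to this tip, or None if unmapped
    return proxy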
|
[
"[email protected]"
] | |
27a9e101cd4a7f253db5f5c89fb3068918340ead
|
34745a8d54fa7e3d9e4237415eb52e507508ad79
|
/Python Fundamentals/03 Lists Basics/Exercises/07_Easter_Gifts.py
|
172ea7853a18b1443adb30323f730642b61c1f6b
|
[] |
no_license
|
DilyanTsenkov/SoftUni-Software-Engineering
|
50476af0dc88b267d72c56fa87eeb88d841164b2
|
fe446e3a50a00bb2e48d71ab8f783e0a4a406094
|
refs/heads/main
| 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 870 |
py
|
gifts_names = input().split(" ")
command = input()
while command != "No Money":
command_list = command.split(" ")
if command_list[0] == "OutOfStock":
if command_list[1] in gifts_names:
for i in range(len(gifts_names)):
if gifts_names[i] == command_list[1]:
gifts_names[i] = "None"
elif command_list[0] == "Required" and int(command_list[2]) > 0 and int(command_list[2]) <= int(
len(gifts_names)) - 1:
gifts_names[int(command_list[2])] = command_list[1]
elif command_list[0] == "JustInCase":
gifts_names[int(len(gifts_names)) - 1] = command_list[1]
command = input()
for n in range(len(gifts_names)):
if "None" in gifts_names:
gifts_names.remove("None")
gifts_names_print = " ".join(gifts_names)
print(gifts_names_print)
|
[
"[email protected]"
] |