Dataset column schema (name, type, observed range; ⌀ = column may be null):

| Column | Type | Observed range |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 5 - 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3-248 |
| max_stars_repo_name | string | lengths 5-125 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1 - 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24, ⌀ |
| max_issues_repo_path | string | lengths 3-248 |
| max_issues_repo_name | string | lengths 5-125 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1 - 67k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24, ⌀ |
| max_forks_repo_path | string | lengths 3-248 |
| max_forks_repo_name | string | lengths 5-125 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1 - 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24, ⌀ |
| content | string | lengths 5 - 2.06M |
| avg_line_length | float64 | 1 - 1.02M |
| max_line_length | int64 | 3 - 1.03M |
| alphanum_fraction | float64 | 0 - 1 |
| count_classes | int64 | 0 - 1.6M |
| score_classes | float64 | 0 - 1 |
| count_generators | int64 | 0 - 651k |
| score_generators | float64 | 0 - 1 |
| count_decorators | int64 | 0 - 990k |
| score_decorators | float64 | 0 - 1 |
| count_async_functions | int64 | 0 - 235k |
| score_async_functions | float64 | 0 - 1 |
| count_documentation | int64 | 0 - 1.04M |
| score_documentation | float64 | 0 - 1 |
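The schema above describes one record per source file. As a quick illustration of how these columns might be consumed, here is a minimal pandas sketch; the file name `rows.jsonl` and the filtering threshold are assumptions for the example, not part of the dump.

```python
import json

import pandas as pd

# Hypothetical export: one JSON object per line, keyed by the column names
# listed in the schema above.
def load_rows(path="rows.jsonl"):
    with open(path, "r", encoding="utf-8") as fh:
        return pd.DataFrame([json.loads(line) for line in fh])

if __name__ == "__main__":
    df = load_rows()
    # Keep Python files with a reasonably high documentation score (0..1).
    documented = df[(df["ext"] == "py") & (df["score_documentation"] > 0.3)]
    print(documented[["max_stars_repo_name", "max_stars_repo_path", "size"]])
```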
8d58501cd2a4cf7d4be038ee750ddd345cd594fc | 196 | py | Python | src/main.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | ["MIT"] | null | null | null | src/main.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | ["MIT"] | null | null | null | src/main.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | ["MIT"] | null | null | null |
from calendarApp import shell, models
import os
def main():
os.system("clear")
calendar = models.Calendar("main")
shell.main_screen(calendar)
if __name__ == "__main__":
main()
| 15.076923 | 38 | 0.673469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.117347 |
8d5852ea5b1463bc9be5da885619fc756c5bd1fc | 4,329 | py | Python | personal/Ervin/Word2Vec_recommender.py | edervishaj/spotify-recsys-challenge | 4077201ac7e4ed9da433bd10a92c183614182437 | ["Apache-2.0"] | 3 | 2018-10-12T20:19:57.000Z | 2019-12-11T01:11:38.000Z | personal/Ervin/Word2Vec_recommender.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | ["Apache-2.0"] | null | null | null | personal/Ervin/Word2Vec_recommender.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | ["Apache-2.0"] | 4 | 2018-10-27T20:30:18.000Z | 2020-10-14T07:43:27.000Z |
import time
import numpy as np
import scipy.sparse as sps
from gensim.models import Word2Vec
from tqdm import tqdm
from recommenders.recommender import Recommender
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.post_processing import eurm_to_recommendation_list
from recommenders.similarity.s_plus import dot_product
class W2VRecommender(Recommender):
"""
Requires gensim package: pip install gensim
"""
RECOMMENDER_NAME = "W2VRecommender"
def __init__(self):
        super().__init__()
def compute_model(self, negative=5, sg=1, size=50, min_count=1, workers=64, iter=1, window=None, verbose=False):
sentences = []
for row in tqdm(range(self.urm.shape[0]), desc='Generating sentences'):
words = self.urm.indices[self.urm.indptr[row]:self.urm.indptr[row+1]]
words = words.astype(np.str)
if len(words) > 0:
sentences.append(words.tolist())
if verbose:
print('[ Building Word2Vec model ]')
start_time = time.time()
if window is None:
window = np.max(self.urm.sum(axis=1).A1)
w2v = Word2Vec(sentences=sentences, sg=sg, size=size, min_count=min_count, workers=workers, iter=iter,
seed=123, negative=negative, window=window)
w2v.init_sims(replace=True)
self.kv = w2v.wv
# if verbose:
# print('[ Building Similarity Matrix ]')
#
# syn0norm = sps.csr_matrix(self.kv.syn0norm)
# self.model = dot_product(syn0norm, syn0norm.T, k=850)
if verbose:
print("time: " + str(int(time.time() - start_time) / 60))
def compute_rating(self, verbose=False, small=False, mode="offline", top_k=750):
if small:
self.urm = sps.csr_matrix(self.urm)[self.pid]
self.eurm = sps.lil_matrix(self.urm.shape, dtype=np.float32)
if verbose:
print('[ Computing ratings ]')
start_time = time.time()
for row in tqdm(range(1000, self.urm.shape[0]), desc='Calculating similarities'):
test_words = self.urm.indices[self.urm.indptr[row]:self.urm.indptr[row+1]]
test_words = test_words.astype(np.str)
most_sim = self.kv.most_similar(positive=test_words, topn=top_k)
tracks = [tup[0] for tup in most_sim]
sim = [tup[1] for tup in most_sim]
self.eurm[row, tracks] = sim
self.eurm = self.eurm.tocsr()
self.eurm.eliminate_zeros()
if verbose:
print("time: " + str(int(time.time() - start_time) / 60))
# def compute_rating2(self, verbose=False, small=False, mode="offline", remove_seed=True):
# if small:
# self.urm = sps.csr_matrix(self.urm)[self.pid]
# self.eurm = sps.lil_matrix(self.urm.shape, dtype=np.float32)
#
# if verbose:
# print('[ Computing ratings ]')
# start_time = time.time()
#
# for row in tqdm(range(1000, self.urm.shape[0]), desc='Calculating similarities'):
# test_words = self.urm.indices[self.urm.indptr[row]:self.urm.indptr[row+1]]
# test_words = test_words.astype(np.str)
# for w in test_words:
# most_sim = self.kv.most_similar(positive=w, topn=500)
# tracks = [tup[0] for tup in most_sim]
# sim = [tup[1] for tup in most_sim]
# self.eurm[row, tracks] = self.eurm[row, tracks].toarray() + sim
#
# print(self.eurm.shape)
# self.eurm = self.eurm.tocsr()
# self.eurm.eliminate_zeros()
#
# if verbose:
# print("time: " + str(int(time.time() - start_time) / 60))
if __name__ == '__main__':
dr = Datareader(only_load=True, mode='offline', test_num='1', verbose=False)
pid = dr.get_test_playlists().transpose()[0]
urm = dr.get_urm()
urm.data = np.ones(urm.data.shape[0])
ev = Evaluator(datareader=dr)
model = W2VRecommender()
model.fit(urm, pid)
model.compute_model(verbose=True, size=50)
model.compute_rating(verbose=True, small=True, top_k=750)
ev.evaluate(recommendation_list=eurm_to_recommendation_list(model.eurm, remove_seed=True, datareader=dr),
                name="W2V", old_mode=False)
| 37.973684 | 116 | 0.613075 | 3,392 | 0.783553 | 0 | 0 | 0 | 0 | 0 | 0 | 1,383 | 0.319473 |
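The recommender above treats every playlist row of the URM as a sentence of track ids, trains Word2Vec on those sentences, and then scores candidate tracks with `most_similar`. The sketch below is a stripped-down illustration of that idea using gensim alone; the toy playlists and hyperparameter values are invented, and the keyword names follow the gensim 3.x API used in the file (gensim 4 renames `size`/`iter` to `vector_size`/`epochs`).

```python
from gensim.models import Word2Vec

# Toy "playlists": each playlist is a sentence whose words are track ids.
playlists = [
    ["t1", "t2", "t3"],
    ["t2", "t3", "t4"],
    ["t1", "t3", "t5"],
]

# Same gensim 3.x style arguments as compute_model() above.
w2v = Word2Vec(sentences=playlists, sg=1, size=16, min_count=1,
               window=3, iter=10, seed=123)
w2v.init_sims(replace=True)

# Recommend tracks similar to the contents of a new playlist.
print(w2v.wv.most_similar(positive=["t1", "t2"], topn=3))
```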
8d58f2b0959a8386b4c708d7cc38bd2e9f103bb6 | 1,321 | py | Python | pyesasky/__init__.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | ["BSD-3-Clause"] | 13 | 2019-05-30T19:57:37.000Z | 2021-09-10T09:43:49.000Z | pyesasky/__init__.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | ["BSD-3-Clause"] | 21 | 2019-06-21T18:55:25.000Z | 2022-02-27T14:48:13.000Z | pyesasky/__init__.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | ["BSD-3-Clause"] | 8 | 2019-05-30T12:20:48.000Z | 2022-03-04T04:01:20.000Z |
from ._version import __version__ # noqa
from .pyesasky import ESASkyWidget # noqa
from .catalogue import Catalogue # noqa
from .catalogueDescriptor import CatalogueDescriptor # noqa
from .cooFrame import CooFrame # noqa
from .footprintSet import FootprintSet # noqa
from .footprintSetDescriptor import FootprintSetDescriptor # noqa
from .HiPS import HiPS # noqa
from .imgFormat import ImgFormat # noqa
from .jupyter_server import load_jupyter_server_extension # noqa
from .metadataDescriptor import MetadataDescriptor # noqa
from .metadataType import MetadataType # noqa
import json
from pathlib import Path
HERE = Path(__file__).parent.resolve()
with (HERE / "labextension" / "package.json").open() as fid:
data = json.load(fid)
# Jupyter Extension points
def _jupyter_nbextension_paths():
return [{'section': 'notebook',
# the path is relative to the `pyesasky` directory
'src': 'nbextension/static',
# directory in the `nbextension/` namespace
'dest': 'pyesasky',
# _also_ in the `nbextension/` namespace
'require': 'pyesasky/extension'}]
def _jupyter_server_extension_paths():
return [{"module": "pyesasky"}]
def _jupyter_labextension_paths():
return [{
"src": "labextension",
"dest": data["name"]
}]
| 33.025 | 65 | 0.711582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.30053 |
8d5933b202fa0260d94c68bc7edbd14a32abb844 | 2,930 | py | Python | visualize.py | jcamstan3370/MachineLearningPerovskites | d7bc433bac349bf53473dc6d636954cae996b8d2 | ["MIT"] | 6 | 2020-05-09T17:18:00.000Z | 2021-09-22T09:37:40.000Z | visualize.py | jstanai/ml_perovskites | d7bc433bac349bf53473dc6d636954cae996b8d2 | ["MIT"] | null | null | null | visualize.py | jstanai/ml_perovskites | d7bc433bac349bf53473dc6d636954cae996b8d2 | ["MIT"] | 1 | 2021-03-24T04:21:31.000Z | 2021-03-24T04:21:31.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jared
"""
import numpy as np
import pandas as pd
import myConfig
import matplotlib.pyplot as plt
from ast import literal_eval
from plotter import getTrendPlot1
from matplotlib.pyplot import figure
df = pd.read_csv(myConfig.extOutput)
dffExt = pd.read_csv(myConfig.featurePathExt)
dffExt = dffExt.copy().dropna(axis=0, how='any').reset_index()
y_predict_ext = df['yhat_ext']
print('Num dummy crystals: {}'.format(len(y_predict_ext)))
print([n for n in dffExt.columns if 'p_' not in n])
s = 'fracCl'
dffExt['yhat_ext'] = df['yhat_ext']
ylabel = '$E_{g}$ (eV)'
getTrendPlot1(dffExt, y_predict_ext, s,
ylabel = ylabel,
xlabel = s,
title = 'Trend')
plt.show()
'''
s = 'volume'
g = dffExt.groupby('fracCl')
for i, group in g:
getTrendPlot1(group, y_predict_ext, s,
ylabel = ylabel,
xlabel = s,
title = 'Trend',
scatter = False)
plt.show()
'''
s = 'fracCs'
g = dffExt.groupby('fracSn')
for i, group in g:
getTrendPlot1(group, y_predict_ext, s,
ylabel = ylabel,
xlabel = s,
title = 'Trend',
scatter = False)
plt.show()
'''
print(dffExt[['fracCs', 'fracRb', 'fracK', 'fracNa',
'fracSn' , 'fracGe',
'fracCl', 'fracI', 'fracBr', 'yhat_ext']].head(10))
'''
g = dffExt.groupby([
'fracCs', 'fracRb', 'fracK', 'fracNa',
'fracSn' , 'fracGe',
'fracCl', 'fracI', 'fracBr'])
x = []
y = []
x_all = []
y_all = []
for (gr, gi) in g:
labels = ['Cs', 'Rb', 'K', 'Na', 'Sn', 'Ge',
'Cl', 'I', 'Br']
#print(gr)
sarr = []
for i, n in enumerate(gr):
if i < 6:
m = 1
else:
m = 3
if n != 0:
#if n == 1.0:
sarr.append(labels[i] + '$_{' + str(int(4*m*n)) + '}$')
#else:
#sarr.append(labels[i] + '$_{' + str(4*m*n) + '}$')
#print(sarr, gr)
x += [''.join(sarr)]
y.append(gi['yhat_ext'].mean())
x_all += [''.join(sarr)]*len(gi)
y_all += gi['yhat_ext'].tolist()
print(len(x_all), len(x))
fig = plt.figure(figsize=(13, 4), dpi=200)
#(Atomic 3%, Lattice 10%)
#plt.title('Stability Trends')
plt.title('Direct Bandgap Trends')
#plt.ylabel('$\Delta E_{hull}$ (meV/atom)')
plt.ylabel('$E_{g}$ (eV)')
plt.xticks(rotation=90)
plt.scatter(x, y)
#figure(num=None, figsize=(8, 6), dpi=200, facecolor='w', edgecolor='k')
plt.savefig('/Users/Jared/Documents/test.png', bbox_inches='tight')
plt.show()
'''
plt.title('Bandgap Trends (Atomic 5%, Lattice 5%)')
plt.ylabel('E$_{g}$ (eV)')
plt.xticks(rotation=90)
plt.scatter(x_all, y_all)
figure(num=None, figsize=(8, 6), dpi=200, facecolor='w', edgecolor='k')
'''
| 23.821138 | 72 | 0.531058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,289 | 0.439932 |
8d5938563047da10af2e319b379482b6a7545552 | 237 | py | Python | 11-if-elif-else-condition.py | GunarakulanGunaretnam/python-basic-fundamentals | c62bf939fbaef8895d28f85af9ef6ced70801f96 | ["Apache-2.0"] | null | null | null | 11-if-elif-else-condition.py | GunarakulanGunaretnam/python-basic-fundamentals | c62bf939fbaef8895d28f85af9ef6ced70801f96 | ["Apache-2.0"] | null | null | null | 11-if-elif-else-condition.py | GunarakulanGunaretnam/python-basic-fundamentals | c62bf939fbaef8895d28f85af9ef6ced70801f96 | ["Apache-2.0"] | null | null | null |
name = input("Enter your name? ")
if name == "guna":
print("1234567890")
elif name == "david":
print("0987654321")
elif name == "rakulan":
print("12345")
elif name == "raj":
print("1234455667")
else:
print("No contacts found")
| 13.941176 | 33 | 0.632911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.455696 |
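For reference, the same contact lookup can be written without an if/elif chain; the dictionary version below is an alternative sketch, not part of the original lesson file.

```python
contacts = {
    "guna": "1234567890",
    "david": "0987654321",
    "rakulan": "12345",
    "raj": "1234455667",
}

name = input("Enter your name? ")
# dict.get returns the fallback string when the name is unknown.
print(contacts.get(name, "No contacts found"))
```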
8d595677f62dbebf986ab917f4b41f5f89af2fea | 13,409 | py | Python | InstagramCrawler.py | Bagas8015/Instagram-Posts-Crawler-Users-v1 | 82d5da12f7f6caf8c085085135134f58affb1ec7 | ["MIT"] | null | null | null | InstagramCrawler.py | Bagas8015/Instagram-Posts-Crawler-Users-v1 | 82d5da12f7f6caf8c085085135134f58affb1ec7 | ["MIT"] | null | null | null | InstagramCrawler.py | Bagas8015/Instagram-Posts-Crawler-Users-v1 | 82d5da12f7f6caf8c085085135134f58affb1ec7 | ["MIT"] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import emoji
import string
import csv
import os
def getFileSize(nameFile):
return os.stat(nameFile).st_size
browser = webdriver.Chrome()
def loginInstagram(url, username, password):
    browser.get(url)  # Go to the URL.
    time.sleep(2)  # Give the page a chance to load.
    browser.find_element_by_xpath('/html/body/span/section/main/article/div[2]/div[2]/p/a').click()  # Click through to the login page.
    # The next 3 lines fill in the login form and submit it.
print("Mengisi form login ....")
time.sleep(2)
browser.find_element_by_xpath('/html/body/span/section/main/div/article/div/div[1]/div/form/div[2]/div/label/input').send_keys(username)
browser.find_element_by_xpath('/html/body/span/section/main/div/article/div/div[1]/div/form/div[3]/div/label/input').send_keys(password)
browser.find_element_by_xpath('/html/body/span/section/main/div/article/div/div[1]/div/form/div[4]/button').click()
    time.sleep(3)  # Give the page a chance to load.
    browser.find_element_by_xpath('/html/body/div[3]/div/div/div[3]/button[2]').click()  # Close the pop-up that appears.
    browser.find_element_by_xpath('/html/body/span/section/nav/div[2]/div/div/div[3]/div/div[3]/a/span').click()  # Go to the user's profile page.
def getListFollowers(username, jml_followers = 0):
print("Sedang mengload data daftar followers " + username + " ....")
    time.sleep(3)  # Wait for the profile home page to finish loading
if jml_followers == 0:
        jml_followers = browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[2]/a/span').get_attribute('title')  # Get the number of followers shown for the user in the list
jml_followers.replace(',','')
    browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[2]/a').click()  # Click the link that opens the followers view
time.sleep(2)
followersList = browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul')
    lengthListFollowers = len(followersList.find_elements_by_css_selector('li'))  # Length of the followers list rendered so far
time.sleep(2)
    followersList.click()  # Click the empty bar of the first account
    actionChain = webdriver.ActionChains(browser)  # Grab an ActionChains instance
daftar = []
nilai_berulang = 0
batas_berulang = 0
while lengthListFollowers < int(jml_followers) and lengthListFollowers < 200:
time.sleep(1)
        browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li[' + str(lengthListFollowers-2) + ']').click()  # So the list can scroll down to the chosen limit
actionChain.key_down(Keys.SPACE).key_up(Keys.SPACE).perform()
if nilai_berulang == lengthListFollowers:
batas_berulang += 1
if batas_berulang == 4:
break
else:
batas_berulang = 0
nilai_berulang = lengthListFollowers
lengthListFollowers = len(browser.find_elements_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li'))
for i in range(1,lengthListFollowers+1):
if int(jml_followers) > 12:
daftar.append(browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li['+str(i)+']/div/div[1]/div[2]/div[1]/a').get_attribute('title'))
else:
daftar.append(browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li['+str(i)+']/div/div[2]/div[1]/div/div/a').get_attribute('title'))
return daftar
def writeToCSVandGTF(index, username, namafile):  # GTF = Get Total Followers from target; used to pick the next target.
print('Sedang Crawling target ' + username + ' ....')
try:
        browser.find_element_by_xpath('/html/body/span/section/main/div/div/article/div[1]/div/h2')  # Check whether the account is private; if not, control falls through to except
return 0, index
except:
time.sleep(2)
        translator = str.maketrans('', '', string.punctuation)  # Used to tidy up the text
        def give_emoji_free_text(text):  # Strips out every emoji
allchars = [str for str in text.encode('ascii', 'ignore').decode('utf-8')]
emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]
clean_text = ' '.join([str for str in text.encode('ascii', 'ignore').decode('utf-8').split() if not any(i in str for i in emoji_list)])
return clean_text
        def hashtag(text):  # Extracts the hashtags
char = text.encode('ascii', 'ignore').decode('utf-8').replace('\n',' ')
tag = []
teks = ''
tulis = 0
for i in range(len(char)):
if tulis == 1:
teks = teks + char[i]
if char[i] == '#':
tulis = 1
elif (char[i] == ' ' or i == len(char)-1) and teks != '':
teks = '#' + teks
tag.append(teks)
tulis = 0
teks = ''
return tag
        jml_followers = browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[2]/a/span').get_attribute('title')  # Get the target's total follower count
        jml_posts = browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[1]/span/span').text  # Get the target's total post count
jml_followers = jml_followers.replace(',','')
jml_posts = jml_posts.replace(',','')
if int(jml_posts) == 0:
return int(jml_followers), index
tes = 0
galat = 0
benar = 1
while benar == 1 and int(jml_posts) != 0:
try:
browser.find_element_by_xpath('/html/body/span/section/main/div/div['+str(tes)+']/article/div[1]/div/div[1]/div[1]').click()
benar = 0
except:
tes += 1
galat += 1
if galat == 10:
break
continue
time.sleep(1)
#Crawling post
limit = 0
while limit < int(jml_posts)-1 and int(jml_posts) != 0 and galat != 11:
#print("Sedang crawling data posts target " + username + " ....")
loading = False
kanan = False
kiri = False
try:
time.sleep(3)
browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/svg')
if limit > 0:
browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
loading = True
kanan = True
else:
browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
loading = True
kiri = True
except:
try:
### Ini jika ada bulet-buletan loading
if loading:
if kiri:
time.sleep(2)
browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
loading = False
kiri = False
continue
elif kanan:
time.sleep(2)
browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a[2]').click()
loading = False
kanan = False
continue
### Sampai sini lalu hasilnya akan dikontinue ke awal, untuk ngambil pos yang sebelumnya muter-muter
teks = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/div/li/div/div/div[2]/span').text #Mengambil captionnya dan menyimpannya dalam variabel teks
tag = hashtag(teks) #Meyimpan kumpulan tag
if len(tag) == 0:
tag = ''
teks = give_emoji_free_text(teks) #Menyingkirkan emoji dari teks
teks = teks.translate(translator).lower() #Membuat huruf menjadi kecil
except:
teks = ''
tag = ''
try:
try:
likes = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/section[2]/div/div/button/span').text #Untuk mengambil like yang punya banyak likes.
except:
likes = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/section[2]/div/div/button').text #Untuk likes-nya sedikit
likes = likes.replace('like this','').replace('like','')#Untuk me-replace 'like this' atau 'like'
except:
likes = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/section[2]/div/span/span').text #Untuk mendapatkan likes dari video
#print(teks, likes, tag)
try:
commentlist = len(browser.find_elements_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/ul')) #panjang dari banyak komen
comment = []
##print(commentlist)
for i in range(1,commentlist+1):
morecomment = []
commentter = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/ul['+str(i)+']/div/li/div/div[1]/div[2]/h3/a').text
teksc = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/ul['+str(i)+']/div/li/div/div[1]/div[2]/span').text
teksc = give_emoji_free_text(teksc)
teksc = teksc.translate(translator).lower()
morecomment.append(commentter)
morecomment.append(teksc)
comment.append(morecomment)
#print(commentter,teks)
if len(comment) == 0:
comment = ''
except:
comment = ''
if index == 0:
with open(namafile,'a',newline='') as csvfile: #Membuka dan membuat file '.csv'
writer = csv.writer(csvfile)
writer.writerow(['username','post','tag','likes','comment'])
writer.writerow([username, teks, tag, likes, comment])
index += 1
else:
with open(namafile, 'a', newline = '') as csvfile: #Menambahkan file '.csv' dengan data baru
writer = csv.writer(csvfile)
#print(username, teks, tag, likes, comment)
writer.writerow([username, teks, tag, likes, comment])
index += 1
if limit == 0:
browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
else:
browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a[2]').click()
#print()
time.sleep(2)
limit += 1
return int(jml_followers), index
def mulaiProgram(url, username, password):
loginInstagram(url, username, password)
hitung = 0
sizeOfFile = 0
namafile = input("Masukkan nama file: ")
namafix = namafile+'.csv'
while sizeOfFile < 1024*1024*100:
tertinggi = 0
indekss = 0
try:
listTotalFollowersFromTarget = []
listFollowers = []
listFollowers = getListFollowers(username, tertinggi)
#print(listFollowers)
for usertarget in listFollowers:
browser.get(url+'/'+usertarget)
time.sleep(3)
totalFollowers, indekss = writeToCSVandGTF(indekss, usertarget,namafix)
listTotalFollowersFromTarget.append(totalFollowers)
hitung += 1
#print( listTotalFollowersFromTarget )
tertinggi = max(listTotalFollowersFromTarget)
#print(tertinggi)
indeks = listTotalFollowersFromTarget.index(tertinggi)
#print(indeks)
browser.get(url+'/'+username)
time.sleep(2)
username = listFollowers[indeks]
#print(username)
browser.get(url+'/'+username)
except:
continue
sizeOfFile = getFileSize(namafix)
user = input('Masukkan username akun anda: ')
passwo = input('Masukkan password akun anda: ')
url = 'https://www.instagram.com'
username = user
password = passwo
mulaiProgram(url, username, password)
browser.quit()
| 50.220974 | 205 | 0.548736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,070 | 0.303527 |
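The crawler above paces itself with fixed `time.sleep` calls between page loads. A commonly used alternative is an explicit wait that blocks until an element is actually present; the snippet below is a generic Selenium sketch of that pattern, and the XPath and username are placeholders rather than the crawler's real selectors.

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://www.instagram.com")

# Block for up to 10 seconds until the element exists, instead of sleeping.
element = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, "//input[@name='username']"))
)
element.send_keys("example_user")
driver.quit()
```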
8d596a354fbcf53937f22d7c7dc7a505553f0379 | 5,310 | py | Python | pages/process.py | nchibana/dash-app-template | a51ad0ac92e719b2ef60739b6c1126aebb920d47 | ["MIT"] | null | null | null | pages/process.py | nchibana/dash-app-template | a51ad0ac92e719b2ef60739b6c1126aebb920d47 | ["MIT"] | 4 | 2020-03-24T17:36:39.000Z | 2021-08-23T20:13:16.000Z | pages/process.py | nchibana/dash-app-template | a51ad0ac92e719b2ef60739b6c1126aebb920d47 | ["MIT"] | null | null | null |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
from sklearn.metrics import roc_curve
import pandas as pd
from joblib import load
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
********
To build this model, two datasets with similar labels were combined to form a dataset with 102,840 observations.
I would like to thank the research team behind [this study](https://arxiv.org/pdf/1802.00393.pdf), as they promptly gave me access to their data, which was labeled through Crowdflower.
This model builds largely on their work, as well as that of [this previous study](https://aaai.org/ocs/index.php/ICWSM/ICWSM17/paper/view/15665).
After gaining access to both datasets, I proceeded to retrieve the corresponding tweet text for all IDs in the second set (as it was not provided) via Twitter's API.
This was [the code](https://stackoverflow.com/questions/44581647/retrieving-a-list-of-tweets-using-tweet-id-in-tweepy) I used to retrieve the text, without exceeding the rate limit.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https%3A%2F%2Fgist.github.com%2Fnchibana%2F20d6d9f8ae62a6cc36b773d37dd7dc70.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=780, width=1000),
dcc.Markdown(
"""
After that, I proceeded to combine the datasets and eliminate all duplicate tweets. I also defined a baseline accuracy score of 56%, which is the percent accuracy the model would achieve
if it predicted the majority class for all tweets.
Using some of the processes followed by the studies mentioned above, I also continued to preprocess the data by eliminating excess spaces, removing punctuation and retrieving the stem words of terms
used in tweets.
            Next, I used Scikit-learn's [TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) to convert tweet text into a matrix of
TF-IDF features, which is a statistic that calculates how important a word is to a document or collection of words.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https://gist.github.com/nchibana/c15cbc4a1d97af02fa62fff5868bc36e.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=460, width=1000),
dcc.Markdown(
"""
To increase the accuracy of the model, additional features were engineered, such as the number of syllables per word, the total number of characters, the number of words, the number of unique
terms, as well as readability and sentiment scores for each tweet.
Additionally, the number of mentions, hashtags and links in each tweet were also counted. For this study, images or any other type of media content were not analyzed.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https%3A%2F%2Fgist.github.com%2Fnchibana%2F5cebfbfa700974edcd9f5fa6e43cc513.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=600, width=1000),
dcc.Markdown(
"""
After testing several models such as Linear SVC, I finally settled on a logistic regression model which I trained on the data and used for the final model and app.
I also used grid search to find the optimal parameters for this logistic regression model.
Finally, I computed all accuracy scores and proceeded to plot visualizations to help me get a deeper understanding of the model, such as a confusion matrix to visualize misclassified tweets.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https%3A%2F%2Fgist.github.com%2Fnchibana%2F0cc0c44c9b5a991adbc2690c97023d0c.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=300, width=1000),
dcc.Markdown(
"""
## Sources
********
1. Automated Hate Speech Detection and the Problem of Offensive Language
Davidson, Thomas and Warmsley, Dana and Macy, Michael and Weber, Ingmar
Proceedings of the 11th International AAAI Conference on Web and Social Media p. 512-515. 2017
2. Large Scale Crowdsourcing and Characterization of Twitter Abusive Behavior
Founta, Antigoni-Maria and Djouvas, Constantinos and Chatzakou, Despoina and Leontiadis, Ilias and Blackburn, Jeremy and Stringhini, Gianluca and Vakali, Athena and Sirivianos, Michael and Kourtellis, Nicolas
11th International Conference on Web and Social Media, ICWSM 2018 2018
"""
),
],
md=12,
)
layout = dbc.Row([column1]) | 50.09434 | 259 | 0.688512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,287 | 0.807345 |
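The markdown above describes vectorizing tweets with TF-IDF and fitting a logistic regression tuned by grid search. The snippet below is a minimal scikit-learn sketch of that combination on placeholder tweets; it illustrates the described approach and is not the author's actual training code.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

# Placeholder data: 1 = abusive/hateful, 0 = normal.
tweets = ["you are awful", "have a nice day", "awful awful people", "nice work"]
labels = [1, 0, 1, 0]

pipeline = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("clf", LogisticRegression(max_iter=1000)),
])

# Small grid over the regularization strength, as a stand-in for the
# grid search mentioned in the write-up.
grid = GridSearchCV(pipeline, param_grid={"clf__C": [0.1, 1.0, 10.0]}, cv=2)
grid.fit(tweets, labels)
print(grid.best_params_, grid.predict(["awful day"]))
```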
8d597279dcdef01055e59ebc350f3cf1d766f1a3 | 599 | py | Python | tests/sdk/test_service.py | kusanagi/katana-sdk-python3 | cd089409ec0d822f4d7bd6b4bebd527e003089ee | ["MIT"] | 2 | 2017-03-21T20:02:47.000Z | 2017-05-02T19:32:01.000Z | tests/sdk/test_service.py | kusanagi/katana-sdk-python3 | cd089409ec0d822f4d7bd6b4bebd527e003089ee | ["MIT"] | 19 | 2017-03-10T12:09:34.000Z | 2018-06-01T18:10:06.000Z | tests/sdk/test_service.py | kusanagi/katana-sdk-python3 | cd089409ec0d822f4d7bd6b4bebd527e003089ee | ["MIT"] | 5 | 2017-03-10T11:40:50.000Z | 2019-03-26T06:28:33.000Z |
from katana.sdk.service import get_component
from katana.sdk.service import Service
def test_service_component():
# Check service component singleton creation
assert get_component() is None
service = Service()
assert get_component() == service
def action_callback():
pass
assert service._callbacks == {}
# Set an action callback
action_name = 'foo'
assert action_name not in service._callbacks
service.action(action_name, action_callback)
assert action_name in service._callbacks
assert service._callbacks[action_name] == action_callback
| 27.227273 | 61 | 0.736227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.12187 |
8d5b40af3f077c2c14c5035c4efe391b9a38cc70 | 527 | py | Python | DesignPatterns/MVC/server/controllers/index.py | TigranGit/CodeBase | d58e30b1d83fab4b388ec2cdcb868fa751c62188 | ["Apache-2.0"] | 1 | 2020-08-13T19:09:27.000Z | 2020-08-13T19:09:27.000Z | DesignPatterns/MVC/server/controllers/index.py | TigranGit/CodeBase | d58e30b1d83fab4b388ec2cdcb868fa751c62188 | ["Apache-2.0"] | null | null | null | DesignPatterns/MVC/server/controllers/index.py | TigranGit/CodeBase | d58e30b1d83fab4b388ec2cdcb868fa751c62188 | ["Apache-2.0"] | null | null | null |
from .base_controller import BaseController
from ..helper.utils import render_template
from ..helper.constants import STATUS_OK
class IndexController(BaseController):
def __init__(self, client_address):
self.user_ip = client_address[0]
self.user_port = str(client_address[1])
self.title = "Home"
def get(self):
return STATUS_OK, render_template(
"index.html",
title=self.title,
user_ip=self.user_ip,
user_port=self.user_port,
)
| 27.736842 | 47 | 0.654649 | 396 | 0.751423 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.034156 |
8d5bd4af92a66ece14d4931534ffa3416cb4b661 | 3,919 | py | Python | plugins/tff_backend/bizz/payment.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | null | null | null | plugins/tff_backend/bizz/payment.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | 178 | 2017-08-02T12:58:06.000Z | 2017-12-20T15:01:12.000Z | plugins/tff_backend/bizz/payment.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | ["Apache-2.0"] | 2 | 2018-01-10T10:43:12.000Z | 2018-03-18T10:42:23.000Z |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import time
from google.appengine.api import users
from google.appengine.ext import ndb
from framework.utils import now
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.exceptions import BusinessException
from plugins.tff_backend.models.payment import ThreeFoldTransaction, ThreeFoldPendingTransaction
from plugins.tff_backend.to.payment import WalletBalanceTO
def _get_balance_from_transactions(transactions, token):
# type: (list[ThreeFoldTransaction], unicode) -> WalletBalanceTO
available_balance = 0
total_balance = 0
total_description_details = []
# TODO set to minimum precision of all transactions when transactions have the 'precision' property
# (and multiply available / total amount depending on precision)
precision = 2
# for transaction in transactions:
# precision = max(transaction.precision, precision)
for transaction in transactions:
if transaction.token != token:
raise BusinessException('Invalid transaction supplied to _get_balance_from_transactions. '
'All transactions must have %s as token', token)
amount_spent = transaction.amount - transaction.amount_left
unlocked_amount = 0
now_ = now()
for unlock_timestamp, unlock_amount in zip(transaction.unlock_timestamps, transaction.unlock_amounts):
if unlock_timestamp <= now_:
unlocked_amount += unlock_amount
else:
total_description_details.append((unlock_timestamp, unlock_amount))
spendable_amount = unlocked_amount - amount_spent
available_balance += spendable_amount
total_balance += transaction.amount_left
if total_description_details:
total_description = u"""## %(token)s Unlock times'
|Date|#%(token)s|
|---|---:|
""" % {'token': token}
for unlock_timestamp, unlock_amount in sorted(total_description_details, key=lambda tup: tup[0]):
date = time.strftime('%a %d %b %Y %H:%M:%S GMT', time.localtime(unlock_timestamp))
amount = u'{:0,.2f}'.format(unlock_amount / 100.0)
total_description += u'\n|%s|%s|' % (date, amount)
else:
total_description = None
return WalletBalanceTO(available=available_balance, total=total_balance, description=total_description, token=token,
precision=precision)
@returns([WalletBalanceTO])
@arguments(username=unicode)
def get_all_balances(username):
transactions = ThreeFoldTransaction.list_with_amount_left(username)
token_types = set(map(lambda transaction: transaction.token, transactions))
results = []
for token in token_types:
transactions_per_token = [trans for trans in transactions if trans.token == token]
results.append(_get_balance_from_transactions(transactions_per_token, token))
return results
@returns(tuple)
@arguments(username=unicode, page_size=(int, long), cursor=unicode)
def get_pending_transactions(username, page_size, cursor):
# type: (users.User, long, unicode) -> tuple[list[ThreeFoldPendingTransaction], ndb.Cursor, bool]
return ThreeFoldPendingTransaction.list_by_user(username) \
.fetch_page(page_size, start_cursor=ndb.Cursor(urlsafe=cursor))
| 42.597826 | 120 | 0.720082 | 0 | 0 | 0 | 0 | 863 | 0.220209 | 0 | 0 | 1,265 | 0.322786 |
8d5f94f57caf92571a35ef22a1aa7566e2df0d65 | 1,582 | py | Python | tasks/tests/ui/conftest.py | MisterLenivec/django_simple_todo_app | 8e694a67df43de7feaae785c0b3205534c701923 | ["MIT"] | null | null | null | tasks/tests/ui/conftest.py | MisterLenivec/django_simple_todo_app | 8e694a67df43de7feaae785c0b3205534c701923 | ["MIT"] | 4 | 2020-06-07T01:25:14.000Z | 2021-06-10T18:34:10.000Z | tasks/tests/ui/conftest.py | MisterLenivec/django_simple_todo_app | 8e694a67df43de7feaae785c0b3205534c701923 | ["MIT"] | null | null | null |
from django.conf import settings
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pytest
import os
@pytest.fixture(scope='session')
def django_db_setup():
settings.DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('simple_todo_db_name'),
'USER': os.environ.get('simple_todo_db_user'),
'PASSWORD': os.environ.get('simple_todo_db_password'),
'HOST': '127.0.0.1',
'PORT': '5432',
}
def pytest_addoption(parser):
parser.addoption('--browser_name', action='store', default="chrome",
help="Choose browser: chrome or firefox")
def chrome_options():
options = Options()
options.add_argument("--headless") # No open browser
options.add_argument("--window-size=1920x1080")
return options
def firefox_options():
fp = webdriver.FirefoxProfile()
return fp
@pytest.fixture(scope="session")
def browser(request):
browser_name = request.config.getoption("browser_name")
browser = None
if browser_name == "chrome":
print("\nstart chrome browser for test..")
browser = webdriver.Chrome(
options=chrome_options()
)
elif browser_name == "firefox":
print("\nstart firefox browser for test..")
browser = webdriver.Firefox(
firefox_profile=firefox_options()
)
else:
raise pytest.UsageError("--browser_name should be chrome or firefox")
yield browser
print("\nquit browser..")
browser.quit()
| 27.275862 | 77 | 0.653603 | 0 | 0 | 605 | 0.382427 | 1,014 | 0.640961 | 0 | 0 | 468 | 0.295828 |
8d606a3efd5feb490b057183d05dc39513b2525a | 3,519 | py | Python | erp/migrations/0026_auto_20200205_0950.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | ["MIT"] | 8 | 2020-07-23T08:17:28.000Z | 2022-03-09T22:31:36.000Z | erp/migrations/0026_auto_20200205_0950.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | ["MIT"] | 37 | 2020-07-01T08:47:33.000Z | 2022-02-03T19:50:58.000Z | erp/migrations/0026_auto_20200205_0950.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | ["MIT"] | 4 | 2021-04-08T10:57:18.000Z | 2022-01-31T13:16:31.000Z |
# Generated by Django 3.0.3 on 2020-02-05 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('erp', '0025_auto_20200205_0839'),
]
operations = [
migrations.AddField(
model_name='cheminement',
name='aide_humaine',
field=models.BooleanField(blank=True, help_text="Présence ou possibilité d'une aide humaine au déplacement", null=True),
),
migrations.AlterField(
model_name='accessibilite',
name='accueil_personnels',
field=models.CharField(blank=True, choices=[(None, 'Inconnu'), ('aucun', 'Aucun personnel'), ('formés', 'Personnels sensibilisés et formés'), ('non-formés', 'Personnels non non-formés')], help_text="Présence et type de personnel d'accueil", max_length=255, null=True, verbose_name="Personnel d'accueil"),
),
migrations.AlterField(
model_name='accessibilite',
name='accueil_visibilite',
field=models.BooleanField(blank=True, help_text="La zone d'accueil est-elle visible depuis l'entrée ?", null=True, verbose_name="Visibilité de la zone d'accueil"),
),
migrations.AlterField(
model_name='accessibilite',
name='entree_reperage',
field=models.BooleanField(blank=True, help_text="Présence d'éléments de répérage de l'entrée", null=True, verbose_name="Repérage de l'entrée"),
),
migrations.AlterField(
model_name='cheminement',
name='devers',
field=models.CharField(blank=True, choices=[(None, 'Inconnu'), ('aucun', 'Aucun'), ('léger', 'Léger'), ('important', 'Important')], help_text='Inclinaison transversale du cheminement', max_length=15, null=True, verbose_name='Dévers'),
),
migrations.AlterField(
model_name='cheminement',
name='escalier_reperage',
field=models.BooleanField(blank=True, help_text='Si marches contrastées, bande d’éveil ou nez de marche contrastés, indiquez “Oui”', null=True, verbose_name="Repérage de l'escalier"),
),
migrations.AlterField(
model_name='cheminement',
name='pente',
field=models.CharField(blank=True, choices=[(None, 'Inconnu'), ('aucune', 'Aucune'), ('légère', 'Légère'), ('importante', 'Importante')], help_text='Présence et type de pente', max_length=15, null=True),
),
migrations.AlterField(
model_name='cheminement',
name='rampe',
field=models.CharField(blank=True, choices=[(None, 'Inconnu'), ('aucune', 'Aucune'), ('fixe', 'Fixe'), ('amovible', 'Amovible')], help_text='Présence et type de rampe', max_length=20, null=True),
),
migrations.AlterField(
model_name='cheminement',
name='type',
field=models.CharField(choices=[('stationnement_vers_erp', "Cheminement depuis le stationnement de l'ERP"), ('stationnement_ext_vers_erp', "Cheminement depuis le stationnement extérieur à l'ERP"), ('stationnement_vers_entree', "Cheminement du stationnement à l'entrée du bâtiment"), ('parcelle_vers_entree', "Cheminement depuis l'entrée de la parcelle de terrain à l'entrée du bâtiment"), ('entree_vers_accueil', "Cheminement de l'entrée du bâtiment à la zone d'accueil"), ('entree', "Cheminement autour de l'entrée")], default='entree', help_text='Type de cheminement', max_length=100, verbose_name='Cheminement'),
),
]
| 59.644068 | 627 | 0.651321 | 3,477 | 0.97395 | 0 | 0 | 0 | 0 | 0 | 0 | 1,643 | 0.460224 |
8d60c377538ddae6447654f6c37f24bae517225c | 3,629 | py | Python | convert.py | Ellen7ions/bin2mem | 51e3216cbf5e78547751968ef1619a925f2f55ef | ["MIT"] | 3 | 2021-05-18T13:07:39.000Z | 2021-05-24T12:46:43.000Z | convert.py | Ellen7ions/bin2mem | 51e3216cbf5e78547751968ef1619a925f2f55ef | ["MIT"] | null | null | null | convert.py | Ellen7ions/bin2mem | 51e3216cbf5e78547751968ef1619a925f2f55ef | ["MIT"] | null | null | null |
import os, sys
import json
class Config:
def __init__(self, config_path='./config.json'):
super(Config, self).__init__()
self.config_path = config_path
self.bin2mem_path = None
self.init_configs(json.load(open(config_path)))
def init_configs(self, json_data):
self.bin2mem_path = json_data['bin2mem.path']
Config.check_file_exists(self.bin2mem_path)
@staticmethod
def check_file_exists(file_name):
if not os.path.exists(file_name):
raise Exception(f'{file_name} not found!')
class Convert:
def __init__(self):
super(Convert, self).__init__()
self.config = Config()
self.FLAG_SAVE_FILES = False
self.FLAG_FILE_NAME = ''
self.FLAG_CLEAN_ALL = False
self.workspace_name = ''
self.file_name = ''
self.o_file_path = ''
self.bin_file_path = ''
self.coe_file_path = ''
self.init_flags()
self.make_workspace()
self.set_files_path()
def init_flags(self):
for i in sys.argv:
if i == '-s':
self.FLAG_SAVE_FILES = True
if i.endswith('.s'):
self.FLAG_FILE_NAME = i
if i == 'clean':
self.FLAG_CLEAN_ALL = True
if self.FLAG_FILE_NAME == '':
if os.path.exists('main.s'):
self.FLAG_FILE_NAME = 'main.s'
else:
raise Exception('Where is your input file :(')
self.workspace_name = self.FLAG_FILE_NAME[:-2]
self.file_name = self.FLAG_FILE_NAME[:-2]
def make_workspace(self):
if not os.path.exists(self.workspace_name):
os.mkdir(self.workspace_name)
def set_files_path(self):
self.o_file_path = f'.\\{self.workspace_name}\\{self.file_name}.o'
self.bin_file_path = f'.\\{self.workspace_name}\\{self.file_name}.bin'
self.coe_file_path = f'.\\{self.workspace_name}\\{self.file_name}.txt'
def mips_gcc_c(self):
os.system(f'mips-sde-elf-gcc -c {self.FLAG_FILE_NAME} -o {self.o_file_path}')
def mips_objcopy(self):
os.system(f'mips-sde-elf-objcopy -O binary {self.o_file_path} {self.bin_file_path}')
def mips_bin2mem(self):
os.system(f'{self.config.bin2mem_path} {self.bin_file_path} {self.coe_file_path}')
def clean_process_files(self):
try:
Config.check_file_exists(self.o_file_path)
os.system(f'del {self.o_file_path}')
except Exception as e:
pass
try:
Config.check_file_exists(self.bin_file_path)
os.system(f'del {self.bin_file_path}')
except Exception as e:
pass
def run(self):
self.mips_gcc_c()
self.mips_objcopy()
self.mips_bin2mem()
def clean(self):
self.clean_process_files()
try:
Config.check_file_exists(self.coe_file_path)
os.system(f'del {self.coe_file_path}')
except Exception as e:
pass
os.removedirs(self.workspace_name)
def mips_objdump(self):
if os.path.exists(self.o_file_path):
os.system(f'mips-sde-elf-objdump -d {self.o_file_path}')
def apply(self):
if self.FLAG_CLEAN_ALL:
self.clean()
return
self.run()
if not self.FLAG_SAVE_FILES:
self.clean_process_files()
return
self.mips_objdump()
if __name__ == '__main__':
c = Convert()
c.apply()
# c.mips_gcc_c()
# c.mips_objcopy()
# c.mips_bin2mem()
# config = Config()
| 28.801587 | 92 | 0.590245 | 3,444 | 0.949022 | 0 | 0 | 148 | 0.040783 | 0 | 0 | 688 | 0.189584 |
8d61a4b35ddf035024fe7d951c745cb83a2a9d4d | 3,161 | py | Python | stats.py | DisinfoResearch/TwitterCollector | 183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb | ["MIT"] | null | null | null | stats.py | DisinfoResearch/TwitterCollector | 183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb | ["MIT"] | null | null | null | stats.py | DisinfoResearch/TwitterCollector | 183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb | ["MIT"] | null | null | null |
#!/bin/python3
# Copyright (C) 2021, Michigan State University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import csv
import json
import argparse
import sys
import datetime
from dateutil.parser import parse
def calc_row(u):
created_date = parse(u['created_at'])
t = today - created_date.date()
# Prevent divide by zero
ff_ratio = 0
if int(u['friends_count']) != 0: ff_ratio = int(u['followers_count'])/int(u['friends_count'])
# Force conversions to int, as you never know with Twitter
return {'Twitter_ID':u['id'], 'Handle':u['screen_name'], 'Followed':u['friends_count'], 'Followers':u['followers_count'], 'Followers/Followed':ff_ratio, 'Tweets':u['statuses_count'], 'Days_old':int(t.days), 'Tweets/Days_old':int(u['statuses_count'])/int(t.days), 'Followers/Days_old':int(u['followers_count'])/int(t.days)}
def process_csv(inp, out):
# Uses a Tuple to ensure a specific column order
csv_writer = csv.DictWriter(out, fieldnames=('Twitter_ID', 'Handle', 'Followed', 'Followers', 'Followers/Followed', 'Tweets', 'Days_old', 'Tweets/Days_old', 'Followers/Days_old'))
csv_writer.writeheader()
for line in inp:
csv_writer.writerow(calc_row(json.loads(line)))
def process_json(inp, out):
for line in inp:
j = json.loads(line)
out.write(json.dumps(calc_row(j))+"\n")
parser = argparse.ArgumentParser(description='Convert JSON to CSV', epilog='P.S. Trust The Plan')
parser.add_argument('--format', help='either JSON or CSV', required=True)
parser.add_argument('input', help='JSON File, or stdin if not specified', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('output', help='output to File, or stdout if not specified', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
today = datetime.date.today()
if args.format.upper() == 'CSV':
process_csv(args.input, args.output)
elif args.format.upper() == 'JSON':
process_json(args.input, args.output)
else:
print(f"Error: '{args.format}' is an invalid format, must be CSV or JSON.", end="\n\n")
parser.print_help()
    exit(-1)
| 45.157143 | 326 | 0.726669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,898 | 0.600443 |
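`calc_row` turns a raw Twitter user object into ratio features (followers per followed account, tweets per day of account age, and so on). The worked example below repeats that arithmetic on an invented user record, outside the CSV/JSON plumbing.

```python
import datetime
from dateutil.parser import parse

user = {"id": 1, "screen_name": "example", "friends_count": 200,
        "followers_count": 50, "statuses_count": 3650,
        "created_at": "2015-01-01T00:00:00Z"}

age_days = (datetime.date.today() - parse(user["created_at"]).date()).days
print(user["followers_count"] / user["friends_count"])  # followers per followed account
print(user["statuses_count"] / age_days)                # tweets per day of account age
```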
8d61d1b5d6b0de975b9d576cfadcd886cc44204a | 10,970 | py | Python | Scratch/lstm.py | imadtoubal/MultimodalDeepfakeDetection | 46539e16c988ee9fdfb714893788bbbf72836595 | ["MIT"] | 2 | 2022-03-12T09:18:13.000Z | 2022-03-23T08:29:10.000Z | Scratch/lstm.py | imadtoubal/MultimodalDeepfakeDetection | 46539e16c988ee9fdfb714893788bbbf72836595 | ["MIT"] | null | null | null | Scratch/lstm.py | imadtoubal/MultimodalDeepfakeDetection | 46539e16c988ee9fdfb714893788bbbf72836595 | ["MIT"] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from preprocess import *
from torch.utils.data import Dataset, DataLoader
from blazeface import BlazeFace
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches  # used by get_face_endpoints(with_keypoints=True)
import random
import pickle
DATA_FOLDER = '../input/deepfake-detection-challenge'
TRAIN_SAMPLE_FOLDER = 'train_sample_videos'
TEST_FOLDER = 'test_videos'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
NET = BlazeFace().to(device)
NET.load_weights("../input/blazeface.pth")
NET.load_anchors("../input/anchors.npy")
class MyLSTM(nn.Module):
def __init__(self, num_layers=2, num_hidden_nodes=512):
super(MyLSTM, self).__init__()
self.num_layers = num_layers
self.num_hidden_nodes = num_hidden_nodes
# input dim is 167, output 200
self.lstm = nn.LSTM(167, num_hidden_nodes,
batch_first=True, num_layers=num_layers)
# fully connected
self.fc1 = nn.Linear(num_hidden_nodes, num_hidden_nodes)
self.act = nn.Sigmoid()
self.fc2 = nn.Linear(num_hidden_nodes, 2)
self.softmax = nn.Softmax()
def forward(self, x, hidden):
y, hidden = self.lstm(x, hidden) # returns the two outputs
y = y[:, -1, :] # get only the last output
y = self.fc1(y)
y = self.fc2(y)
y = F.softmax(y, dim=1)
return y, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
hidden = (weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_(),
weight.new(self.num_layers, batch_size, self.num_hidden_nodes).zero_())
return hidden
class FourierDataset(Dataset):
def __init__(self, data):
"""
data: a list of (label: string, fourier_data: numpy array, name: string)
"""
self.data = []
for elt in data:
label, spects, name = elt
label = torch.tensor(0 if label == 'FAKE' else 1)
# Moving window sequence generation without overalap
# other ideas: 1. Random sampling, 2. Moving qindow with overlap
# this data will be shuffled
for i in range(0, 24 * (spects.shape[0] // 24), 24):
spect = torch.tensor(spects[i:i+24, :])
self.data.append((spect, label))
def __getitem__(self, idx):
return self.data[idx] # spect (24, 167), label (2)
def __len__(self):
return len(self.data)
sequence = 24 # 1 sec of video
feature_size = 167 # length of spatial frequency
def read_video(filename):
vidcap = cv2.VideoCapture(filename)
success, image = vidcap.read()
count = 0
images = []
while success:
tiles, resize_info = stride_search(image)
detections = NET.predict_on_image(tiles[1])
blazeface_endpoints = get_face_endpoints(tiles[1], detections)[
0] # take the first face only
# we need to resize them on the original image and get the amount shifted to prevent negative values
# in this case it will be 1080
split_size = 128 * resize_info[1]
# determine how much we shifted for this tile
x_shift = (image.shape[1] - split_size) // 2
face_endpoints = (int(blazeface_endpoints[0] * resize_info[0]),
int(blazeface_endpoints[1] *
resize_info[0] + x_shift),
int(blazeface_endpoints[2] * resize_info[0]),
int(blazeface_endpoints[3] * resize_info[0] + x_shift))
# next we need to expand the rectangle to be 240, 240 pixels (for this training example)
# we can do this equally in each direction, kind of
face_width = face_endpoints[3] - face_endpoints[1]
face_height = face_endpoints[2] - face_endpoints[0]
buffer = 20
face_box = image[max(0, face_endpoints[0] - buffer): min(face_endpoints[2] + buffer, image.shape[0]),
max(0, face_endpoints[1] - buffer): min(face_endpoints[3] + buffer, image.shape[1])]
# print(face_box.shape) # almost a square or very close to it
face = cv2.resize(face_box, (240, 240))
images.append(face)
# cv2.imshow("face", face)
success, image = vidcap.read()
count += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if images:
return np.stack(images)
def get_spects(vid):
spects = []
for i in range(vid.shape[0]):
img = vid[i]
spects.append(fourier_tranform(img, ''))
return np.stack(spects)
def get_face_endpoints(img, detections, with_keypoints=False):
if isinstance(detections, torch.Tensor):
detections = detections.cpu().numpy()
if detections.ndim == 1:
detections = np.expand_dims(detections, axis=0)
detected_faces_endpoints = []
for i in range(detections.shape[0]): # dependent on number of faces found
ymin = detections[i, 0] * img.shape[0]
xmin = detections[i, 1] * img.shape[1]
ymax = detections[i, 2] * img.shape[0]
xmax = detections[i, 3] * img.shape[1]
detected_faces_endpoints.append((ymin, xmin, ymax, xmax))
cv2.rectangle(img, (int(xmin), int(ymin)),
(int(xmax), int(ymax)), (0, 0, 255), 2)
if with_keypoints:
for k in range(6):
kp_x = detections[i, 4 + k*2] * img.shape[1]
kp_y = detections[i, 4 + k*2 + 1] * img.shape[0]
circle = patches.Circle((kp_x, kp_y), radius=0.5, linewidth=1,
edgecolor="lightskyblue", facecolor="none",
alpha=detections[i, 16])
return detected_faces_endpoints
def prepare_data():
# Here we check the train data files extensions.
train_list = list(os.listdir(
os.path.join(DATA_FOLDER, TRAIN_SAMPLE_FOLDER)))
ext_dict = []
for file in train_list:
file_ext = file.split('.')[1]
if (file_ext not in ext_dict):
ext_dict.append(file_ext)
print(f"Extensions: {ext_dict}")
# Let's count how many files with each extensions there are.
for file_ext in ext_dict:
print(
f"Files with extension `{file_ext}`: {len([file for file in train_list if file.endswith(file_ext)])}")
test_list = list(os.listdir(os.path.join(DATA_FOLDER, TEST_FOLDER)))
ext_dict = []
for file in test_list:
file_ext = file.split('.')[1]
if (file_ext not in ext_dict):
ext_dict.append(file_ext)
print(f"Extensions: {ext_dict}")
for file_ext in ext_dict:
print(
f"Files with extension `{file_ext}`: {len([file for file in train_list if file.endswith(file_ext)])}")
json_file = [file for file in train_list if file.endswith('json')][0]
print(f"JSON file: {json_file}")
meta_train_df = get_meta_from_json(TRAIN_SAMPLE_FOLDER, json_file)
meta_train_df.head()
fake_train_sample_video = list(
meta_train_df.loc[meta_train_df.label == 'FAKE'].sample(90).index)
real_train_sample_video = list(
meta_train_df.loc[meta_train_df.label == 'REAL'].index)
training_data = []
for video_file in fake_train_sample_video:
try:
data = process_video_data(os.path.join(
DATA_FOLDER, TRAIN_SAMPLE_FOLDER, video_file))
training_data.append(('FAKE', data, video_file)) # (X, 24, 167)
except:
continue
for video_file in real_train_sample_video:
try:
data = process_video_data(os.path.join(
DATA_FOLDER, TRAIN_SAMPLE_FOLDER, video_file))
training_data.append(('REAL', data, video_file))
except:
continue
random.shuffle(training_data)
with open('train_data.txt', 'wb') as fp: # pickling
pickle.dump(training_data, fp)
return training_data
def read_data():
with open("train_data.txt", "rb") as fp: # Unpickling
training_data = pickle.load(fp)
return training_data
def process_video_data(video_file):
stack = read_video(video_file)
stack = stack.mean(axis=-1) / 255
return get_spects(stack)
def prepare_spect(spect):
return torch.tensor(spect)
def convert_scores(label):
return torch.tensor([1, 0]) if label == 'FAKE' else torch.tensor([0, 1])
def train(training_data):
batch_size = 69
model = MyLSTM()
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
training_data = FourierDataset(training_data)
trainloader = DataLoader(
training_data, batch_size=batch_size, shuffle=True)
hidden = model.init_hidden(batch_size)
print_every = 10
for epoch in range(100): # again, normally you would NOT do 100 epochs, it is toy data
running_loss = 0.0
running_acc = 0.0
i = 0
for inp, labels in trainloader: # renamed sequence to inp because inp is a batch of sequences
# Step 1. Remember that Pytorch accumulates gradients.
# We need to clear them out before each instance
model.zero_grad()
inp = inp.float()
# Step 2. Run our forward pass.
tag_scores, h = model(inp, hidden)
# Step 3. Compute the loss, gradients, and update the parameters by
# calling optimizer.step()
loss = loss_function(tag_scores, labels)
loss.backward()
optimizer.step()
running_acc += torch.mean((tag_scores.argmax(dim=1)
== labels).float()).item()
# print statistics
running_loss += loss.item()
if i % print_every == print_every-1:
print('[%d, %5d] loss: %.3f - acc: %.3f' %
(epoch + 1, i + 1, running_loss / print_every, running_acc * 100 / print_every))
running_loss = 0.0
running_acc = 0.0
i += 1
def main():
# prepare_data()
'''
stack = read_video(os.path.join(DATA_FOLDER, TRAIN_SAMPLE_FOLDER, 'aagfhgtpmv.mp4'))
print(stack.shape)
stack = stack.mean(axis=-1) / 255
spects = get_spects(stack)
# print(spects.shape)
print(spects[0])
plt.plot(spects[0])
plt.xlabel('Spatial Frequency')
plt.ylabel('Power Spectrum')
plt.show()
'''
training_data = read_data()
train(training_data)
if __name__ == '__main__':
main()
| 34.388715 | 116 | 0.591978 | 1,994 | 0.181768 | 0 | 0 | 0 | 0 | 0 | 0 | 2,369 | 0.215953 |
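Before training on real spectra it can help to sanity check the shapes flowing through `MyLSTM`. The sketch below feeds a random batch shaped like the `FourierDataset` items, i.e. 24 frames by 167 spatial frequencies per sample; it is a standalone check, not part of the original pipeline, and it assumes this file can be imported as `lstm` (importing it also loads the BlazeFace weights at module level).

```python
import torch

from lstm import MyLSTM  # assumption: Scratch/lstm.py is importable as `lstm`

model = MyLSTM(num_layers=2, num_hidden_nodes=512)
batch = torch.randn(4, 24, 167)           # 4 one-second windows of spectra
hidden = model.init_hidden(batch_size=4)

scores, hidden = model(batch, hidden)
print(scores.shape)                       # torch.Size([4, 2]); each row sums to 1
```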
8d63217e5fdc8f7f711034a43dd2b7d398591281 | 18,373 | py | Python | analysis/plot/python/plot_groups/estimator.py | leozz37/makani | c94d5c2b600b98002f932e80a313a06b9285cc1b | ["Apache-2.0"] | 1,178 | 2020-09-10T17:15:42.000Z | 2022-03-31T14:59:35.000Z | analysis/plot/python/plot_groups/estimator.py | leozz37/makani | c94d5c2b600b98002f932e80a313a06b9285cc1b | ["Apache-2.0"] | 1 | 2020-05-22T05:22:35.000Z | 2020-05-22T05:22:35.000Z | analysis/plot/python/plot_groups/estimator.py | leozz37/makani | c94d5c2b600b98002f932e80a313a06b9285cc1b | ["Apache-2.0"] | 107 | 2020-09-10T17:29:30.000Z | 2022-03-18T09:00:14.000Z |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plots relating to the estimator."""
from makani.analysis.plot.python import mplot
from makani.avionics.common import plc_messages
from makani.control import control_types
from makani.lib.python import c_helpers
from makani.lib.python.h5_utils import numpy_utils
from matplotlib.pyplot import plot
from matplotlib.pyplot import yticks
import numpy as np
from scipy import interpolate
MFig = mplot.PlotGroup.MFig # pylint: disable=invalid-name
_WING_GPS_RECEIVER_HELPER = c_helpers.EnumHelper(
'WingGpsReceiver', control_types)
_GROUND_STATION_MODE_HELPER = c_helpers.EnumHelper(
'GroundStationMode', plc_messages)
def _QuatToVec(q):
dims = ['q0', 'q1', 'q2', 'q3']
return np.array([q[d] for d in dims])
class Plots(mplot.PlotGroup):
"""Plots of the estimator."""
@MFig(title='Filtered Velocity', ylabel='Velocity [m/s]', xlabel='Time [s]')
def PlotFilteredVelocity(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['state_est']['Vg'], label='Vg', linestyle='-')
mplot.PlotVec3(c['time'], c['state_est']['Vg_f'], label='Vg_f',
linestyle='-.')
if s is not None:
mplot.PlotVec3(s['time'], s['wing']['Vg'], label='sim', linestyle=':')
@MFig(title='Acc Norm f', ylabel='Acc. [m/s^2]', xlabel='Time [s]')
def PlotAccNormF(self, e, c, s, params, imu_index=0):
plot(c['time'], c['state_est']['acc_norm_f'])
@MFig(title='Gyros', ylabel='Rate [rad/s]', xlabel='Time [s]')
def PlotGyros(self, e, c, s, params, imu_index=0):
mplot.PlotVec3(c['time'], c['control_input']['imus']['gyro'][:, imu_index])
@MFig(title='Filtered Body Rates', ylabel='Rate [rad/s]', xlabel='Time [s]')
def PlotBodyRates(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['state_est']['pqr_f'])
@MFig(title='Attitude Error', ylabel='Error [deg]', xlabel='Time [s]')
def PlotAttitudeError(self, e, c, s, params):
for imu_index in range(3):
if s is not None:
dims = ['q0', 'q1', 'q2', 'q3']
q_s = {d: np.zeros(c['time'].shape) for d in dims}
for d in dims:
q_s[d] = interpolate.interp1d(s['time'], s['wing']['q'][d],
bounds_error=False)(c['time'])
q_s = _QuatToVec(q_s)
if 'q_g2b' in e.dtype.names:
q_c = e['q_g2b'][:, imu_index]
q_c = _QuatToVec(q_c)
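        # Rotation angle between estimated and simulated attitude quaternions: arccos(2 * dot(q_c, q_s)^2 - 1)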
plot(c['time'], np.rad2deg(
np.arccos(1.0 - 2.0 * (1.0 - np.sum(q_c * q_s, axis=0)**2.0))),
label='Imu %d' % imu_index)
if 'mahony_states' in e.dtype.names:
q_c = e['mahony_states']['q'][:, imu_index]
q_c = _QuatToVec(q_c)
plot(c['time'], np.rad2deg(
np.arccos(1.0 - 2.0 * (1.0 - np.sum(q_c * q_s, axis=0)**2.0))),
label='Imu %d' % imu_index)
@MFig(title='Gyro Biases', ylabel='Biases [rad/s]', xlabel='Time [s]')
def PlotGyroBiases(self, e, c, s, params, imu_index=0):
mplot.PlotVec3(c['time'], e['gyro_biases'][:, imu_index],
label='IMU %d' % imu_index)
if s is not None:
mplot.PlotVec3(s['time'], s['imus']['gyro_bias_b'][:, imu_index],
linestyle=':')
@MFig(title='Acc Biases', ylabel='Biases [m/s^1]', xlabel='Time [s]')
def PlotAccBiases(self, e, c, s, params, imu_index=0):
mplot.PlotVec3(c['time'], e['acc_b_estimates'][:, imu_index],
label='IMU %d' % imu_index)
@MFig(title='Air Speed', ylabel='Speed [m/s]', xlabel='Time [s]')
def PlotAirspeed(self, e, c, s, params):
plot(c['time'], c['state_est']['apparent_wind']['sph']['v'], 'b',
label='est')
plot(c['time'], c['state_est']['apparent_wind']['sph_f']['v'], 'g',
label='filt')
if s is not None:
plot(s['time'], s['wing']['apparent_wind_b']['v'], 'b:', label='sim')
@MFig(title='Magnetometer', ylabel='Field [Gauss]', xlabel='Time [s]')
def PlotMagnetometer(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['control_input']['imus']['mag'][:, 0],
linestyle='-', label='A')
mplot.PlotVec3(c['time'], c['control_input']['imus']['mag'][:, 1],
linestyle=':', label='B')
mplot.PlotVec3(c['time'], c['control_input']['imus']['mag'][:, 2],
linestyle='-.', label='C')
@MFig(title='Specific Force', ylabel='Specific Force [m/s^2]',
xlabel='Time [s]')
def PlotAccelerometer(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['control_input']['imus']['acc'][:, 0],
linestyle='-', label='A')
mplot.PlotVec3(c['time'], c['control_input']['imus']['acc'][:, 1],
linestyle=':', label='B')
mplot.PlotVec3(c['time'], c['control_input']['imus']['acc'][:, 2],
linestyle='-.', label='C')
@MFig(title='Accel.', ylabel='Specific Force [m/s^2]', xlabel='Time [s]')
def PlotSpecificForce(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['estimator']['acc_b_estimates'][:, 0],
linestyle='-', label='A')
mplot.PlotVec3(c['time'], c['estimator']['acc_b_estimates'][:, 1],
linestyle=':', label='B')
mplot.PlotVec3(c['time'], c['estimator']['acc_b_estimates'][:, 2],
linestyle='-.', label='C')
@MFig(title='Magnetometer Diff', ylabel='Field [Gauss]', xlabel='Time [s]')
def PlotMagnetometerDiff(self, e, c, s, params, dimension='x'):
plot(c['time'],
c['control_input']['imus']['mag'][dimension][:, 0]
- c['control_input']['imus']['mag'][dimension][:, 1], 'b',
label='A-B ' + dimension)
plot(c['time'],
c['control_input']['imus']['mag'][dimension][:, 1]
- c['control_input']['imus']['mag'][dimension][:, 2], 'g',
label='B-C ' + dimension)
plot(c['time'],
c['control_input']['imus']['mag'][dimension][:, 2]
- c['control_input']['imus']['mag'][dimension][:, 0], 'r',
label='C-A ' + dimension)
@MFig(title='Current GPS', ylabel='GPS Receiver', xlabel='Time [s]')
def PlotGpsReceiver(self, e, c, s, params):
plot(c['time'], e['current_gps_receiver'], label='current_receiver')
yticks(_WING_GPS_RECEIVER_HELPER.Values(),
_WING_GPS_RECEIVER_HELPER.ShortNames())
def _PlotGpsPositionEcefChannel(self, c, d):
sigma = c['control_input']['wing_gps']['pos_sigma'][d]
wing_gps_pos = np.array(c['control_input']['wing_gps']['pos'][d])
wing_gps_pos[wing_gps_pos == 0] = float('nan')
plot(c['time'], wing_gps_pos[:, 0], 'b', label='0:%s ECEF' % d)
plot(c['time'], wing_gps_pos[:, 0] + sigma[:, 0], 'b:')
plot(c['time'], wing_gps_pos[:, 0] - sigma[:, 0], 'b:')
plot(c['time'], wing_gps_pos[:, 1], 'g', label='1:%s ECEF' % d)
plot(c['time'], wing_gps_pos[:, 1] + sigma[:, 1], 'g:')
plot(c['time'], wing_gps_pos[:, 1] - sigma[:, 1], 'g:')
@MFig(title='GPS Position ECEF', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsPositionEcefX(self, e, c, s, params):
self._PlotGpsPositionEcefChannel(c, 'x')
@MFig(title='GPS Position ECEF', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsPositionEcefY(self, e, c, s, params):
self._PlotGpsPositionEcefChannel(c, 'y')
@MFig(title='GPS Position ECEF', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsPositionEcefZ(self, e, c, s, params):
self._PlotGpsPositionEcefChannel(c, 'z')
@MFig(title='Kite Velocity Sigma', ylabel='Sigma Velocity [m/s]',
xlabel='Time [s]')
def PlotVelocitySigmas(self, e, c, s, params, plot_glas=True):
if 'cov_vel_g' in e.dtype.names:
plot(c['time'], e['cov_vel_g']['x']**0.5, 'b', label='Vg_x est')
plot(c['time'], e['cov_vel_g']['y']**0.5, 'g', label='Vg_y est')
plot(c['time'], e['cov_vel_g']['z']**0.5, 'r', label='Vg_z est')
if 'gps' in e.dtype.names:
aux_indices = np.argwhere(e['current_gps_receiver'] == 1)
vg = e['gps']['sigma_Vg'][:, 0]
vg[aux_indices] = e['gps']['sigma_Vg'][aux_indices, 1]
plot(c['time'], vg['x'], 'b-.', label='Vg_x gps')
plot(c['time'], vg['y'], 'g-.', label='Vg_y gps')
plot(c['time'], vg['z'], 'r-.', label='Vg_z gps')
@MFig(title='Kite Position Sigma', ylabel='Sigma Position [m]',
xlabel='Time [s]')
def PlotPositionSigmas(self, e, c, s, params, plot_glas=True):
if 'cov_vel_g' in e.dtype.names:
plot(c['time'], e['cov_pos_g']['x']**0.5, 'b', label='Xg_x est')
plot(c['time'], e['cov_pos_g']['y']**0.5, 'g', label='Xg_y est')
plot(c['time'], e['cov_pos_g']['z']**0.5, 'r', label='Xg_z est')
if 'gps' in e.dtype.names:
aux_indices = np.argwhere(e['current_gps_receiver'] == 1)
xg = e['gps']['sigma_Xg'][:, 0]
xg[aux_indices] = e['gps']['sigma_Xg'][aux_indices, 1]
plot(c['time'], xg['x'], 'b-.', label='Xg_x gps')
plot(c['time'], xg['y'], 'g-.', label='Xg_y gps')
plot(c['time'], xg['z'], 'r-.', label='Xg_z gps')
@MFig(title='Kite Velocity', ylabel='Velocity [m/s]', xlabel='Time [s]')
def PlotVelocity(self, e, c, s, params, plot_glas=True):
plot(c['time'], c['state_est']['Vg']['x'], 'b', label='Vg_x est')
plot(c['time'], c['state_est']['Vg']['y'], 'g', label='Vg_y est')
plot(c['time'], c['state_est']['Vg']['z'], 'r', label='Vg_z est')
if 'Vg_gps' in e.dtype.names:
plot(c['time'], e['Vg_gps']['x'], 'b-.', label='Vg_x gps')
plot(c['time'], e['Vg_gps']['y'], 'g-.', label='Vg_y gps')
plot(c['time'], e['Vg_gps']['z'], 'r-.', label='Vg_z gps')
if 'gps' in e.dtype.names:
aux_indices = np.argwhere(e['current_gps_receiver'] == 1)
vg = e['gps']['Vg'][:, 0]
vg[aux_indices] = e['gps']['Vg'][aux_indices, 1]
plot(c['time'], vg['x'], 'b-.', label='Vg_x gps')
plot(c['time'], vg['y'], 'g-.', label='Vg_y gps')
plot(c['time'], vg['z'], 'r-.', label='Vg_z gps')
if plot_glas and 'Vg_glas' in e.dtype.names:
plot(c['time'], e['Vg_glas']['x'], 'b:', label='Vg_x glas')
plot(c['time'], e['Vg_glas']['y'], 'g:', label='Vg_y glas')
plot(c['time'], e['Vg_glas']['z'], 'r:', label='Vg_z glas')
if s is not None:
plot(s['time'], s['wing']['Vg']['x'], 'b-o', label='Vg_x sim')
plot(s['time'], s['wing']['Vg']['y'], 'g-o', label='Vg_y sim')
plot(s['time'], s['wing']['Vg']['z'], 'r-o', label='Vg_z sim')
@MFig(title='Payout', ylabel='Payout [m]', xlabel='Time [s]')
def PlotPayout(self, e, c, s, params):
plot(c['time'], c['state_est']['winch']['payout'], label='Payout')
@MFig(title='Tension', ylabel='Tension [N]', xlabel='Time [s]')
def PlotTension(self, e, c, s, params):
plot(c['time'], c['state_est']['tether_force_b']['sph']['tension'],
label='Tension est')
@MFig(title='Tether Angles', ylabel='Angles [deg]', xlabel='Time [s]')
def PlotTetherAngles(self, e, c, s, params):
plot(c['time'],
np.rad2deg(c['state_est']['tether_force_b']['sph']['roll']),
label='Tether Roll')
plot(c['time'],
np.rad2deg(c['state_est']['tether_force_b']['sph']['pitch']),
label='Tether Pitch')
@MFig(title='Kite Position', ylabel='Position [m]', xlabel='Time [s]')
def PlotPosition(self, e, c, s, params, plot_glas=True):
for (d, clr) in [('x', 'b'), ('y', 'g'), ('z', 'r')]:
plot(c['time'], c['state_est']['Xg'][d], clr, label='Xg_%s est' % d)
plot(c['time'],
c['state_est']['Xg'][d] + c['estimator']['cov_pos_g'][d]**0.5,
clr+':', label='Xg_%s est' % d)
plot(c['time'],
c['state_est']['Xg'][d] - c['estimator']['cov_pos_g'][d]**0.5,
clr+':', label='Xg_%s est' % d)
plot(c['time'], e['gps']['Xg'][d][:], clr+'--', label='Xg_%s gps' % d)
plot(c['time'], e['gps']['Xg'][d][:]
+ e['gps']['sigma_Xg'][d][:], clr+':', label='Xg_%s gps' % d)
plot(c['time'], e['gps']['Xg'][d][:]
- e['gps']['sigma_Xg'][d][:], clr+':', label='Xg_%s gps' % d)
if plot_glas:
plot(c['time'], e['glas']['Xg'][d][:], clr+'-.', label='Xg_%s glas' % d)
plot(c['time'], e['glas']['Xg'][d][:]
+ e['glas']['sigma_Xg'][d][:], clr+':', label='Xg_%s glas' % d)
plot(c['time'], e['glas']['Xg'][d][:]
- e['glas']['sigma_Xg'][d][:], clr+':', label='Xg_%s glas' % d)
clr = 'r' # z-color from above loop
plot(c['time'], e['baro']['Xg_z'], clr+'-*', label='Xg_z baro')
if s is not None:
plot(s['time'], s['wing']['Xg']['x'], 'b-o', label='Xg_x sim')
plot(s['time'], s['wing']['Xg']['y'], 'g-o', label='Xg_y sim')
plot(s['time'], s['wing']['Xg']['z'], 'r-o', label='Xg_z sim')
@MFig(title='GSG Biases', ylabel='Angles [deg]', xlabel='Time [s]')
def PlotGsgBias(self, e, c, s, params):
plot(c['time'], np.rad2deg(e['gsg_bias']['azi']), 'b', label='Azi Bias')
plot(c['time'], np.rad2deg(e['gsg_bias']['ele']), 'g', label='Ele Bias')
@MFig(title='GPS Bias', ylabel='Position [m]', xlabel='Time [s]')
def PlotGpsBias(self, e, c, s, params):
mplot.PlotVec3(c['time'], e['Xg_gps_biases'][:, 0], label='GPS A bias')
mplot.PlotVec3(c['time'], e['Xg_gps_biases'][:, 1], label='GPS B bias')
@MFig(title='Wind Speed', ylabel='Wind Speed [m/s]', xlabel='Time [s]')
def PlotWindSpeed(self, e, c, s, params):
if s is not None:
wind_g = s['wind_sensor']['wind_g']
plot(s['time'], numpy_utils.Vec3Norm(wind_g), 'C1--',
label='wind speed at wind sensor [sim]')
wind_g = s['wing']['wind_g']
plot(s['time'], numpy_utils.Vec3Norm(wind_g), 'C2:',
label='wind speed at kite [sim]')
# Plot the estimated wind speed last so that it will be on top.
plot(c['time'], c['state_est']['wind_g']['speed_f'], 'C0-',
linewidth=2, label='wind speed [est]')
@MFig(title='Kite Azimuth', ylabel='Azimuth [deg]', xlabel='Time [s]')
def PlotKiteAzimuth(self, e, c, s, params):
xg = c['state_est']['Xg']
plot(c['time'], np.rad2deg(np.arctan2(xg['y'], xg['x'])), 'b')
@MFig(title='Wind Direction (FROM)', ylabel='Direction [deg]',
xlabel='Time [s]')
def PlotWindDir(self, e, c, s, params):
if s is not None:
wind_g = s['wind_sensor']['wind_g']
plot(s['time'],
np.rad2deg(np.arctan2(-wind_g['y'], -wind_g['x'])), 'C1--',
label='wind direction at wind sensor [sim]')
wind_g = s['wing']['wind_g']
plot(s['time'],
np.rad2deg(np.arctan2(-wind_g['y'], -wind_g['x'])), 'C1--',
label='wind direction at kite [sim]')
# The estimator's "dir_f" is the TO direction. Here we convert to a
# FROM direction.
dir_f = np.rad2deg(c['state_est']['wind_g']['dir_f']) + 180.0
dir_f[dir_f > 360.0] -= 360.0
# Plot the estimated wind speed last so that it will be on top.
plot(c['time'], dir_f, 'C0-',
linewidth=2, label='wind direction [est]')
@MFig(title='Tether Elevation', ylabel='[deg]', xlabel='Time [s]')
def PlotTetherElevation(self, e, c, s, params):
elevation = c['state_est']['tether_ground_angles']['elevation']
elevation[np.logical_not(
c['state_est']['tether_ground_angles']['elevation_valid']
)] = float('nan')
plot(c['time'], np.rad2deg(elevation), label='Est')
if s is not None:
plot(s['time'], np.rad2deg(s['tether']['Xv_start_elevation']), '--',
label='Sim')
@MFig(title='Ground Station Mode', ylabel='Mode [enum]', xlabel='Time [s]')
def PlotGroundStationMode(self, e, c, s, params):
plot(c['time'], c['control_input']['gs_sensors']['mode'], label='ci')
plot(c['time'], c['state_est']['gs_mode'], label='est')
if s is not None:
plot(s['time'], s['gs02']['mode'], '-.', label='Sim')
yticks(_GROUND_STATION_MODE_HELPER.Values(),
_GROUND_STATION_MODE_HELPER.ShortNames())
@MFig(title='Ground Station Transform Stage', ylabel='Stage [#]',
xlabel='Time [s]')
def PlotGroundStationTransformStage(self, e, c, s, params):
plot(c['time'], c['control_input']['gs_sensors']['transform_stage'],
label='ci')
plot(c['time'], c['state_est']['gs_transform_stage'], label='est')
if s is not None:
# This value is not yet in simulator telemetry.
pass
# TODO: Create separate 'simulator' plot group.
@MFig(title='Moments', ylabel='Nm', xlabel='Time [s]')
def PlotKiteMoments(self, e, c, s, params, axis='y'):
for what in ['aero', 'gravity', 'tether', 'rotors',
'disturb', 'blown_wing', 'total']:
plot(s['time'], s['wing']['fm_'+what]['moment'][axis], label='fm_'+what)
@MFig(title='Kite Azimuth and Elevation', ylabel='Angle [deg]',
xlabel='Time [s]')
def PlotKiteAzimuthAndElevation(self, e, c, s, params):
wing_pos_g = s['wing']['Xg']
plot(s['time'], np.rad2deg(np.arctan2(wing_pos_g['y'], wing_pos_g['x'])),
label='kite azimuth')
plot(s['time'], np.rad2deg(np.arctan2(-wing_pos_g['z'],
np.hypot(wing_pos_g['x'],
wing_pos_g['y']))),
label='kite elevation')
@MFig(title='Air Density (Measured at Ground Station)',
ylabel='Density [kg/m^3]', xlabel='Time [s]')
def PlotDensity(self, e, c, s, params):
plot(c['time'], c['state_est']['rho'], label='state_est.rho')
plot(c['time'],
np.full_like(c['time'],
params['system_params']['phys']['rho']),
label='hard-coded value')
@MFig(title='Tether Anchor Point', ylabel='[m]', xlabel='Time [s]')
def PlotTetherAnchorPoint(self, e, c, s, params):
mplot.PlotVec3(c['time'], c['state_est']['tether_anchor']['pos_g'],
label='pos_g [est]', linestyle='-')
mplot.PlotVec3(c['time'], c['state_est']['tether_anchor']['pos_g_f'],
label='pos_g_f [est]', linestyle='--')
| 46.047619 | 80 | 0.56828 | 17,057 | 0.928373 | 0 | 0 | 16,202 | 0.881837 | 0 | 0 | 6,807 | 0.370489 |
8d633804dd70bc9958af00b42a11e0de38e402fd | 4,122 | py | Python | scripts/old/modbus_server.py | SamKaiYang/ros_modbus_nex | b698cc73df65853866112f7501432a8509a2545c | [
"BSD-2-Clause"
]
| null | null | null | scripts/old/modbus_server.py | SamKaiYang/ros_modbus_nex | b698cc73df65853866112f7501432a8509a2545c | [
"BSD-2-Clause"
]
| null | null | null | scripts/old/modbus_server.py | SamKaiYang/ros_modbus_nex | b698cc73df65853866112f7501432a8509a2545c | [
"BSD-2-Clause"
]
| null | null | null | #!/usr/bin/env python
###########################################################################
# This software is graciously provided by HumaRobotics
# under the Simplified BSD License on
# github: [email protected]:baxter_tasker
# HumaRobotics is a trademark of Generation Robots.
# www.humarobotics.com
# Copyright (c) 2013, Generation Robots.
# All rights reserved.
# www.generationrobots.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
import rospy
from modbus.modbus_wrapper_server import ModbusWrapperServer
from std_msgs.msg import Int32MultiArray as HoldingRegister
if __name__=="__main__":
rospy.init_node("modbus_server")
port = 1234 # custom modbus port without requirement of sudo rights
# port = 502 # default modbus port
if rospy.has_param("~port"):
port = rospy.get_param("~port")
else:
rospy.loginfo("For not using the default port %d, add an arg e.g.: '_port:=1234'",port)
# Init modbus server with specific port
mws = ModbusWrapperServer(port)
# Stop the server if ros is shutdown. This should show that the server is stoppable
rospy.on_shutdown(mws.stopServer)
# Starts the server in a non blocking call
mws.startServer()
print "Server started"
###############
# Example 1
# write to the Discrete Input
mws.setDigitalInput(0,1) # args: address , value. sets address to value
# Example 2
# read from clients coil output
print "waiting for line 0 to be set to True"
result = mws.waitForCoilOutput(0,5) # args: address,timeout in sec. timeout of 0 is infinite. waits until address is true
if result:
print "got line 0 is True from baxter"
else:
print "timeout waiting for signal on line 0"
###############
# Example 3
# Listen for the writeable modbus registers in any node
def callback(msg):
rospy.loginfo("Modbus register have been written: %s",str(msg.data))
rospy.sleep(2)
sub = rospy.Subscriber("modbus_server/read_from_registers",HoldingRegister,callback,queue_size=500)
###############
###############
# Example 4
# Publisher to write first 20 modbus registers from any node
pub = rospy.Publisher("modbus_server/write_to_registers",HoldingRegister,queue_size=500)
rospy.sleep(1)
msg = HoldingRegister()
msg.data = range(20)
msg2 = HoldingRegister()
msg2.data = range(20,0,-1)
while not rospy.is_shutdown():
pub.publish(msg)
rospy.sleep(1)
pub.publish(msg2)
rospy.sleep(1)
################
rospy.spin()
mws.stopServer()
| 40.411765 | 125 | 0.694081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,916 | 0.707424 |
8d638991d71730377e930b6afff8fce13cde7b4a | 4,453 | py | Python | siptrackdlib/objectregistry.py | sii/siptrackd | f124f750c5c826156c31ae8699e90ff95a964a02 | [
"Apache-2.0"
]
| null | null | null | siptrackdlib/objectregistry.py | sii/siptrackd | f124f750c5c826156c31ae8699e90ff95a964a02 | [
"Apache-2.0"
]
| 14 | 2016-03-18T13:28:16.000Z | 2019-06-02T21:11:29.000Z | siptrackdlib/objectregistry.py | sii/siptrackd | f124f750c5c826156c31ae8699e90ff95a964a02 | [
"Apache-2.0"
]
| 7 | 2016-03-18T13:04:54.000Z | 2021-06-22T10:39:04.000Z | from siptrackdlib import errors
from siptrackdlib import log
class ObjectClass(object):
"""A class definition in the object registry.
Stores a reference to the class itself and also a list of valid child
classes (class_ids).
"""
def __init__(self, class_reference):
self.class_reference = class_reference
self.valid_children = {}
def registerChild(self, class_reference):
"""Register a class as a valid child class."""
self.valid_children[class_reference.class_id] = None
class ObjectRegistry(object):
"""Keeps track of registered classes and their valid children.
The object registry is used to keep track of valid classes and
what classes are valid children of a class.
It also allocates object ids and can be used to create new objects
based on the registry.
"""
def __init__(self):
self.object_classes = {}
self.object_classes_by_name = {}
self.next_oid = 0
def registerClass(self, class_reference):
"""Register a new class.
This creates a new ObjectClass and stores it in the registry,
enabling creation of objects of the given class.
The returned ObjectClass object can be used to register valid
children of the class.
"""
object_class = ObjectClass(class_reference)
self.object_classes[class_reference.class_id] = \
object_class
self.object_classes_by_name[class_reference.class_name] = \
object_class
return object_class
def isValidChild(self, parent_id, child_id):
"""Check if a class is a valid child of another class."""
if not parent_id in self.object_classes:
return False
parent = self.object_classes[parent_id]
if child_id not in parent.valid_children:
return False
return True
def getClass(self, class_name):
"""Returns the class reference for class registered with class_name."""
if class_name in self.object_classes_by_name:
return self.object_classes_by_name[class_name].class_reference
return None
def getClassById(self, class_id):
"""Returns the class reference for class registered with class_name."""
if class_id in self.object_classes:
return self.object_classes[class_id].class_reference
return None
def getIDByName(self, class_name):
"""Return a classes id given it's name."""
if class_name in self.object_classes_by_name:
object_class = self.object_classes_by_name[class_name]
return object_class.class_reference.class_id
return None
def allocateOID(self):
"""Allocate a new oid."""
ret = str(self.next_oid)
self.next_oid += 1
return ret
def revertOID(self):
"""Revert an oid allocation."""
self.next_oid -= 1
def createObject(self, class_id, parent_branch, *args, **kwargs):
"""Try to create a new object based on a registered class.
This will try to create a new object of 'class_id' type, allocating
it it's own oid. A new branch will also be created in the object
tree to hold the object.
"""
if class_id not in self.object_classes:
raise errors.SiptrackError(
'trying to create object with invalid class id \'%s\'' % (class_id))
object_class = self.object_classes[class_id]
oid = self.allocateOID()
branch = parent_branch.add(oid)
try:
obj = object_class.class_reference(oid, branch, *args, **kwargs)
except Exception as e:
branch.remove(recursive = False, callback_data = None)
self.revertOID()
raise
branch.ext_data = obj
return obj
def _createObject(self, class_id, branch):
"""Try to create _only_ a new object based on an oid and class id.
Similar to createObject, but takes a class id and an oid and only
creates a new object, no branch etc.
"""
if class_id not in self.object_classes:
raise errors.SiptrackError(
'trying to create object with invalid class id \'%s\'' % (class_id))
object_class = self.object_classes[class_id]
obj = object_class.class_reference(branch.oid, branch)
return obj
object_registry = ObjectRegistry()
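# Minimal usage sketch (Device/Attribute are hypothetical names; real classes register
# themselves elsewhere in siptrackdlib and must define class_id and class_name):
#     device_class = object_registry.registerClass(Device)
#     device_class.registerChild(Attribute)
#     if object_registry.isValidChild(Device.class_id, Attribute.class_id):
#         obj = object_registry.createObject(Attribute.class_id, parent_branch)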
| 37.108333 | 88 | 0.650348 | 4,352 | 0.977319 | 0 | 0 | 0 | 0 | 0 | 0 | 1,604 | 0.360207 |
8d66576529e5704ad9e6b2d90cc87687907b8c91 | 1,139 | py | Python | src/kol/request/CombatRequest.py | ZJ/pykol | c0523a4a4d09bcdf16f8c86c78da96914e961076 | [
"BSD-3-Clause"
]
| 1 | 2016-05-08T13:26:56.000Z | 2016-05-08T13:26:56.000Z | src/kol/request/CombatRequest.py | ZJ/pykol | c0523a4a4d09bcdf16f8c86c78da96914e961076 | [
"BSD-3-Clause"
]
| null | null | null | src/kol/request/CombatRequest.py | ZJ/pykol | c0523a4a4d09bcdf16f8c86c78da96914e961076 | [
"BSD-3-Clause"
]
| null | null | null | from GenericAdventuringRequest import GenericAdventuringRequest
class CombatRequest(GenericAdventuringRequest):
"""
A request used for a single round of combat. The user may attack, use an item or skill, or
attempt to run away.
"""
# What follows are a list of available actions.
ATTACK = 0
USE_ITEM = 1
USE_SKILL = 2
RUN_AWAY = 3
def __init__(self, session, action, param=None):
"""
In this constructor, action should be set to CombatRequest.ATTACK, CombatRequest.USE_ITEM,
CombatRequest.USE_SKILL, or CombatRequest.RUN_AWAY. If a skill or item is to be used, the
caller should also specify param to be the number of the item or skill the user wishes
to use.
"""
super(CombatRequest, self).__init__(session)
self.url = session.serverURL + "fight.php"
		if action == self.ATTACK:
			self.requestData["action"] = "attack"
		elif action == self.USE_ITEM:
			self.requestData["action"] = "useitem"
			self.requestData["whichitem"] = param
		elif action == self.USE_SKILL:
			self.requestData["action"] = "skill"
			self.requestData["whichskill"] = param
		elif action == self.RUN_AWAY:
			self.requestData["action"] = "runaway"
| 32.542857 | 92 | 0.72432 | 1,073 | 0.942054 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.492537 |
8d683b8c02d8d22cc3724afc4a6f8b486b4fd023 | 325 | py | Python | OLD.dir/myclient1.py | romchegue/Python | 444476088e64d5da66cb00174f3d1d30ebbe38f6 | [
"bzip2-1.0.6"
]
| null | null | null | OLD.dir/myclient1.py | romchegue/Python | 444476088e64d5da66cb00174f3d1d30ebbe38f6 | [
"bzip2-1.0.6"
]
| null | null | null | OLD.dir/myclient1.py | romchegue/Python | 444476088e64d5da66cb00174f3d1d30ebbe38f6 | [
"bzip2-1.0.6"
]
| null | null | null | '''
myclient1.py - imports mymod.py and check its operation.
'''
from mymod import test, countChars, countChars1, countLines, countLines1
text = 'test.txt'
file = open(text)
print(test(text), test(file))
print(countChars(text), countChars1(file))
print(countLines(text), countLines1(file))
print('\nedited again version')
| 23.214286 | 72 | 0.744615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.301538 |
8d6a85cb3cf62644daa8bec049af6d5de6f147e2 | 632 | py | Python | src/modules/dates/searchDates.py | leonardoleyva/api-agenda-uas | 697740a0a3feebb2ada01133db020fcf5127e1de | [
"MIT"
]
| 1 | 2022-03-13T02:28:29.000Z | 2022-03-13T02:28:29.000Z | src/modules/dates/searchDates.py | leonardoleyva/api-agenda-uas | 697740a0a3feebb2ada01133db020fcf5127e1de | [
"MIT"
]
| null | null | null | src/modules/dates/searchDates.py | leonardoleyva/api-agenda-uas | 697740a0a3feebb2ada01133db020fcf5127e1de | [
"MIT"
]
| null | null | null | from .date import Date
from ..response import handleResponse
from datetime import datetime
def searchDates():
req = Date().searchAll()
message = "Listado de citas" if req['status'] == True else "No se pudo conseguir el listado de citas, inténtelo más tarde"
dateToday = datetime.now().isoformat().split('T')[0]
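    # ISO-formatted dates (YYYY-MM-DD) order correctly under string comparison, so the check below keeps only today's and future appointments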
dates = []
for date in req['dates']:
dateDict = date.to_dict()
dateYYMMDD = dateDict['date'].split('T')[0]
if dateYYMMDD >= dateToday:
dates.append({**dateDict, 'id': date.id})
response = handleResponse(req['status'], message, dates)
return response
| 27.478261 | 126 | 0.643987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.192429 |
8d6c38dd172b4fa935c4b931081b7a40d9bc40a8 | 6,045 | py | Python | Spark/spark_media_localidad.py | Dielam/Dielam.github.io | 19f01d693ef2c590f3ac35a3a143ae3dedf8594e | [
"MIT"
]
| null | null | null | Spark/spark_media_localidad.py | Dielam/Dielam.github.io | 19f01d693ef2c590f3ac35a3a143ae3dedf8594e | [
"MIT"
]
| null | null | null | Spark/spark_media_localidad.py | Dielam/Dielam.github.io | 19f01d693ef2c590f3ac35a3a143ae3dedf8594e | [
"MIT"
]
| 1 | 2020-12-23T16:45:20.000Z | 2020-12-23T16:45:20.000Z | #!/usr/bin/python
import sys
from pyspark import SparkContext
from shutil import rmtree
import os.path as path
def generar(line):
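    # Build a cleaned row: the first two fields, the 'I'/'D'/'N' flag (allowing for rows where the
    # flag sits one column later), then the remaining price columns with blanks replaced by "0"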
array = []
array.append(line[0])
array.append(line[1])
aux = line[2]
ini = 2
fin = 18
if aux != "I" and aux != "D" and aux != "N":
aux = line[3]
ini = 3
fin = 19
array.append(aux)
ini+=1
for i in range(ini, fin):
if line[i] == '':
array.append("0")
else:
array.append(line[i])
return array
if len(sys.argv) > 1:
if path.exists("output"):
rmtree("output")
sc = SparkContext()
localidad = sys.argv[1]
localidadRDD = sc.textFile("Gasolineras.csv")
localidadRDD = localidadRDD.map(lambda line: line.encode("ascii", "ignore"))
localidadRDD = localidadRDD.map(lambda rows: rows.split(","))
localidadRDD = localidadRDD.filter(lambda rows: localidad == rows[5])
localidadRDD = localidadRDD.map(lambda rows: (rows[5], rows[7], rows[8], rows[9],rows[10], rows[11], rows[12], rows[13], rows[14], rows[15], rows[16], rows[17], rows[18], rows[19], rows[20], rows[21], rows[22], rows[23], rows[24]))
datosRDD = localidadRDD.map(generar)
if datosRDD.isEmpty():
result = sc.parallelize("0")
result.saveAsTextFile("output")
else:
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[5])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gasolina_95.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[6])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gasoleo_a.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[7])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gasoleo_b.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[8])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_bioetanol.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[9])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_nuevo_gasoleo_a.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[10])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_biodiesel.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[11])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_ester_metilico.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[12])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_bioalcohol.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[13])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gasolina_98.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[14])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gas_natural_comprimido.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[15])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gas_natural_licuado.txt")
precioRDD = datosRDD.map(lambda rows: ([rows[0], float(rows[16])]))
precioRDD = precioRDD.reduceByKey(lambda x,y: x+y)
tamRDD = datosRDD.count()
mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
mediaTotal = mediaTotal.map(lambda calc:(calc[0]/calc[1]))
mediaTotal.saveAsTextFile("output/media_localidad_gas_licuados_del_petr.txt")
else:
print "Error no ha introducido localidad."
| 45.451128 | 235 | 0.643176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.105211 |
8d6cc5852312640c236532b7026c1ac08efbc30f | 13,148 | py | Python | core/views/misc.py | ditttu/gymkhana-Nominations | 2a0e993c1b8362c456a9369b0b549d1c809a21df | [
"MIT"
]
| 3 | 2018-02-27T13:48:28.000Z | 2018-03-03T21:57:50.000Z | core/views/misc.py | ditttu/gymkhana-Nominations | 2a0e993c1b8362c456a9369b0b549d1c809a21df | [
"MIT"
]
| 6 | 2020-02-12T00:07:46.000Z | 2022-03-11T23:25:59.000Z | core/views/misc.py | ditttu/gymkhana-Nominations | 2a0e993c1b8362c456a9369b0b549d1c809a21df | [
"MIT"
]
| 1 | 2019-03-26T20:19:57.000Z | 2019-03-26T20:19:57.000Z | from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.shortcuts import render,HttpResponse
from django.views.generic.edit import CreateView, UpdateView, DeleteView
import csv, json
from datetime import date,datetime
from itertools import chain
from operator import attrgetter
from forms.models import Questionnaire
from forms.views import replicate
from core.models import *
from core.forms import *
from .nomi_cr import get_access_and_post_for_result, get_access_and_post
@login_required
def ratify(request, nomi_pk):
nomi = Nomination.objects.get(pk=nomi_pk)
access, view_post = get_access_and_post_for_result(request,nomi_pk)
if access:
if view_post.perms == "can ratify the post":
nomi.append()
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
else:
return render(request, 'no_access.html')
else:
return render(request, 'no_access.html')
@login_required
def request_ratify(request, nomi_pk):
nomi = Nomination.objects.get(pk=nomi_pk)
access, view_post = get_access_and_post_for_result(request,nomi_pk)
if access:
if view_post.parent:
to_add = view_post.parent
nomi.result_approvals.add(to_add)
nomi.nomi_approvals.add(to_add)
nomi.status = 'Sent for ratification'
nomi.save()
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
else:
return render(request, 'no_access.html')
@login_required
def cancel_ratify(request, nomi_pk):
nomi = Nomination.objects.get(pk=nomi_pk)
access, view_post = get_access_and_post_for_result(request,nomi_pk)
if access:
if view_post.parent:
to_remove = view_post.parent
nomi.result_approvals.remove(to_remove)
nomi.nomi_approvals.remove(to_remove)
nomi.status = 'Interview period'
nomi.save()
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
else:
return render(request, 'no_access.html')
@login_required
def cancel_result_approval(request, nomi_pk):
nomi = Nomination.objects.get(pk=nomi_pk)
access, view_post = get_access_and_post_for_result(request,nomi_pk)
if access:
to_remove = view_post.parent
if to_remove.parent not in nomi.result_approvals.all():
nomi.result_approvals.remove(to_remove)
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
else:
return render(request, 'no_access.html')
@login_required
def result_approval(request, nomi_pk):
nomi = Nomination.objects.get(pk=nomi_pk)
access, view_post = get_access_and_post_for_result(request,nomi_pk)
if access:
if view_post == nomi.nomi_post.parent:
nomi.show_result = True
to_add = view_post.parent
nomi.result_approvals.add(to_add)
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': nomi_pk}))
else:
return render(request, 'no_access.html')
@login_required
def create_deratification_request(request, post_pk, user_pk ,type):
post = Post.objects.get(pk=post_pk)
user =User.objects.get(pk=user_pk)
if request.user in post.parent.post_holders.all():
Deratification.objects.create(name=user, post=post,status = type, deratify_approval=post.parent)
return HttpResponseRedirect(reverse('child_post', kwargs={'pk': post_pk}))
@login_required
def approve_deratification_request(request,pk):
to_deratify = Deratification.objects.get(pk = pk)
view = to_deratify.deratify_approval
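    # Holders with ratification rights apply the request directly; otherwise it is escalated to the parent post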
if request.user in view.post_holders.all():
if view.perms == "can ratify the post":
to_deratify.post.post_holders.remove(to_deratify.name)
history=PostHistory.objects.filter(user=to_deratify.name).filter(post = to_deratify.post).first()
if to_deratify.status=='remove from post':
history.delete()
to_deratify.status = 'removed'
else:
history.end = date.today()
history.save()
to_deratify.status = 'deratified'
to_deratify.save()
else:
to_deratify.deratify_approval = view.parent
to_deratify.save()
return HttpResponseRedirect(reverse('post_view', kwargs={'pk':view.pk}))
else:
return render(request, 'no_access.html')
@login_required
def reject_deratification_request(request, pk):
to_deratify = Deratification.objects.get(pk=pk)
view = to_deratify.deratify_approval
if request.user in view.post_holders.all():
to_deratify.delete()
return HttpResponseRedirect(reverse('post_view', kwargs={'pk':view.pk}))
else:
return render(request, 'no_access.html')
'''
mark_as_interviewed, reject_nomination, accept_nomination: change the interview status / nomination_instance status
of the applicant
'''
def get_access_and_post_for_selection(request, nomi_pk):
nomi =Nomination.objects.get(pk=nomi_pk)
access = False
view_post = None
for post in nomi.result_approvals.all():
if request.user in post.post_holders.all():
access = True
view_post = post
break
return access, view_post
@login_required
def mark_as_interviewed(request, pk):
application = NominationInstance.objects.get(pk=pk)
id_nomi = application.nomination.pk
nomination = Nomination.objects.get(pk=id_nomi)
access, view_post = get_access_and_post_for_selection(request,id_nomi)
if access or request.user in nomination.interview_panel.all():
application.interview_status = 'Interview Done'
application.save()
return HttpResponseRedirect(reverse('nomi_answer', kwargs={'pk': pk}))
else:
return render(request, 'no_access.html')
@login_required
def accept_nomination(request, pk):
application = NominationInstance.objects.get(pk=pk)
id_accept = application.nomination.pk
nomination = Nomination.objects.get(pk=id_accept)
access, view_post = get_access_and_post_for_selection(request, id_accept)
if access or request.user in nomination.interview_panel.all():
application.status = 'Accepted'
application.save()
comment = '<strong>' + str(request.user.userprofile.name) + '</strong>' + ' Accepted '\
+ '<strong>' + str(application.user.userprofile.name) + '</strong>'
status = Commment.objects.create(comments=comment, nomi_instance=application)
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': id_accept}))
else:
return render(request, 'no_access.html')
@login_required
def reject_nomination(request, pk):
application = NominationInstance.objects.get(pk=pk)
id_reject = application.nomination.pk
nomination = Nomination.objects.get(pk=id_reject)
access, view_post = get_access_and_post_for_selection(request, id_reject)
if access or request.user in nomination.interview_panel.all():
application.status = 'Rejected'
application.save()
comment = '<strong>' + str(request.user.userprofile.name) + '</strong>' + ' Rejected ' \
+ '<strong>' + str(application.user.userprofile.name) + '</strong>'
status = Commment.objects.create(comments=comment, nomi_instance=application)
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': id_reject}))
else:
return render(request, 'no_access.html')
'''
append_user, replace_user: add and remove the current post-holders according to their selection status
'''
@login_required
def append_user(request, pk):
posts = request.user.posts.all()
access = False
for post in posts:
if post.perms == "can ratify the post":
access = True
break
if access:
nomi = Nomination.objects.get(pk=pk)
nomi.append()
return HttpResponseRedirect(reverse('applicants', kwargs={'pk': pk}))
else:
return render(request, 'no_access.html')
@login_required
def end_tenure(request):
posts = request.user.posts.all()
access = False
for post in posts:
if post.perms == "can ratify the post":
access = True
break
if access:
posts = Post.objects.all()
for post in posts:
for holder in post.post_holders.all():
try:
history = PostHistory.objects.get(post=post, user=holder)
if history.end:
if date.today() >= history.end:
post.post_holders.remove(holder)
except ObjectDoesNotExist:
pass
return HttpResponseRedirect(reverse('index'))
else:
return render(request, 'no_access.html')
# Import all posts of all clubs
# Check if their session has expired (31-3-2018 has passed)
# Remove them from the post
# Create the post history (No need, its already created)
## ------------------------------------------------------------------------------------------------------------------ ##
############################################ PROFILE VIEWS ##################################################
## ------------------------------------------------------------------------------------------------------------------ ##
@login_required
def profile_view(request):
pk = request.user.pk
my_posts = Post.objects.filter(post_holders=request.user)
history = PostHistory.objects.filter(user=request.user).order_by('start')
pending_nomi = NominationInstance.objects.filter(user=request.user).filter(nomination__status='Nomination out')
pending_re_nomi = NominationInstance.objects.filter(user=request.user).\
filter(nomination__status='Interview period and Nomination reopened')
pending_nomi = pending_nomi | pending_re_nomi
# show the instances that user finally submitted.. not the saved one
interview_re_nomi = NominationInstance.objects.filter(user=request.user).filter(submission_status = True).filter(nomination__status='Interview period and Reopening initiated')
interview_nomi = NominationInstance.objects.filter(user=request.user).filter(submission_status = True).filter(nomination__status='Interview period')
interview_nomi = interview_nomi | interview_re_nomi
declared_nomi = NominationInstance.objects.filter(user=request.user).filter(submission_status = True).filter(nomination__status='Sent for ratification')
try:
user_profile = UserProfile.objects.get(user__id=pk)
post_exclude_history = [] # In case a post is not registered in history
post_history = []
for his in history:
post_history.append(his.post)
for post in my_posts:
if post not in post_history:
post_exclude_history.append(post)
return render(request, 'profile.html', context={'user_profile': user_profile, 'history': history,
'pending_nomi': pending_nomi, 'declared_nomi': declared_nomi,
'interview_nomi': interview_nomi, 'my_posts': my_posts,
'excluded_posts': post_exclude_history})
except ObjectDoesNotExist:
return HttpResponseRedirect('create')
@login_required
def public_profile(request, pk):
student = UserProfile.objects.get(pk=pk)
student_user = student.user
history = PostHistory.objects.filter(user=student_user)
my_posts = Post.objects.filter(post_holders=student_user)
return render(request, 'public_profile.html', context={'student': student, 'history': history,
'my_posts': my_posts})
def UserProfileUpdate(request,pk):
profile = UserProfile.objects.get(pk = pk)
if profile.user == request.user:
form = ProfileForm(request.POST or None, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('profile'))
return render(request, 'nomi/userprofile_form.html', context={'form': form})
else:
return render(request, 'no_access.html')
class CommentUpdate(UpdateView):
model = Commment
fields = ['comments']
def get_success_url(self):
form_pk = self.kwargs['form_pk']
return reverse('nomi_answer', kwargs={'pk': form_pk})
class CommentDelete(DeleteView):
model = Commment
def get_success_url(self):
form_pk = self.kwargs['form_pk']
return reverse('nomi_answer', kwargs={'pk': form_pk})
def all_nominations(request):
all_nomi = Nomination.objects.all().exclude(status='Nomination created')
return render(request, 'all_nominations.html', context={'all_nomi': all_nomi})
| 34.783069 | 179 | 0.6601 | 402 | 0.030575 | 0 | 0 | 10,435 | 0.793657 | 0 | 0 | 2,098 | 0.159568 |
8d6deeb2db5e44e12af11dde00260d1e8aae607e | 29,706 | py | Python | make_paper_plots.py | mjbasso/asymptotic_formulae_examples | a1ba177426bf82e2a58e7b54e1874b088a86595f | [
"MIT"
]
| 1 | 2021-08-06T14:58:51.000Z | 2021-08-06T14:58:51.000Z | make_paper_plots.py | mjbasso/asymptotic_formulae_examples | a1ba177426bf82e2a58e7b54e1874b088a86595f | [
"MIT"
]
| null | null | null | make_paper_plots.py | mjbasso/asymptotic_formulae_examples | a1ba177426bf82e2a58e7b54e1874b088a86595f | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import logging
import os
import pickle
import time
from os.path import join as pjoin
import matplotlib.pyplot as plt
import numpy as np
import scipy
from matplotlib import rc
from scipy.optimize import least_squares
import asymptotic_formulae
from asymptotic_formulae import GaussZ0
from asymptotic_formulae import GaussZ0_MC
from asymptotic_formulae import nCRZ0
from asymptotic_formulae import nCRZ0_MC
from asymptotic_formulae import nSRZ0
from asymptotic_formulae import nSRZ0_MC
rc('font', **{'family': 'sans-serif','sans-serif': ['Helvetica']})
rc('text', usetex = True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s'))
logger.addHandler(sh)
# For creating a set of uniformly-spaced points on a log scale
def logVector(low, high, n):
low = np.log(low) / np.log(10)
high = np.log(high) / np.log(10)
step = (high - low) / n
vec = np.array([low + step * i for i in range(n + 1)])
return np.exp(np.log(10) * vec)
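# e.g. logVector(1., 100., 4) gives points uniformly spaced in log10: approximately [1., 3.16, 10., 31.6, 100.]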
# As described in Section 2.1.4
def nCRZ0_DiagTau(s, b, tau):
''' Calculate the asymptotic significance for a 1 SR + N CRs, diagonal tau measurement
s := expected signal yield in SR (float)
b := expected background yields in SR (vector of floats, size N)
tau := transfer coefficients, tau[i] carries background i yield in SR to CR i (vector of floats, size N)
Returns Z0 (float) '''
# Argument checking
b, tau = np.array(b), np.array(tau)
s, b, tau = float(s), b.astype(float), tau.astype(float)
assert b.ndim == 1 # b should be a vector
assert tau.ndim == 1 # tau should be a vector
assert len(b) == len(tau)
assert (tau >= 0.).all() # Assert tau contains transfer factors (i.e., all positive)
n = s + np.sum(b)
# System of equations
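    # Each equation is the stationarity condition d(lnL)/d(bhh[k]) = 0 of the background-only (mu = 0)
    # conditional fit, using Asimov data n = s + sum(b) in the SR and tau[k] * b[k] in CR k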
def func(bhh):
eqns = []
for k in range(len(b)):
eqns.append(n / np.sum(bhh) - 1. + tau[k] * (b[k] / bhh[k] - 1.))
return eqns
# Perform our minimization
res = least_squares(func, x0 = b, bounds = [tuple(len(b) * [0.]), tuple(len(b) * [np.inf])])
if not res.success:
raise RuntimeError('Minimization failed: status = %s, message = \'%s\'' % (res.status, res.message))
bhh = np.array(res.x)
# Calculate our significance
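    # Z0 = sqrt(-2 ln lambda(0)) evaluated on the Asimov dataset, with lambda(0) the profile likelihood ratio at mu = 0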
Z0 = np.sqrt(-2. * np.log((np.sum(bhh) / n) ** n * np.prod([(bhh[k] / b[k]) ** (tau[k] * b[k]) for k in range(len(b))])))
return Z0
# As described in Section 2.4.2
def GaussZ0_Decorr(s, b, sigma):
    ''' Calculate the asymptotic significance for a 1 SR + N decorrelated, Gaussian-constrained backgrounds measurement
s := expected signal yield in SR (float)
b := expected background yields in SR (vector of floats, size N)
sigma := width of Gaussian constraint ("absolute uncertainty") for each background yield (vector of floats, size N)
Returns Z0 (float) '''
# Argument checking
b, sigma = np.array(b), np.array(sigma)
s, b, sigma = float(s), b.astype(float), sigma.astype(float)
assert b.ndim == 1 # b should be a vector
assert sigma.ndim == 1 # sigma should be a vector
assert len(b) == len(sigma)
assert (sigma >= 0.).all() # Assert sigma contains widths (i.e., all positive)
n = s + np.sum(b)
# System of equations
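    # Stationarity conditions of the mu = 0 conditional fit (each multiplied through by sigma[k]):
    # n / sum(bhh) - 1 - (bhh[k] - b[k]) / sigma[k]^2 = 0, the last term being the Gaussian constraint pull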
def func(bhh):
eqns = []
for k in range(len(b)):
eqns.append(sigma[k] * (n / np.sum(bhh) - 1.) - (bhh[k] - b[k]) / sigma[k])
return eqns
# Perform our minimization
res = least_squares(func, x0 = b, bounds = [tuple(len(b) * [0.]), tuple(len(b) * [np.inf])])
if not res.success:
raise RuntimeError('Minimization failed: status = %s, message = \'%s\'' % (res.status, res.message))
bhh = np.array(res.x)
# Calculate our significance
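    # As above, but -2 ln lambda(0) now also includes the Gaussian penalty 0.5 * ((b[k] - bhh[k]) / sigma[k])^2 per background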
Z0 = np.sqrt(-2. * (n * np.log(np.sum(bhh) / n) + n - np.sum(bhh + 0.5 * ((b - bhh) / sigma) ** 2)))
return Z0
def makedir(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def load_data_from_pickle(path):
if os.path.exists(path):
with open(path, 'rb') as f:
data = pickle.load(f)
else:
data = {}
return data
def dump_data_to_pickle(data, path):
    if not os.path.exists(path):
        with open(path, 'wb') as f:
            pickle.dump(data, f)
def main():
basedir = os.path.dirname(os.path.abspath(__file__))
pickledir = makedir(pjoin(basedir, 'pickles/'))
plotdir = makedir(pjoin(basedir, 'plots/'))
#####################
### SECTION 2.1.1 ###
#####################
def Section2p1p1():
s = 50.
b1 = 100.
b2 = 50.
tau11 = 60.
tau22 = 40.
tau12 = np.linspace(0., b1 * tau11 / b2, 100)
tau21 = np.linspace(0., b2 * tau22 / b1, 100)
z0 = np.empty((len(tau12), len(tau21)))
for i in range(len(tau12)):
for j in range(len(tau21)):
z0[i, j] = nCRZ0(s, [b1, b2], [[tau11, tau12[i]], [tau21[j], tau22]])
fig = plt.figure()
ax = fig.add_subplot(111)
        pcm = ax.pcolormesh(tau12 * b2 / (tau11 * b1), tau21 * b1 / (tau22 * b2), z0.T, cmap = 'magma', shading = 'nearest')  # z0[i, j] is indexed (x, y); pcolormesh expects C[y, x]
pcm.set_edgecolor('face')
cbar = plt.colorbar(pcm)
ax.set_xlabel('($b_2$ in CR 1) / ($b_1$ in CR 1) [a.u.]')
ax.set_ylabel('($b_1$ in CR 2) / ($b_2$ in CR 2) [a.u.]')
cbar.set_label('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]', rotation = 270, labelpad = 20)
# ax.set_title('Asymptotic significance for CRs with mixed background processes', pad = 10)
plt.savefig(pjoin(plotdir, '1SRNCR_mixed_processes.eps'), format = 'eps', dpi = 1200)
plt.close()
multi = logVector(1, 1000, 100)
z0 = np.empty((len(multi), len(multi)))
for i in range(len(multi)):
for j in range(len(multi)):
z0[i, j] = nCRZ0(s, [b1, b2], [[multi[i], 0.1 * multi[i] * b1 / b2], [0.1 * multi[j] * b2 / b1, multi[j]]])
fig = plt.figure()
ax = fig.add_subplot(111)
        pcm = ax.pcolormesh(multi, multi, z0.T, cmap = 'magma', shading = 'nearest')  # z0[i, j] is indexed (x, y); pcolormesh expects C[y, x]
pcm.set_edgecolor('face')
cbar = plt.colorbar(pcm)
ax.set_xlabel('($b_1$ in CR 1) / ($b_1$ in SR) [a.u.]')
ax.set_ylabel('($b_2$ in CR 2) / ($b_2$ in SR) [a.u.]')
ax.set_xscale('log')
ax.set_yscale('log')
cbar.set_label('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]', rotation = 270, labelpad = 20)
# ax.set_title('Asymptotic significance for CRs varying transfer factors', pad = 10)
plt.savefig(pjoin(plotdir, '1SRNCR_varying_tau.eps'), format = 'eps', dpi = 1200)
plt.close()
#####################
### SECTION 2.1.2 ###
#####################
def Section2p1p2():
# Set the seed
np.random.seed(43)
datapath = pjoin(pickledir, 'Section2p1p2.pkl')
s = 10.
b1 = [round(n) for n in logVector(1., 1000., 10)]
b2 = [5., 25., 150.]
tau1 = 8.
tau2 = 5.
colours = ['g', 'b', 'r']
data = load_data_from_pickle(datapath)
for _b2, c in zip(b2, colours):
k = str(int(_b2))
if not data.get(k, {}):
data[k] = {'z0': [], 't0': [], 't1': []}
for _b1 in b1:
logger.info('On (b1, b2) = (%s, %s).' % (int(_b1), int(_b2)))
z0, t0, t1 = nCRZ0_MC(s, [_b1, _b2], [[tau1, 0.], [0., tau2]], return_t0_and_t1 = True, sleep = 0.001, ntoys = 50000)
data[k]['z0'].append(z0)
data[k]['t0'].append(t0)
data[k]['t1'].append(t1)
plt.plot(b1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $b_2 = %s$' % int(_b2))
b1Fine = logVector(b1[0], b1[-1], 1000)
plt.plot(b1Fine, [nCRZ0_DiagTau(s, [_b1, _b2], [tau1, tau2]) for _b1 in b1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $b_2 = %s$' % int(_b2))
plt.plot(b1Fine, s / np.sqrt(s + b1Fine + _b2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $b_2 = %s$' % int(_b2))
plt.xlim((b1[0], b1[-1]))
plt.ylim((0., 3.5))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
# plt.title('1 SR + 2 CRs Asymptotic Significance: $s = %s$, $\\tau_1 = %s$, $\\tau_2 = %s$' % (int(s), int(tau1), int(tau2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, '1SRplus2CR.eps'), format = 'eps', dpi = 1200)
plt.close()
axrange = (0., 25.)
bins = 100
for _b1 in [1., 1000.]:
t0, t1 = data['5']['t0'][b1.index(_b1)], data['5']['t1'][b1.index(_b1)]
plt.hist(t0, weights = len(t0) * [1. / len(t0)], range = axrange, bins = bins, histtype = 'step', color = 'b', label = '$f(t_0|\\mu^\\prime = 0)$')
plt.hist(t1, weights = len(t1) * [1. / len(t1)], range = axrange, bins = bins, histtype = 'step', color = 'r', label = '$f(t_0|\\mu^\\prime = 1)$')
plt.xlim(axrange)
plt.xlabel('Test statistic $t_0$ [a.u.]')
plt.ylabel('Normalized counts [a.u.]')
plt.yscale('log')
plt.legend()
plt.savefig(pjoin(plotdir, '1SRplus2CR_b1eq%s.eps' % int(_b1)), format = 'eps', dpi = 1200)
plt.close()
dump_data_to_pickle(data, datapath)
#####################
### SECTION 2.2.1 ###
#####################
def Section2p2p1():
# Set the seed
np.random.seed(44)
datapath = pjoin(pickledir, 'Section2p2p1.pkl')
s1 = [round(n) for n in logVector(1., 100., 10)]
s2 = [25., 10., 10.]
s3 = 12.
b = [1000., 1000., 3000.]
tau1 = 2.
tau2 = 10.
tau3 = 20.
colours = ['g', 'b', 'r']
data = load_data_from_pickle(datapath)
for _s2, _b, c in zip(s2, b, colours):
k = str(int(_s2)) + '_' + str(int(_b))
if not data.get(k, {}):
data[k] = {'z0': [], 't0': [], 't1': []}
for _s1 in s1:
logger.info('On (s1, s2, b) = (%s, %s, %s).' % (int(_s1), int(_s2), int(_b)))
ntoys = 100000 if (_s1 > 75.) else 50000
logger.info('Using %s toys.' % ntoys)
z0, t0, t1 = nSRZ0_MC([_s1, _s2, s3], _b, [tau1, tau2, tau3], return_t0_and_t1 = True, sleep = 0.001, ntoys = ntoys)
data[k]['z0'].append(z0)
data[k]['t0'].append(t0)
data[k]['t1'].append(t1)
plt.plot(s1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $(s_2, b) = (%s, %s)$' % (int(_s2), int(_b)))
s1Fine = logVector(s1[0], s1[-1], 1000)
plt.plot(s1Fine, [nSRZ0([_s1, _s2, s3], _b, [tau1, tau2, tau3]) for _s1 in s1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $(s_2, b) = (%s, %s)$' % (int(_s2), int(_b)))
plt.plot(s1Fine, np.sqrt((s1Fine / np.sqrt(s1Fine + _b / tau1)) ** 2 + (_s2 / np.sqrt(_s2 + _b / tau2)) ** 2 + (s3 / np.sqrt(s3 + _b / tau3)) ** 2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $(s_2, b) = (%s, %s)$' % (int(_s2), int(_b)))
plt.xlim((s1[0], s1[-1]))
plt.ylim((0., 5.0))
plt.xlabel('Signal yield in SR 1 $s_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
# plt.title('3 SRs + 1 CR Asymptotic Significance: $s_3 = %s$, $\\tau_1 = %s$, $\\tau_2 = %s$, $\\tau_3 = %s$' % (int(s3), int(tau1), int(tau2), int(tau3)))
plt.legend(loc = 'upper left', bbox_to_anchor = (1.0, 1.02))
plt.savefig(pjoin(plotdir, '3SRplus1CR.eps'), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
dump_data_to_pickle(data, datapath)
#####################
### SECTION 2.4.2 ###
#####################
def Section2p4p2_vsB1():
# Set the seed
np.random.seed(45)
datapath = pjoin(pickledir, 'Section2p4p2_vsB1.pkl')
sigma1 = 5.
sigma2 = 10.
s = 10.
b1 = [round(n) for n in logVector(1., 1000., 10)]
b2 = [5., 25., 150.]
R = [[lambda th: 1. + sigma1 / 100. * th, lambda th: 1.], [lambda th: 1., lambda th: 1. + sigma2 / 100. * th]]
S = [[1., 0.], [0., 1.]]
colours = ['g', 'b', 'r']
data = load_data_from_pickle(datapath)
for _b2, c in zip(b2, colours):
k = str(int(_b2))
if not data.get(k, {}):
data[k] = {'z0': [], 't0': [], 't1': []}
for _b1 in b1:
logger.info('On (b1, b2) = (%s, %s).' % (int(_b1), int(_b2)))
z0, t0, t1 = GaussZ0_MC(s, [_b1, _b2], R, S, return_t0_and_t1 = True, sleep = 0.001, ntoys = 50000)
data[k]['z0'].append(z0)
data[k]['t0'].append(t0)
data[k]['t1'].append(t1)
plt.plot(b1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $b_2 = %s$' % int(_b2))
b1Fine = logVector(b1[0], b1[-1], 1000)
plt.plot(b1Fine, [GaussZ0_Decorr(s, [_b1, _b2], [_b1 * sigma1 / 100., _b2 * sigma2 / 100.]) for _b1 in b1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $b_2 = %s$' % int(_b2))
plt.plot(b1Fine, s / np.sqrt(s + b1Fine + _b2 + (sigma1 / 100. * b1Fine) ** 2 + (sigma2 / 100. * _b2) ** 2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $b_2 = %s$' % int(_b2))
plt.xlim((b1[0], b1[-1]))
plt.ylim((0., 3.5))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
# plt.title('1 SR + 2 Gaussian Decorrelated Constraints Asymptotic Significance:\n$s = {}$, $\\sigma_1 = {}\\%$, $\\sigma_2 = {}\\%$'.format(int(s), int(sigma1), int(sigma2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, '1SRplus2GaussConst.eps'), format = 'eps', dpi = 1200)
plt.close()
dump_data_to_pickle(data, datapath)
def Section2p4p2_vsSigma():
sigma1 = np.hstack([logVector(0.1, 100., 15), logVector(100., 400., 3)[1:]])
sigma2 = [1., 10., 100.]
s = 10.
b1 = [25., 50., 50., 150.]
b2 = [25., 50., 150., 50.]
colours = ['gold', 'g', 'b', 'r']
fig, axs = plt.subplots(nrows = 2, ncols = 2, sharex = 'col', sharey = 'row', figsize = [2 * 6.0, 2 * 4.0])
axs[1, 1].axis('off')
for i, _sigma2 in enumerate(sigma2):
        # Set the seed - let's use a fresh seed on each loop iteration, as we are saving separate pickles
        # (this allows us to cleanly reproduce the results per pickle, without re-throwing all of the toys from the previous iterations)
np.random.seed(60 + i)
# Dump a pickle for each sigma2 loop
datapath = pjoin(pickledir, 'Section2p4p2_vsSigma_sigma2eq%s.pkl' % int(_sigma2))
data = load_data_from_pickle(datapath)
if i == 0:
ax = axs[0, 0]
elif i == 1:
ax = axs[0, 1]
elif i == 2:
ax = axs[1, 0]
elif i == 3:
continue
else:
ax = None
for _b1, _b2, c in zip(b1, b2, colours):
k = str(int(_b1)) + '_' + str(int(_b2))
if not data.get(k, {}):
data[k] = {'z0': [], 't0': [], 't1': []}
for _sigma1 in sigma1:
logger.info('On (sigma1, sigma2, b1, b2) = (%s, %s, %s, %s).' % (round(_sigma1, 5), round(_sigma2, 5), int(_b1), int(_b2)))
z0, t0, t1 = GaussZ0_MC(s, [_b1, _b2], R(_sigma1, _sigma2), S, return_t0_and_t1 = True, sleep = 0.001, ntoys = 50000, retry_first = False, skip_failed_toys = True)
data[k]['z0'].append(z0)
data[k]['t0'].append(t0)
data[k]['t1'].append(t1)
ax.plot(sigma1, data[k]['z0'], marker = 'o', color = c, linewidth = 0, label = 'Numerical: $(b_1, b_2) = (%s, %s)$' % (int(_b1), int(_b2)) if i == 0 else '')
sigma1Fine = logVector(sigma1[0], sigma1[-1] if sigma1[-1] > 1000. else 1000., 1000)
ax.plot(sigma1Fine, [GaussZ0_Decorr(s, [_b1, _b2], [_b1 * _sigma1 / 100., _b2 * _sigma2 / 100.]) for _sigma1 in sigma1Fine], linestyle = '-', markersize = 0, color = c, label = 'Asymptotic: $(b_1, b_2) = (%s, %s)$' % (int(_b1), int(_b2)) if i == 0 else '')
ax.plot(sigma1Fine, s / np.sqrt(s + _b1 + _b2 + (sigma1Fine / 100. * _b1) ** 2 + (_sigma2 / 100. * _b2) ** 2), linestyle = '--', markersize = 0, color = c, label = 'Simple: $(b_1, b_2) = (%s, %s)$' % (int(_b1), int(_b2)) if i == 0 else '')
ax.set_ylim((0., 1.4))
if i != 1: ax.set_ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
ax.text(40, 1.2, '$s = {}$, $\\sigma_2 = {}\\%$'.format(int(s), int(_sigma2)), fontsize = 12, bbox = {'facecolor': 'white', 'pad': 10})
if i != 0:
ax.set_xlim((sigma1[0], sigma1[-1] if sigma1[-1] > 1000. else 1000.))
ax.set_xlabel('Background 1 yield uncertainty in SR $\\sigma_1$ [\\%]')
ax.set_xscale('log')
if i == 1: ax.xaxis.set_tick_params(labelbottom = True)
dump_data_to_pickle(data, datapath)
# fig.suptitle('1 SR + 2 Decorrelated Gaussian Constraints Asymptotic Significance')
axs[0, 0].legend(loc = 'upper left', bbox_to_anchor = (1.05, -0.15))
plt.subplots_adjust(hspace = 0.05, wspace = 0.05) # , top = 0.95, bottom = 0.05)
plt.savefig(pjoin(plotdir, '1SRplus2GaussConst_err.eps'), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
#####################
### SECTION 2.4.4 ###
#####################
def Section2p4p4_Corr():
# Set the seed
np.random.seed(47)
datapath = pjoin(pickledir, 'Section2p4p4_Corr.pkl')
s = 10.
b1 = [round(n) for n in logVector(1., 1000., 10)]
b2 = 5.
sigma1 = 35.
sigma2 = 70.
R = [[lambda th: 1. + sigma1 / 100. * th, lambda th: 1.], [lambda th: 1., lambda th: 1. + sigma2 / 100. * th]]
S = [[1., 0.75], [0.75, 1.]]
data = load_data_from_pickle(datapath)
if not all(data.get(k, []) for k in ['z0', 't0', 't1']):
data.update({'z0': [], 't0': [], 't1': []})
for _b1 in b1:
logger.info('On b1 = %s.' % int(_b1))
z0, t0, t1 = GaussZ0_MC(s, [_b1, b2], R, S, return_t0_and_t1 = True, sleep = 0.002, ntoys = 50000)
data['z0'].append(z0)
data['t0'].append(t0)
data['t1'].append(t1)
plt.plot(b1, data['z0'], marker = 'o', color = 'r', linewidth = 0, label = 'Numerical')
b1Fine = logVector(b1[0], b1[-1], 1000)
plt.plot(b1Fine, [GaussZ0(s = s, b = [_b1, b2], R = R, S = S) for _b1 in b1Fine], linestyle = '-', markersize = 0, color = 'r', label = 'Asymptotic (corr.)')
plt.plot(b1Fine, [GaussZ0(s = s, b = [_b1, b2], R = R, S = [[1., 0.], [0., 1.]]) for _b1 in b1Fine], linestyle = ':', markersize = 0, color = 'darkred', label = 'Asymptotic (decorr.)')
plt.plot(b1Fine, s / np.sqrt(s + b1Fine + b2 + (sigma1 / 100. * b1Fine) ** 2 + (sigma2 / 100. * b2) ** 2), linestyle = '--', markersize = 0, color = 'lightcoral', label = 'Simple')
plt.xlim((b1[0], b1[-1]))
plt.ylim((0., 2.))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
# plt.title('1 SR + 2 Gaussian Correlated Constraints Asymptotic Significance:\n$s = {}$, $b_2 = {}$, $\\sigma_1 = {}\\%$, $\\sigma_2 = {}\\%$'.format(int(s), int(b2), int(sigma1), int(sigma2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, '1SRplus2GaussConst_corr.eps'), format = 'eps', dpi = 1200)
plt.close()
dump_data_to_pickle(data, datapath)
#####################
### SECTION 2.4.5 ###
#####################
def Section2p4p5_Response():
# Set the seed
np.random.seed(49)
def smooth_interpolate(th, func1, func2, weight):
return weight(th) * func1(th) + (1. - weight(th)) * func2(th)
def heaviside(th, sigma_lo, sigma_hi):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: 1. - np.heaviside(th, 1.))
def arctan(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: (1. - 2. / np.pi * np.arctan(np.pi / 2. * k * th)) / 2.)
def tanh(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: (1. - np.tanh(k * th)) / 2.)
def erf(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: (1. - scipy.special.erf(k * th)) / 2.)
def sigmoid(th, sigma_lo, sigma_hi, k = 10.):
return smooth_interpolate(th, lambda th: 1. + sigma_lo * th, lambda th: 1. + sigma_hi * th, lambda th: 1. - 1. / (1. + np.exp(-k * th)))
response_functions = {'Heaviside': (heaviside, 'k', '-'), 'arctan': (arctan, 'g', '--'), 'tanh': (tanh, 'b', ':'), 'erf': (erf, 'r', '-.'), 'sigmoid': (sigmoid, 'gold', '-')}
sigma_lo = 0.20
sigma_hi = 0.35
th = np.linspace(-1., +1., 1000)
for l, (f, c, ls) in response_functions.items():
plt.plot(th, f(th, sigma_lo, sigma_hi), color = c, label = l, linestyle = ls)
plt.xlim((th[0], th[-1]))
plt.ylim((1. - sigma_lo, 1. + sigma_hi))
plt.xlabel('Nuisance parameter $\\theta$ [a.u.]')
plt.ylabel('Response function $R(\\theta)$ [a.u.]')
# plt.title('Different Response Functions')
plt.legend(loc = 'upper left')
plt.savefig(pjoin(plotdir, 'response_functions.eps'), format = 'eps', dpi = 1200)
plt.xlim((-0.2, +0.2))
plt.ylim((0.95, 1.075))
plt.savefig(pjoin(plotdir, 'response_functions_zoomed.eps'), format = 'eps', dpi = 1200)
plt.close()
# 1st derivatives:
th = np.linspace(-1., +1., 1000)
for l, (f, c, ls) in response_functions.items():
plt.plot(th, scipy.misc.derivative(lambda th: f(th, sigma_lo, sigma_hi), th, dx = 1e-6), color = c, label = l, linestyle = ls)
plt.xlim((th[0], th[-1]))
plt.ylim((0.15, 0.40))
plt.xlabel('Nuisance parameter $\\theta$ [a.u.]')
plt.ylabel('Derivative of response function $dR(\\theta)/d\\theta$ [a.u.]')
    # plt.title('Derivatives of Different Response Functions')
plt.legend(loc = 'upper left')
plt.savefig(pjoin(plotdir, 'response_functions_derivatives.eps'), format = 'eps', dpi = 1200)
plt.close()
s = 10.
b1 = logVector(1., 10000., 100)
b2 = 5.
sigma1_lo = 20. / 100.
sigma1_hi = 35. / 100.
sigma2_lo = 70. / 100.
sigma2_hi = 90. / 100.
R = lambda sigma1_lo, sigma1_hi, sigma2_lo, sigma2_hi: [[lambda th: f(th, sigma1_lo, sigma1_hi), lambda th: 1.], [lambda th: 1., lambda th: f(th, sigma2_lo, sigma2_hi)]]
S = [[1., 0.75], [0.75, 1.]]
for l, (f, c, ls) in response_functions.items():
plt.plot(b1, [GaussZ0(s = s, b = [_b1, b2], R = R(sigma1_lo, sigma1_hi, sigma2_lo, sigma2_hi), S = S) for _b1 in b1], linestyle = ls, markersize = 0, color = c, label = l)
plt.xlim((b1[0], b1[-1]))
plt.ylim((0.001, 10.))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
plt.yscale('log')
# plt.title('Sensitivities for Different Response Functions:\n$s = {}$, $b_2 = {}$'.format(int(s), int(b2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, 'response_functions_z0_b2eq%s.eps' % int(b2)), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
s = 100.
b2 = 10000.
for l, (f, c, ls) in response_functions.items():
plt.plot(b1, [GaussZ0(s = s, b = [_b1, b2], R = R(sigma1_lo, sigma1_hi, sigma2_lo, sigma2_hi), S = S) for _b1 in b1], linestyle = ls, markersize = 0, color = c, label = l)
plt.xlim((b1[0], b1[-1]))
plt.ylim((0.008, 0.02))
plt.xlabel('Background 1 yield in SR $b_1$ [a.u.]')
plt.ylabel('Significance of discovery $\\textrm{med}[Z_0|\\mu^\\prime=1]$ [a.u.]')
plt.xscale('log')
plt.yscale('log')
# plt.title('Sensitivities for Different Response Functions:\n$s = {}$, $b_2 = {}$'.format(int(s), int(b2)))
plt.legend(loc = 'upper right')
plt.savefig(pjoin(plotdir, 'response_functions_z0_b2eq%s.eps' % int(b2)), format = 'eps', dpi = 1200, bbox_inches = 'tight')
plt.close()
#####################
### SECTION 2.4.6 ###
#####################
def Section2p4p6_CPU():
# Set the seed
np.random.seed(48)
datapath = pjoin(pickledir, 'Section2p4p6_CPU.pkl')
s = 10.
b1 = 10.
b2 = 5.
sigma1 = 35.
sigma2 = 70.
R = [[lambda th: 1. + sigma1 / 100. * th, lambda th: 1.], [lambda th: 1., lambda th: 1. + sigma2 / 100. * th]]
S = [[1., 0.75], [0.75, 1.]]
ntoys = [round(n) for n in logVector(1000, 1000000, 40)]
data = load_data_from_pickle(datapath)
if not all(data.get(k, []) for k in ['z0', 't0', 't1', 'cpu']):
data.update({'z0': [], 't0': [], 't1': [], 'cpu': []})
for _ntoys in ntoys:
logger.info('On ntoys = %s.' % int(_ntoys))
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.WARNING)
start = time.clock()
z0, t0, t1 = GaussZ0_MC(s, [b1, b2], R, S, return_t0_and_t1 = True, sleep = 0.001, ntoys = _ntoys, retry_first = False, skip_failed_toys = True)
stop = time.clock()
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.DEBUG)
data['z0'].append(z0)
data['t0'].append(t0)
data['t1'].append(t1)
delta = stop - start
logger.info('Z0 = %s, CPU time = %s s.' % (z0, delta))
data['cpu'].append(delta)
if not all(data.get(k, []) for k in ['cpu_asymptotic', 'z0_asymptotic']):
data['cpu_asymptotic'] = []
data['z0_asymptotic'] = []
for i in range(len(ntoys)):
logger.info('On iteration %s.' % i)
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.WARNING)
start = time.clock()
z0 = GaussZ0(s = s, b = [b1, b2], R = R, S = S)
stop = time.clock()
logging.getLogger(asymptotic_formulae.__name__).setLevel(level = logging.DEBUG)
delta = stop - start
logger.info('CPU time = %s s.' % delta)
data['cpu_asymptotic'].append(delta)
data['z0_asymptotic'].append(z0)
z0 = GaussZ0(s = s, b = [b1, b2], R = R, S = S)
fig = plt.figure()
fig, axs = plt.subplots(2, 1, sharex = True)
fig.subplots_adjust(hspace = 0.1)
# fig.suptitle('CPU Comparisons: Numerical vs. Asymptotic for Gaussian Constraints')
axs[0].plot(ntoys, data['z0'], color = 'darkorange', label = 'Numerical')
axs[0].plot(ntoys, data['z0_asymptotic'], color = 'navy', label = 'Asymptotic')
axs[0].set_ylabel('Significance of discovery [a.u.]')
axs[0].set_ylim((1.15, 1.30))
axs[0].legend(loc = 'upper right')
axs[1].plot(ntoys, data['cpu'], color = 'darkorange', label = 'Numerical')
axs[1].plot(ntoys, data['cpu_asymptotic'], color = 'navy', label = 'Asymptotic')
axs[1].set_xlabel('Number of toys [a.u.]')
axs[1].set_ylabel('CPU time [s]')
axs[1].set_xlim((ntoys[0], ntoys[-1]))
axs[1].set_ylim((1e-3, 1e4))
axs[1].set_xscale('log')
axs[1].set_yscale('log')
plt.savefig(pjoin(plotdir, 'Section2p4p2_CPU.eps'), format = 'eps', dpi = 1200)
plt.close()
dump_data_to_pickle(data, datapath)
Section2p1p1()
Section2p1p2()
Section2p2p1()
Section2p4p2_vsB1()
Section2p4p2_vsSigma()
Section2p4p4_Corr()
Section2p4p5_Response()
Section2p4p6_CPU()
if __name__ == '__main__':
main()
| 46.85489 | 272 | 0.523564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,290 | 0.245405 |
8d6e5ae82deb7b5311529c66cb9a669824faeec2 | 2,645 | py | Python | tests/test_compliance.py | simongarisch/pytrade | 6245c0a47017a880299fa7704a49580f394fa87b | [
"MIT"
]
| 2 | 2020-10-19T02:44:57.000Z | 2021-11-08T10:45:25.000Z | tests/test_compliance.py | simongarisch/pytrade | 6245c0a47017a880299fa7704a49580f394fa87b | [
"MIT"
]
| 1 | 2020-12-24T02:59:58.000Z | 2020-12-24T02:59:58.000Z | tests/test_compliance.py | simongarisch/pytrade | 6245c0a47017a880299fa7704a49580f394fa87b | [
"MIT"
]
| null | null | null | import pytest
from pxtrade.assets import reset, Stock, Portfolio
from pxtrade.compliance import (
Compliance,
UnitLimit,
WeightLimit,
)
class TestCompliance(object):
def setup_method(self, *args):
reset()
stock1 = self.stock1 = Stock("BBB US", 2.00, currency_code="USD")
stock2 = self.stock2 = Stock("CCC US", 2.00, currency_code="USD")
portfolio = self.portfolio = Portfolio("USD")
portfolio.transfer(stock1, 200)
portfolio.transfer(stock2, 200)
self.compliance = Compliance()
def test_compliance(self):
with pytest.raises(TypeError):
# Requires a ComplianceRule instance.
self.compliance.add_rule("ZZZ")
with pytest.raises(TypeError):
# Requires a Portfolio instance.
self.compliance.passes("ZZZ")
def teardown_method(self, *args):
del self.stock1
del self.stock2
del self.portfolio
del self.compliance
def test_unit_limit(self):
with pytest.raises(TypeError):
# Requires an Asset instance
UnitLimit("ABC US", 100)
with pytest.raises(TypeError):
# Limit must be an integer.
UnitLimit(self.stock1, "100")
rule = UnitLimit(self.stock1, 200)
compliance = self.compliance
portfolio = self.portfolio
compliance.add_rule(rule)
assert compliance.passes(portfolio)
portfolio.transfer(self.stock1, 1)
assert not compliance.passes(portfolio)
compliance.remove_rule(rule)
assert compliance.passes(portfolio)
def test_unit_limit_str(self):
rule = UnitLimit(self.stock1, 200)
assert str(rule) == "UnitLimit('BBB US', 200)"
rule = UnitLimit(self.stock1, 1000)
assert str(rule) == "UnitLimit('BBB US', 1,000)"
def test_weight_limit(self):
with pytest.raises(TypeError):
# Requires an Asset instance
WeightLimit("ABC US", 0.5)
with pytest.raises(TypeError):
            # Limit must be a number, not a string.
WeightLimit(self.stock2, "0.5")
rule = WeightLimit(self.stock2, 0.50)
compliance = self.compliance
portfolio = self.portfolio
compliance.add_rule(rule)
assert compliance.passes(portfolio)
portfolio.transfer(self.stock2, 1)
assert not compliance.passes(portfolio)
compliance.remove_rule(rule)
assert compliance.passes(portfolio)
def test_weight_limit_str(self):
rule = WeightLimit(self.stock2, 0.50)
assert str(rule) == "WeightLimit('CCC US', 0.50)"
| 32.654321 | 73 | 0.625331 | 2,494 | 0.942911 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.124386 |
8d7113f4a3fa2caf2cf878a899bd18ce82a24a1b | 103 | py | Python | article/serializers/__init__.py | mentix02/medialist-backend | 397b1a382b12bab273360dadb0b3c32de43747cd | [
"MIT"
]
| 1 | 2019-11-22T19:29:39.000Z | 2019-11-22T19:29:39.000Z | article/serializers/__init__.py | mentix02/medialist-backend | 397b1a382b12bab273360dadb0b3c32de43747cd | [
"MIT"
]
| 1 | 2019-11-25T09:50:07.000Z | 2021-07-15T07:05:28.000Z | article/serializers/__init__.py | mentix02/medialist-backend | 397b1a382b12bab273360dadb0b3c32de43747cd | [
"MIT"
]
| null | null | null | from article.serializers.serializers import (
ArticleListSerializer,
ArticleDetailSerializer
)
| 20.6 | 45 | 0.805825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8d73a34deeb4110e24d2f659a64dcdc60d79219a | 1,447 | py | Python | delong_functions/initialization.py | braddelong/22-jupyter-ps01 | 95e8714e1723fb8328380a5d14aafabe2ee0795a | [
"MIT"
]
| null | null | null | delong_functions/initialization.py | braddelong/22-jupyter-ps01 | 95e8714e1723fb8328380a5d14aafabe2ee0795a | [
"MIT"
]
| null | null | null | delong_functions/initialization.py | braddelong/22-jupyter-ps01 | 95e8714e1723fb8328380a5d14aafabe2ee0795a | [
"MIT"
]
| null | null | null | # set up the environment by reading in libraries:
# os... graphics... data manipulation... time... math... statistics...
import sys
import os
from urllib.request import urlretrieve
import matplotlib as mpl
import matplotlib.pyplot as plt
import PIL as pil
from IPython.display import Image
import pandas as pd
from pandas import DataFrame, Series
import pandas_datareader
from datetime import datetime
import scipy as sp
import numpy as np
import math
import random
import seaborn as sns
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
# graphics setup: seaborn-darkgrid and figure size...
plt.style.use('seaborn-darkgrid')
figure_size = plt.rcParams["figure.figsize"]
figure_size[0] = 7
figure_size[1] = 7
plt.rcParams["figure.figsize"] = figure_size
# import delong functions
from delong_functions.data_functions import getdata_read_or_download # get or download data file
from delong_functions.stat_functions import initialize_basic_figure # initialize graphics
from delong_functions.data_functions import data_FREDseries # construct a useful dict with source
# and notes info from a previously
# downloaded FRED csv file
# check to see if functions successfully created...
# NOW COMMENTED OUT: getdata_read_or_download? initialize_basic_figure? | 32.155556 | 106 | 0.724948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.357291 |
8d74d9562cd8858adb9b65b43c92263f531590a9 | 608 | py | Python | sdk/bento/graph/value.py | bentobox-dev/bento-box | 3e10c62f586c1251529e059b6af515d4d03c60e9 | [
"MIT"
]
| 1 | 2021-01-02T02:50:15.000Z | 2021-01-02T02:50:15.000Z | sdk/bento/graph/value.py | joeltio/bento-box | 3e10c62f586c1251529e059b6af515d4d03c60e9 | [
"MIT"
]
| 48 | 2020-10-21T07:42:30.000Z | 2021-02-15T19:34:55.000Z | sdk/bento/graph/value.py | joeltio/bento-box | 3e10c62f586c1251529e059b6af515d4d03c60e9 | [
"MIT"
]
| null | null | null | #
# Bentobox
# SDK - Graph
# Graph Value
#
from typing import Any
from bento.value import wrap
from bento.protos.graph_pb2 import Node
def wrap_const(val: Any):
"""Wrap the given native value as a Constant graph node.
If val is a Constant node, returns value as is.
Args:
val: Native value to wrap.
Returns:
The given value wrapped as a constant graph node.
"""
# check if already constant node, return as is if true.
if isinstance(val, Node) and val.WhichOneof("op") == "const_op":
return val
return Node(const_op=Node.Const(held_value=wrap(val)))
| 25.333333 | 68 | 0.677632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.557566 |
8d76f8f9957c274ab98fcb861cac123b90567879 | 771 | py | Python | app/validators/user_validator.py | allanzi/truck-challenge | 7734a011de899184b673e99fd1c2ff92a6af65b9 | [
"CECILL-B"
]
| null | null | null | app/validators/user_validator.py | allanzi/truck-challenge | 7734a011de899184b673e99fd1c2ff92a6af65b9 | [
"CECILL-B"
]
| null | null | null | app/validators/user_validator.py | allanzi/truck-challenge | 7734a011de899184b673e99fd1c2ff92a6af65b9 | [
"CECILL-B"
]
| null | null | null | from marshmallow import Schema, fields
from marshmallow.validate import Length, Range
class UserCreateValidator(Schema):
name = fields.Str(required=True, validate=Length(max=60))
age = fields.Integer(required=True, validate=Range(min=18, max=100))
driver_license_type = fields.Str(required=True, validate=Length(max=5))
is_busy = fields.Bool(required=True)
has_vehicle = fields.Bool(required=True)
vehicle_type_id = fields.Integer(required=True)
class UserUpdateValidator(Schema):
name = fields.Str(validate=Length(max=60))
age = fields.Integer(validate=Range(min=18, max=100))
driver_license_type = fields.Str(validate=Length(max=5))
is_busy = fields.Bool()
has_vehicle = fields.Bool()
vehicle_type_id = fields.Integer() | 42.833333 | 75 | 0.743191 | 682 | 0.884565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8d783ab1b46b55a24509d554110a68bdbb340935 | 11,660 | py | Python | montecarlo/mcpy/monte_carlo.py | v-asatha/EconML | eb9ac829e93abbc8a163ab09d905b40370b21b1a | [
"MIT"
]
| null | null | null | montecarlo/mcpy/monte_carlo.py | v-asatha/EconML | eb9ac829e93abbc8a163ab09d905b40370b21b1a | [
"MIT"
]
| null | null | null | montecarlo/mcpy/monte_carlo.py | v-asatha/EconML | eb9ac829e93abbc8a163ab09d905b40370b21b1a | [
"MIT"
]
| null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
import numpy as np
from joblib import Parallel, delayed
import joblib
import argparse
import importlib
from itertools import product
import collections
from copy import deepcopy
from mcpy.utils import filesafe
from mcpy import plotting
def check_valid_config(config):
"""
Performs a basic check of the config file, checking if the necessary
subsections are present.
If multiple config files are being made that use the same dgps and/or methods,
it may be helpful to tailor the config check to those dgps and methods. That way,
one can check that the correct parameters are being provided for those dgps and methods.
This is specific to one's implementation, however.
"""
assert 'type' in config, "config dict must specify config type"
assert 'dgps' in config, "config dict must contain dgps"
assert 'dgp_opts' in config, "config dict must contain dgp_opts"
assert 'method_opts' in config, "config dict must contain method_opts"
assert 'mc_opts' in config, "config dict must contain mc_opts"
assert 'metrics' in config, "config dict must contain metrics"
assert 'methods' in config, "config dict must contain methods"
assert 'plots' in config, "config dict must contain plots"
assert 'single_summary_metrics' in config, "config dict must specify which metrics are plotted in a y-x plot vs. as a single value per dgp and method"
assert 'target_dir' in config, "config must contain target_dir"
assert 'reload_results' in config, "config must contain reload_results"
assert 'n_experiments' in config['mc_opts'], "config[mc_opts] must contain n_experiments"
assert 'seed' in config['mc_opts'], "config[mc_opts] must contain seed"
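# A minimal sketch (not part of this module) of a config dict that would pass check_valid_config.
# The dgp/method/metric names and callables below are placeholders; a real config points at actual
# implementations. List-valued entries in 'dgp_opts' are what MonteCarloSweep sweeps over, and
# MonteCarlo.run additionally expects a 'dgp_instance_fns' entry (and MonteCarloSweep.run 'sweep_plots').
# example_config = {
#     'type': 'example',
#     'dgps': {'my_dgp': my_dgp_fn},
#     'dgp_opts': {'my_dgp': {'n_samples': 100}},
#     'dgp_instance_fns': {'my_dgp': my_dgp_instance_fn},
#     'methods': {'my_method': my_method_fn},
#     'method_opts': {'my_method': {}},
#     'metrics': {'my_metric': my_metric_fn},
#     'plots': {},
#     'sweep_plots': {},
#     'single_summary_metrics': ['my_metric'],
#     'mc_opts': {'n_experiments': 10, 'seed': 123},
#     'target_dir': 'results',
#     'reload_results': False,
# }
# MonteCarlo(example_config).run()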
class MonteCarlo:
"""
This class contains methods to run (multiple) monte carlo experiments
Experiments are constructed from a config file, which mainly consists of
references to the implementations of four different kinds of items, in
addition to various parameters for the experiment. See the README for
    a description of the config file, or look at an example in the configs directory.
The four main items are:
- data generating processes (dgps): functions that generate data according to
some assumed underlying model
- methods: functions that take in data and produce other data. In our case,
they train on data produced by DGPs and then produce counterfactual estimates
- metrics: functions that take in the results of estimators and calculate metrics
- plots: functions that take in the metric results, etc. and generate plots
"""
def __init__(self, config):
self.config = config
check_valid_config(self.config)
# these param strings are for properly naming results saved to disk
config['param_str'] = '_'.join(['{}_{}'.format(filesafe(k), v) for k,v in self.config['mc_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), v) for k,v in self.config['dgp_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), v) for k,v in self.config['method_opts'].items()])
def experiment(self, instance_params, seed):
"""
Given instance parameters to pass on to the data generating processes,
runs an experiment on a single randomly generated instance of data and returns the
parameter estimates for each method and the evaluated metrics for each method.
Parameters
----------
instance_params : dictionary
            instance parameters that DGP functions may use
seed : int
random seed for random data generation
Returns
-------
experiment_results : dictionary
results of the experiment, depending on what the methods return.
These are stored by dgp_name and then by method_name.
true_params : dictionary
true parameters of the DGP, indexed by dgp_name, used for metrics
calculation downstream
"""
np.random.seed(seed)
experiment_results = {}
true_params = {}
for dgp_name, dgp_fn in self.config['dgps'].items():
data, true_param = dgp_fn(self.config['dgp_opts'][dgp_name], instance_params[dgp_name], seed)
true_params[dgp_name] = true_param
experiment_results[dgp_name] = {}
for method_name, method in self.config['methods'].items():
experiment_results[dgp_name][method_name] = method(data, self.config['method_opts'][method_name], seed)
return experiment_results, true_params
def run(self):
"""
Runs multiple experiments in parallel on randomly generated instances and samples and returns
the results for each method and the evaluated metrics for each method across all
experiments.
Returns
-------
simulation_results : dictionary
dictionary indexed by [dgp_name][method_name] for individual experiment results
metric_results : dictionary
dictionary indexed by [dgp_name][method_name][metric_name]
        true_params : dictionary
dictionary indexed by [dgp_name]
"""
random_seed = self.config['mc_opts']['seed']
if not os.path.exists(self.config['target_dir']):
os.makedirs(self.config['target_dir'])
instance_params = {}
for dgp_name in self.config['dgps']:
instance_params[dgp_name] = self.config['dgp_instance_fns'][dgp_name](self.config['dgp_opts'][dgp_name], random_seed)
# results_file = os.path.join(self.config['target_dir'], 'results_{}.jbl'.format(self.config['param_str']))
results_file = os.path.join(self.config['target_dir'], 'results_seed{}.jbl'.format(random_seed))
if self.config['reload_results'] and os.path.exists(results_file):
results = joblib.load(results_file)
else:
results = Parallel(n_jobs=-1, verbose=1)(
delayed(self.experiment)(instance_params, random_seed + exp_id)
for exp_id in range(self.config['mc_opts']['n_experiments']))
joblib.dump(results, results_file)
simulation_results = {} # note that simulation_results is a vector of individual experiment_results. from experiment()
metric_results = {}
true_params = {}
for dgp_name in self.config['dgps'].keys():
simulation_results[dgp_name] = {}
metric_results[dgp_name] = {}
for method_name in self.config['methods'].keys():
simulation_results[dgp_name][method_name] = [results[i][0][dgp_name][method_name] for i in range(self.config['mc_opts']['n_experiments'])]
true_params[dgp_name] = [results[i][1][dgp_name] for i in range(self.config['mc_opts']['n_experiments'])]
metric_results[dgp_name][method_name] = {}
for metric_name, metric_fn in self.config['metrics'].items():
# for metric_name, metric_fn in self.config['metrics'][method_name].items(): # for method specific parameters
metric_results[dgp_name][method_name][metric_name] = metric_fn(simulation_results[dgp_name][method_name], true_params[dgp_name])
for plot_name, plot_fn in self.config['plots'].items():
# for plot_name, plot_fn in self.config['plots'][method_name].items(): # for method specific plots
if isinstance(plot_fn, dict):
plotting.instance_plot(plot_name, simulation_results, metric_results, self.config, plot_fn)
else:
plot_fn(plot_name, simulation_results, metric_results, true_params, self.config)
return simulation_results, metric_results, true_params
class MonteCarloSweep:
"""
This class contains methods to run sets of multiple monte carlo experiments
where each set of experiments has different parameters (for the dgps and methods, etc.).
This enables sweeping through parameter values to generate results for each permutation
of parameters. For example, running a simulation when the number of samples a specific DGP
generates is 100, 1000, or 10000.
"""
def __init__(self, config):
self.config = config
check_valid_config(self.config)
config['param_str'] = '_'.join(['{}_{}'.format(filesafe(k), self.stringify_param(v)) for k,v in self.config['mc_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), self.stringify_param(v)) for k,v in self.config['dgp_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(k), self.stringify_param(v)) for k,v in self.config['method_opts'].items()])
def stringify_param(self, param):
"""
Parameters
----------
param : list
list denoting the various values a parameter should take
Returns
-------
A string representation of the range of the values that parameter will take
"""
if hasattr(param, "__len__"):
return '{}_to_{}'.format(np.min(param), np.max(param))
else:
return param
def run(self):
"""
Runs many monte carlo simulations for all the permutations of parameters
specified in the config file.
Returns
-------
sweep_keys : list
list of all the permutations of parameters for each dgp
sweep_sim_results : list
list of simulation results for each permutation of parameters for each dgp
sweep_metrics : list
list of metric results for each permutation of parameters for each dgp
sweep_true_params : list
list of true parameters for each permutation of parameters for each dgp
"""
        # currently duplicates computation for the dgps because only one dgp parameter is changed in each config
# need to make it so that every inst_config is different for each dgp
for dgp_name in self.config['dgp_opts'].keys():
dgp_sweep_params = []
dgp_sweep_param_vals = []
for dgp_key, dgp_val in self.config['dgp_opts'][dgp_name].items():
if hasattr(dgp_val, "__len__"):
dgp_sweep_params.append(dgp_key)
dgp_sweep_param_vals.append(dgp_val)
sweep_keys = []
sweep_sim_results = []
sweep_metrics = []
sweep_true_params = []
inst_config = deepcopy(self.config)
for vec in product(*dgp_sweep_param_vals):
setting = list(zip(dgp_sweep_params, vec))
for k,v in setting:
inst_config['dgp_opts'][dgp_name][k] = v
simulation_results, metrics, true_params = MonteCarlo(inst_config).run()
sweep_keys.append(setting)
sweep_sim_results.append(simulation_results)
sweep_metrics.append(metrics)
sweep_true_params.append(true_params)
for plot_name, plot_fn in self.config['sweep_plots'].items():
if isinstance(plot_fn, dict):
                plotting.sweep_plot(plot_name, sweep_keys, sweep_sim_results, sweep_metrics, self.config, plot_fn)
else:
plot_fn(plot_name, sweep_keys, sweep_sim_results, sweep_metrics, sweep_true_params, self.config)
return sweep_keys, sweep_sim_results, sweep_metrics, sweep_true_params
| 48.786611 | 154 | 0.660806 | 9,828 | 0.842882 | 0 | 0 | 0 | 0 | 0 | 0 | 5,962 | 0.511321 |
8d7ad5ef06de97e8b617443c00cdb60123831b97 | 5,845 | py | Python | MusicGame.py | kfparri/MusicGame | f2914cae7a68585ca1a569c78ac13f68c1adb827 | [
"MIT"
]
| null | null | null | MusicGame.py | kfparri/MusicGame | f2914cae7a68585ca1a569c78ac13f68c1adb827 | [
"MIT"
]
| null | null | null | MusicGame.py | kfparri/MusicGame | f2914cae7a68585ca1a569c78ac13f68c1adb827 | [
"MIT"
]
| null | null | null | #------------------------------------------------------------------------------------------------------
# File Name: MusicGame.py
# Author: Kyle Parrish
# Date: 7/4/2014
# Description: This is a simple program that I wrote for the raspberry pi so that my daughter can
#    play with it. It plays a different sound with every keystroke. It also
#    displays a simple shape pattern on the screen with each keypress. The pi can also be set up to
#    allow users to change the sounds by uploading them to a web form on the pi itself. This code
#    will be included when it is finished.
# Change log:
# 4.30.15 - Updated the header to test out Visual Studio Code git integration
# 9.18.15 - Started making some changes to the application. Natalie is starting to enjoy
# the application so I'm starting to make it do more:
# - Updated the code to put circles as well as squares on the screen.
#------------------------------------------------------------------------------------------------------
# Basic imports for the game
import os,sys,datetime, sqlite3
import pygame
# I don't believe that I need the time references anymore, to be removed with next commit
#from time import strftime, localtime
from random import randint
from pygame.locals import *
# Setup basic constants
test = 640
# Screen height and width
SCREEN_WIDTH = test
SCREEN_HEIGHT = 480
#CENTER_POINT = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
#LOWER_CENTER = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4)
#CENTER_RECT_HEIGHT = 40
#CLOCK_TEXT_FONT = 48
# Colors, any of these can be used in the program
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
MATRIX_GREEN = (0, 255, 21)
# Code taken from: http://code.activestate.com/recipes/521884-play-sound-files-with-pygame-in-a-cross-platform-m/
# global constants
FREQ = 44100 # same as audio CD
BITSIZE = -16 # unsigned 16 bit
CHANNELS = 2 # 1 == mono, 2 == stereo
BUFFER = 1024 # audio buffer size in no. of samples
FRAMERATE = 30 # how often to check if playback has finished
sounds = ["Typewrit-Intermed-538_hifi.ogg",
"Typewrit-Bell-Patrick-8344_hifi.ogg",
"Arcade_S-wwwbeat-8528_hifi.ogg",
"Arcade_S-wwwbeat-8529_hifi.ogg",
"Arcade_S-wwwbeat-8530_hifi.ogg",
"Arcade_S-wwwbeat-8531_hifi.ogg",
"PowerUp-Mark_E_B-8070_hifi.ogg",
"PulseGun-Mark_E_B-7843_hifi.ogg",
"PulseSho-Mark_E_B-8071_hifi.ogg",
"SineySpa-Mark_E_B-7844_hifi.ogg",
"ToySpace-Mark_E_B-7846_hifi.ogg",
"ZipUp-Mark_E_B-8079_hifi.ogg"]
soundFiles = []
def playsound(soundfile):
"""Play sound through default mixer channel in blocking manner.
This will load the whole sound into memory before playback
"""
soundfile.play()
#sound = pygame.mixer.Sound(soundfile)
#clock = pygame.time.Clock()
#sound.play()
#while pygame.mixer.get_busy():
#clock.tick(FRAMERATE)
def drawMyRect(surface):
#pygame.draw.rect(screen, color, (x,y,width,height), thickness)
pygame.draw.rect(surface, RED, (randint(0,600), randint(0,440), 40,40), 5)
return surface
def drawMyCircle(surface):
pygame.draw.circle(surface, GREEN, (randint(0,600), randint(0,440)), 20, 5)
return surface
def main():
pygame.mixer.pre_init(44100,-16,2, 1024)
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('Music Game')
drawCircle = True
# create background
background = pygame.Surface(screen.get_size())
background = background.convert()
#allocate all the sound files, this should make it work better...
for file in sounds:
tempsound = pygame.mixer.Sound(file)
soundFiles.append(tempsound)
# hide the mouse
# not used while developing
#pygame.mouse.set_visible(False)
#pygame.draw.rect(screen, color, (x,y,width,height), thickness)
#pygame.draw.rect(background, RED, (10,10,40,40), 5)
#drawMyRect(background)
screen.blit(background, (0,0))
pygame.display.update()
soundfile = "Typewrit-Intermed-538_hifi.ogg"
soundfile3 = "Typewrit-Bell-Patrick-8344_hifi.ogg"
# main loop
while 1:
# This needs to change to match the new way of checking that I found on the web
# http://stackoverflow.com/questions/12556535/programming-pygame-so-that-i-can-press-multiple-keys-at-once-to-get-my-character
updateScreen = False
resetScreen = False
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
return
elif event.type == KEYDOWN:
keys = pygame.key.get_pressed()
#print(len(keys))
if keys[K_ESCAPE] and keys[K_LCTRL]:
pygame.quit()
sys.exit()
elif keys[K_ESCAPE]:
resetScreen = True;
soundFiles[1].play()
#playsound(soundFiles[1])
else:
updateScreen = True
soundFiles[0].play()
#playsound(soundFiles[0])
if resetScreen:
background = pygame.Surface(screen.get_size())
background = background.convert()
screen.blit(background, (0,0))
pygame.display.update()
if updateScreen:
if drawCircle:
drawMyCircle(background)
else:
drawMyRect(background)
drawCircle = not drawCircle
screen.blit(background, (0,0))
pygame.display.update()
if __name__ == '__main__': main()
| 34.791667 | 134 | 0.609239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,129 | 0.535329 |
8d7d0cccfbda47460eb1aeba6470425e3ed12174 | 243 | py | Python | tests/utils/img_processing_utils_test.py | venkatakolagotla/robin | 4497bf8ffcd03182f68f9a6d7c806bfdaa4791cb | [
"MIT"
]
| 4 | 2019-12-20T05:37:51.000Z | 2020-03-18T16:32:59.000Z | tests/utils/img_processing_utils_test.py | venkatakolagotla/robin | 4497bf8ffcd03182f68f9a6d7c806bfdaa4791cb | [
"MIT"
]
| null | null | null | tests/utils/img_processing_utils_test.py | venkatakolagotla/robin | 4497bf8ffcd03182f68f9a6d7c806bfdaa4791cb | [
"MIT"
]
| null | null | null | from __future__ import print_function
from robin.utils import img_processing_utils
import numpy as np
def test_normalize_gt(in_img_array):
out_img = img_processing_utils.normalize_gt(in_img_array)
assert type(out_img) == np.ndarray
| 24.3 | 61 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8d7e6b625734d32d6eb3ec106401a004caa7962c | 5,763 | py | Python | DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
]
| 1 | 2019-06-27T04:05:59.000Z | 2019-06-27T04:05:59.000Z | DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
]
| null | null | null | DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
]
| null | null | null | #4) 신경망 구현하기
##########KEYWORD###############
################################
#신경망은 입력층에서 출력층으로 기본적으로 한 방향으로 흐른다. 한 싸이클이 끝나면 역전파 알고리즘을 통해
#계속 학습으 진행하지만 역전파 알고리즘과 같은 고급 알고리즘은 다음장에서..
#한 방향으로만 정보가 전방으로 전달되는 신경망을 피드포워드 신경망(Feed Forward NN)이라고 한다.
#기본적으로 신경망은 입력층에서 데이터 입력을 받은 뒤 은닉층에서 데이터를 학습하고 출력층으로 결과를 내보낸다.
#입력층의 역할은 입력 데이터를 받아들이는 것이고 이를 위해서 입력층의 노드(뉴런) 개수는 입력데이터의 특성 갯수와 일치해야 한다.
#은닉층은 학습을 진행하는 층으로 은닉층의 노드 수와 은닉층 Layer 수는 설계자가 경험으로 얻어내야 한다.
#뉴런의 수가 너무 많으면 오버피팅이 발생하고 너무 적으면 언더피팅이 발생하여 학습이 되지 않음.
#또한 은닉층의 개수가 지나치게 많은 경우 비효율적이다.
#단순히 은닉층의 개수를 2배 늘리면 연산에 걸리는 시간은 400% 증가하지만 학습효율은 10%만 증가하기도 한다.
#출력층은 은닉층을 거쳐서 얻어낸 결과를 해결하고자 하는 문제에 맞게 만들어 준다.
#필기체 숫자 0부터 9까지를 인식하는 신경망이면 출력층이 10개가 될 것이고 개와 고양이를 분류하는 신경망이라면 3개의 출력층이 된다.
#다차원 배열을 이용하여 층이 3개인 다층 신경망을 간단하게 구현하자.행렬곱과 각 행렬의 원소의 위치를 잘 확인하면 어렵지 않다.
#그림25 P35
#
#Layer Node 수 Node Shape Weight Shape Bias Shape 계산식
#입력층 2 2차원 벡터 2 X 3 Matrix 3차원 Vector 은닉층(1) = 활성화함수((입력층*가중치1) + 편향1)
#은닉층(1) 3 3차원 벡터 3 X 2 Matrix 2차원 Vector 은닉층(2) = 활성화함수((은닉층1) * 가중치2 + 편향2)
#은닉층(2) 2 2차원 벡터 2 X 2 Matrix 2차원 Vector 출력층 = 활성화함수((은닉층2) * 가중치3 + 편향3)
#출력층 2 2차원 벡터
#The figure shows how the 3-layer network is organized.
#There are 2 input nodes, each layer has a bias, the hidden part consists of 2 layers, and the output layer produces 2 values.
#Looking at the figure above,
#notation of the form w12^(1), a2^(1) is used. The (1) at the upper right means the weight belongs to layer 1.
#In the subscript 12 at the lower right, the 1 is the index of the neuron in the next layer and the 2 is the index of the neuron in the previous layer. So w12^(1) is the layer-1 weight
#connecting neuron 2 of the previous layer to neuron 1 of the next layer.
#In the example 3-layer network the input layer has 2 nodes, and layer 1 has a bias of 1. From here, through the weights,
#the input values are fed into a1^(1) and so on. Written as a formula,
#a1^(1) = w11^(1)x1 + w12^(1)x2 + b1^(1).
#Expressed as a matrix product, the layer-1 nodes are A^(1) = (a1^(1), a2^(1), a3^(1)) and the layer-1 weights are
#W^(1) ...
#Using this with numpy's multidimensional arrays, layer 1 of the network can be written in Python code (see the sketch below).
#In the same way, the layer-1 output is fed back in as the layer-2 input, and the same kind of matrix operations on the input-node matrix (the layer-1 output matrix),
#the weight matrix, and the bias matrix give the layer-2 output-node matrix.
#Just as the layer-1 output was obtained from a matrix expression, connecting the layer-1 output as the layer-2 input and adding the layer-2 weights and layer-2 bias
#completes the layer-2 output.
#Finally, as in Figure 30, the layer-2 output is fed into the output layer in the same way, and evaluating the same kind of expression with the weights and bias to the output layer
#yields the final output. The one difference from the steps above is that the output layer's activation function is chosen to match the nature of the problem.
#Networks aimed at regression usually use the identity function at the output layer, binary classification the sigmoid function, and multi-class classification the softmax function.
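#A minimal sketch (not part of the original notes) of the layer-1 computation described above,
#using the shapes from the table: X is the 2-d input, W1 is a 2 x 3 weight matrix, b1 has 3 entries.
import numpy as np
X_sketch = np.array([1.0, 0.5])
W1_sketch = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
b1_sketch = np.array([0.1, 0.2, 0.3])
A1_sketch = np.dot(X_sketch, W1_sketch) + b1_sketch  # weighted sums entering hidden layer 1, shape (3,)
Z1_sketch = 1 / (1 + np.exp(-A1_sketch))             # sigmoid activation applied element-wise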
#Now let's look at the activation functions used in the output layer.
#The identity function is usually used for regression and the softmax function for classification. Regression means predicting a continuous numeric value from the input data, and
#classification means deciding which category each sample belongs to. The identity function simply returns its input unchanged, the familiar f(x) = x.
#In Python code:
def identity_function(x):
return x
#The softmax function is a single function built from exponentials with base e (the natural constant).
#The meaning of the softmax function is that it is a generalization of the sigmoid function.
#Through it, a probability can also be computed for each class.
#Saying that generalizing the sigmoid lets us compute a probability for each class means that summing all of the softmax outputs gives 1.
#Each softmax output lies between 0 and 1, and each output value is the probability for that individual output, so the softmax outputs have the special property
#of always summing to 1.
#Therefore, if the softmax function is used as the output-layer activation, the output can be interpreted probabilistically.
#For example,
#if the output is y[0] = 0.018, y[1] = 0.245, y[2] = 0.737, then class 0 has probability 1.8%, class 1 has probability 24.5%, and class 2 has probability 73.7%,
#so class 2 is the most likely and we can conclude that the answer is class 2.
#Using the softmax function, the problem can thus be handled statistically (probabilistically).
#Because the softmax function is based on the monotonically increasing exponential exp(), the ordering of its outputs inherits the ordering of the input elements.
#Conversely, the ordering of the softmax outputs can therefore be used to judge the ordering of the inputs.
#So the softmax function is used as the output activation during network training, but when a trained model is used for inference (classification or regression) the softmax
#can be omitted from the output activation. There is one caveat when implementing the softmax function.
#The exponential increases explosively towards infinity as its input grows. This is called overflow.
#For an input of 100, exp(100) already exceeds 10 to the 40th power. To avoid overflow, each value is replaced by that value minus the maximum over the whole set.
#This procedure can be written as a formula. [Equation 13], p. 40
#Multiply the numerator and denominator of the softmax function by a constant C. Since the same constant is applied to both, the overall value does not change.
#Then, using the exponential/logarithm identity x = a^log_a(x), move the constant C inside the exp() function.
#Inside exp() the constant C becomes log_e(C) = ln C, and writing ln C as a constant C' gives y_k = exp(a_k + C') / sum_i(exp(a_i + C')).
#Python code:
import numpy as np
a = np.array([1010,1000,990])
np.exp(a) / np.sum(np.exp(a)) # overflow occurs
# the corrected softmax expression
c = np.max(a)
np.exp(a-c) / np.sum(np.exp(a-c)) # computed correctly
#Because shifting every input by the same amount does not affect the result at all, the overflow problem of the softmax function can be solved this way.
#Using this trick, the softmax function can be implemented in Python as follows.
def softmax(a):
c=np.max(a)
exp_a = np.exp(a-c)
sum_exp_a = np.sum(exp_a)
y = exp_a / sum_exp_a
return y
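#A quick check (not part of the original notes) of the softmax function defined above: every output
#lies between 0 and 1 and the outputs sum to 1, so they can be read as class probabilities.
a_check = np.array([0.3, 2.9, 4.0])
y_check = softmax(a_check)
print(y_check)          # the largest input gets the largest probability
print(np.sum(y_check))  # 1.0 (up to floating point)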
#Finally, choosing the number of output-layer nodes is simple: set it equal to the number of classes in the input data.
#As another example, to classify dogs and cats, just create 2 output nodes in total, one for dog and one for cat.
#Now let's code, in simple Python, a multilayer network with 2 hidden layers (a network is usually named by the number of layers excluding the input layer,
#so this one is a 3-layer network).
#This network model uses the identity function as the output-layer activation.
#Putting all of the steps above together, the full Python implementation of the multilayer network with 2 hidden layers is as follows.
import numpy as np
# sigmoid activation function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# identity function, used as the output-layer activation
def identify_function(x):
return x
# Initialize the network: declare the multidimensional arrays of weights and biases here.
def init_network():
network = {}
network['w1'] = np.array([[0.1,0.3,0.5],[0.2,0.4,0.6]])
network['b1'] = np.array([0.1,0.2,0.3])
network['w2'] = np.array([[0.1,0.4],[0.2,0.5],[0.3,0.6]])
network['b2'] = np.array([0.1,0.2])
network['w3'] = np.array([[0.1,0.3],[0.2,0.4]])
network['b3'] = np.array([0.1,0.2])
return network
# Forward-propagation function. It takes the weights and biases; the input and hidden layers use the sigmoid activation,
# and the output layer uses the identity function, implementing the 3-layer network as a function.
def forward(network,x):
w1,w2,w3 = network['w1'],network['w2'],network['w3']
    b1,b2,b3 = network['b1'],network['b2'],network['b3']
a1 = np.dot(x,w1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1,w2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2,w3) + b3
y = identify_function(a3)
return y
network = init_network() # instantiate the network's weights and biases
x = np.array([1.0,0.5])
y = forward(network ,x)
print(y)
#Designing a simple neural network is not difficult. Use multidimensional arrays properly to set up the weights, inputs, and biases, decide which activation function to use,
#implement it, feed the values through the implemented activation function, and then connect the output of each layer into the next,
#chaining as many layers as you want.
8d7eb5aaefc17250eb9787e23ab1f5200d2d65f8 | 466 | py | Python | label_gen.py | avasid/gaze_detection | dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5 | [
"MIT"
]
| 1 | 2020-02-07T21:34:10.000Z | 2020-02-07T21:34:10.000Z | label_gen.py | avasid/gaze_detection | dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5 | [
"MIT"
]
| 8 | 2020-11-13T18:37:12.000Z | 2022-03-12T00:14:04.000Z | label_gen.py | avasid/gaze_detection | dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5 | [
"MIT"
]
| null | null | null | import os
import pandas as pd
dictt = {}
i = 0
for label in ['down', 'up', 'left', 'right']:
img_lst = os.listdir("./data/img_data/" + label + "/")
temp_label = [0] * 4
temp_label[i] = 1
for img in img_lst:
print(label + " " + img)
dictt[img] = temp_label
i += 1
label_df = pd.DataFrame(data=dictt, index=['down', 'up', 'left', 'right']).transpose()
label_df = label_df.sample(frac=1)
label_df.to_csv("./data/label_data.csv")
| 23.3 | 86 | 0.592275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.199571 |
8d7fb31d8d0c397a081d7685e96fa1bf8414f9a6 | 2,398 | py | Python | rubik_race/rubiks_race/solver_test.py | ZengLawrence/rubiks_race | 3d78484f0a68c7e483953cea68130f1edde2739a | [
"MIT"
]
| null | null | null | rubik_race/rubiks_race/solver_test.py | ZengLawrence/rubiks_race | 3d78484f0a68c7e483953cea68130f1edde2739a | [
"MIT"
]
| null | null | null | rubik_race/rubiks_race/solver_test.py | ZengLawrence/rubiks_race | 3d78484f0a68c7e483953cea68130f1edde2739a | [
"MIT"
]
| null | null | null | '''
Created on Jun 27, 2017
@author: lawrencezeng
'''
import unittest
from rubiks_race import solver
class Test(unittest.TestCase):
def setUp(self):
self.initial_position = [
['g', 'g', 'y', 'r', 'r' ],
['w', 'g', 'w', 'w', 'y' ],
['g', 'o', ' ', 'r', 'o' ],
['o', 'b', 'b', 'y', 'y' ],
['b', 'o', 'w', 'r', 'b' ]
]
self.pattern = [
['g', 'w', 'w'],
['g', 'o', 'r'],
['b', 'b', 'y']
]
final_positions = [
['g', 'g', 'y', 'r', 'r' ],
['w', 'g', 'w', 'w', 'y' ],
[' ', 'g', 'o', 'r', 'o' ],
['o', 'b', 'b', 'y', 'y' ],
['b', 'o', 'w', 'r', 'b' ]
]
moves = [
[[2, 1], [2, 2]],
[[2, 0], [2, 1]],
]
self.result = [final_positions, moves]
def tearDown(self):
return unittest.TestCase.tearDown(self)
def test_solve(self):
initial_position = [
['g', 'g', 'y', 'r', 'r' ],
['w', 'g', 'w', 'w', 'y' ],
['g', 'o', ' ', 'r', 'o' ],
['o', 'b', 'b', 'y', 'y' ],
['b', 'o', 'w', 'r', 'b' ]
]
pattern = [
['g', 'w', 'w'],
['g', 'o', 'r'],
['b', 'b', 'y']
]
final_positions = [
['g', 'g', 'y', 'r', 'r' ],
['w', 'g', 'w', 'w', 'y' ],
[' ', 'g', 'o', 'r', 'o' ],
['o', 'b', 'b', 'y', 'y' ],
['b', 'o', 'w', 'r', 'b' ]
]
moves = [
[[2, 1], [2, 2]],
[[2, 0], [2, 1]],
]
self.assertItemsEqual([final_positions, moves], solver.solve(initial_position, pattern))
def test_solve_pq(self):
self.assertItemsEqual(self.result, solver.solve_pq(self.initial_position, self.pattern))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_solve']
unittest.main()
| 31.142857 | 96 | 0.27648 | 2,183 | 0.910342 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.193495 |
8d80488b5bce65f6332a7212b2c16986023812ef | 1,625 | py | Python | wagtail_translation/migrations/0001_initial.py | patroqueeet/wagtail2-translation | 6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74 | [
"MIT"
]
| null | null | null | wagtail_translation/migrations/0001_initial.py | patroqueeet/wagtail2-translation | 6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74 | [
"MIT"
]
| null | null | null | wagtail_translation/migrations/0001_initial.py | patroqueeet/wagtail2-translation | 6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74 | [
"MIT"
]
| 1 | 2021-01-08T19:25:46.000Z | 2021-01-08T19:25:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from modeltranslation import settings as mt_settings
from modeltranslation.utils import build_localized_fieldname, get_translation_fields
from django.db import migrations, models
def url_path_fix(apps, schema_editor):
# cannot use apps.get_model here
# because Page instances wouldn't have set_url_path method
from wagtail.core.models import Page
url_path_fields = get_translation_fields('url_path')
for page in Page.objects.order_by('path').iterator():
page.set_url_path(page.get_parent())
# make sure descendant page url paths are not updated at this point
# because it would fail
page.save(update_fields=url_path_fields)
class Migration(migrations.Migration):
"""
This migration fixes whatever pages you already have in DB
so that their titles and slugs in default language are not empty
and url_path field translations are updated accordingly.
"""
dependencies = [
('wagtailtranslation', '9999_wagtail_translation'),
]
operations = [
# 1. copy slugs and titles to corresponding default language fields
migrations.RunSQL(
['UPDATE wagtailcore_page SET {}=slug, {}=title'.format(
build_localized_fieldname('slug', mt_settings.DEFAULT_LANGUAGE),
build_localized_fieldname('title', mt_settings.DEFAULT_LANGUAGE))],
migrations.RunSQL.noop),
# 2. update url_path in all existing pages for all translations
migrations.RunPython(url_path_fix, migrations.RunPython.noop),
]
| 37.790698 | 84 | 0.715692 | 878 | 0.540308 | 0 | 0 | 0 | 0 | 0 | 0 | 659 | 0.405538 |
8d8293dd05c195d7acdf3af64d74eb27c71ed3fc | 99,195 | py | Python | WORC/WORC.py | MStarmans91/WORC | b6b8fc2ccb7d443a69b5ca20b1d6efb65b3f0fc7 | [
"ECL-2.0",
"Apache-2.0"
]
| 47 | 2018-01-28T14:08:15.000Z | 2022-03-24T16:10:07.000Z | WORC/WORC.py | JZK00/WORC | 14e8099835eccb35d49b52b97c0be64ecca3809c | [
"ECL-2.0",
"Apache-2.0"
]
| 13 | 2018-08-28T13:32:57.000Z | 2020-10-26T16:35:59.000Z | WORC/WORC.py | JZK00/WORC | 14e8099835eccb35d49b52b97c0be64ecca3809c | [
"ECL-2.0",
"Apache-2.0"
]
| 16 | 2017-11-13T10:53:36.000Z | 2022-03-18T17:02:04.000Z | #!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import fastr
import graphviz
import configparser
from pathlib import Path
from random import randint
import WORC.IOparser.file_io as io
from fastr.api import ResourceLimit
from WORC.tools.Slicer import Slicer
from WORC.tools.Elastix import Elastix
from WORC.tools.Evaluate import Evaluate
import WORC.addexceptions as WORCexceptions
import WORC.IOparser.config_WORC as config_io
from WORC.detectors.detectors import DebugDetector
from WORC.export.hyper_params_exporter import export_hyper_params_to_latex
from urllib.parse import urlparse
from urllib.request import url2pathname
class WORC(object):
"""Workflow for Optimal Radiomics Classification.
A Workflow for Optimal Radiomics Classification (WORC) object that
    serves as a pipeline spawner and manager for optimizing radiomics
studies. Depending on the attributes set, the object will spawn an
appropriate pipeline and manage it.
Note that many attributes are lists and can therefore contain multiple
instances. For example, when providing two sequences per patient,
the "images" list contains two items. The type of items in the lists
is described below.
All objects that serve as source for your network, i.e. refer to
actual files to be used, should be formatted as fastr sources suited for
    one of the fastr plugins, see also
http://fastr.readthedocs.io/en/stable/fastr.reference.html#ioplugin-reference
The objects should be lists of these fastr sources or dictionaries with the
sample ID's, e.g.
images_train = [{'Patient001': vfs://input/CT001.nii.gz,
'Patient002': vfs://input/CT002.nii.gz},
{'Patient001': vfs://input/MR001.nii.gz,
'Patient002': vfs://input/MR002.nii.gz}]
Attributes
------------------
name: String, default 'WORC'
name of the network.
configs: list, required
Configuration parameters, either ConfigParser objects
created through the defaultconfig function or paths of config .ini
files. (list, required)
labels: list, required
Paths to files containing patient labels (.txt files).
network: automatically generated
The FASTR network generated through the "build" function.
images: list, optional
        Paths referring to the images used for Radiomics computation. Images
should be of the ITK Image type.
segmentations: list, optional
        Paths referring to the segmentations used for Radiomics computation.
Segmentations should be of the ITK Image type.
semantics: semantic features per image type (list, optional)
masks: state which pixels of images are valid (list, optional)
features: input Radiomics features for classification (list, optional)
metadata: DICOM headers belonging to images (list, optional)
Elastix_Para: parameter files for Elastix (list, optional)
fastr_plugin: plugin to use for FASTR execution
    fastr_tmpdir: temporary directory to use for FASTR execution
additions: additional inputs for your network (dict, optional)
source_data: data to use as sources for FASTR (dict)
sink_data: data to use as sinks for FASTR (dict)
CopyMetadata: Boolean, default True
when using elastix, copy metadata from image to segmentation or not
"""
def __init__(self, name='test'):
"""Initialize WORC object.
Set the initial variables all to None, except for some defaults.
Arguments:
            name: name of the network (string, optional)
"""
self.name = 'WORC_' + name
# Initialize several objects
self.configs = list()
self.fastrconfigs = list()
self.images_train = list()
self.segmentations_train = list()
self.semantics_train = list()
self.labels_train = list()
self.masks_train = list()
self.masks_normalize_train = list()
self.features_train = list()
self.metadata_train = list()
self.images_test = list()
self.segmentations_test = list()
self.semantics_test = list()
self.labels_test = list()
self.masks_test = list()
self.masks_normalize_test = list()
self.features_test = list()
self.metadata_test = list()
self.Elastix_Para = list()
self.label_names = 'Label1, Label2'
self.fixedsplits = list()
# Set some defaults, name
self.fastr_plugin = 'LinearExecution'
if name == '':
name = [randint(0, 9) for p in range(0, 5)]
self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'], self.name)
self.additions = dict()
self.CopyMetadata = True
self.segmode = []
self._add_evaluation = False
self.TrainTest = False
# Memory settings for all fastr nodes
self.fastr_memory_parameters = dict()
self.fastr_memory_parameters['FeatureCalculator'] = '14G'
self.fastr_memory_parameters['Classification'] = '6G'
self.fastr_memory_parameters['WORCCastConvert'] = '4G'
self.fastr_memory_parameters['Preprocessing'] = '4G'
self.fastr_memory_parameters['Elastix'] = '4G'
self.fastr_memory_parameters['Transformix'] = '4G'
self.fastr_memory_parameters['Segmentix'] = '6G'
self.fastr_memory_parameters['ComBat'] = '12G'
self.fastr_memory_parameters['PlotEstimator'] = '12G'
if DebugDetector().do_detection():
print(fastr.config)
def defaultconfig(self):
"""Generate a configparser object holding all default configuration values.
Returns:
config: configparser configuration file
"""
config = configparser.ConfigParser()
config.optionxform = str
# General configuration of WORC
config['General'] = dict()
config['General']['cross_validation'] = 'True'
config['General']['Segmentix'] = 'True'
config['General']['FeatureCalculators'] = '[predict/CalcFeatures:1.0, pyradiomics/Pyradiomics:1.0]'
config['General']['Preprocessing'] = 'worc/PreProcess:1.0'
config['General']['RegistrationNode'] = "elastix4.8/Elastix:4.8"
config['General']['TransformationNode'] = "elastix4.8/Transformix:4.8"
config['General']['Joblib_ncores'] = '1'
config['General']['Joblib_backend'] = 'threading'
config['General']['tempsave'] = 'False'
config['General']['AssumeSameImageAndMaskMetadata'] = 'False'
config['General']['ComBat'] = 'False'
# Options for the object/patient labels that are used
config['Labels'] = dict()
config['Labels']['label_names'] = 'Label1, Label2'
config['Labels']['modus'] = 'singlelabel'
config['Labels']['url'] = 'WIP'
config['Labels']['projectID'] = 'WIP'
# Preprocessing
config['Preprocessing'] = dict()
config['Preprocessing']['CheckSpacing'] = 'False'
config['Preprocessing']['Clipping'] = 'False'
config['Preprocessing']['Clipping_Range'] = '-1000.0, 3000.0'
config['Preprocessing']['Normalize'] = 'True'
config['Preprocessing']['Normalize_ROI'] = 'Full'
config['Preprocessing']['Method'] = 'z_score'
config['Preprocessing']['ROIDetermine'] = 'Provided'
config['Preprocessing']['ROIdilate'] = 'False'
config['Preprocessing']['ROIdilateradius'] = '10'
config['Preprocessing']['Resampling'] = 'False'
config['Preprocessing']['Resampling_spacing'] = '1, 1, 1'
config['Preprocessing']['BiasCorrection'] = 'False'
config['Preprocessing']['BiasCorrection_Mask'] = 'False'
config['Preprocessing']['CheckOrientation'] = 'False'
config['Preprocessing']['OrientationPrimaryAxis'] = 'axial'
# Segmentix
config['Segmentix'] = dict()
config['Segmentix']['mask'] = 'subtract'
config['Segmentix']['segtype'] = 'None'
config['Segmentix']['segradius'] = '5'
config['Segmentix']['N_blobs'] = '1'
config['Segmentix']['fillholes'] = 'True'
config['Segmentix']['remove_small_objects'] = 'False'
config['Segmentix']['min_object_size'] = '2'
# PREDICT - Feature calculation
# Determine which features are calculated
config['ImageFeatures'] = dict()
config['ImageFeatures']['shape'] = 'True'
config['ImageFeatures']['histogram'] = 'True'
config['ImageFeatures']['orientation'] = 'True'
config['ImageFeatures']['texture_Gabor'] = 'True'
config['ImageFeatures']['texture_LBP'] = 'True'
config['ImageFeatures']['texture_GLCM'] = 'True'
config['ImageFeatures']['texture_GLCMMS'] = 'True'
config['ImageFeatures']['texture_GLRLM'] = 'False'
config['ImageFeatures']['texture_GLSZM'] = 'False'
config['ImageFeatures']['texture_NGTDM'] = 'False'
config['ImageFeatures']['coliage'] = 'False'
config['ImageFeatures']['vessel'] = 'True'
config['ImageFeatures']['log'] = 'True'
config['ImageFeatures']['phase'] = 'True'
# Parameter settings for PREDICT feature calculation
# Defines only naming of modalities
config['ImageFeatures']['image_type'] = 'CT'
# Define frequencies for gabor filter in pixels
config['ImageFeatures']['gabor_frequencies'] = '0.05, 0.2, 0.5'
# Gabor, GLCM angles in degrees and radians, respectively
config['ImageFeatures']['gabor_angles'] = '0, 45, 90, 135'
config['ImageFeatures']['GLCM_angles'] = '0, 0.79, 1.57, 2.36'
# GLCM discretization levels, distances in pixels
config['ImageFeatures']['GLCM_levels'] = '16'
config['ImageFeatures']['GLCM_distances'] = '1, 3'
# LBP radius, number of points in pixels
config['ImageFeatures']['LBP_radius'] = '3, 8, 15'
config['ImageFeatures']['LBP_npoints'] = '12, 24, 36'
# Phase features minimal wavelength and number of scales
config['ImageFeatures']['phase_minwavelength'] = '3'
config['ImageFeatures']['phase_nscale'] = '5'
# Log features sigma of Gaussian in pixels
config['ImageFeatures']['log_sigma'] = '1, 5, 10'
# Vessel features scale range, steps for the range
config['ImageFeatures']['vessel_scale_range'] = '1, 10'
config['ImageFeatures']['vessel_scale_step'] = '2'
# Vessel features radius for erosion to determine boundary
config['ImageFeatures']['vessel_radius'] = '5'
# Tags from which to extract features, and how to name them
config['ImageFeatures']['dicom_feature_tags'] = '0010 1010, 0010 0040'
config['ImageFeatures']['dicom_feature_labels'] = 'age, sex'
# PyRadiomics - Feature calculation
# Addition to the above, specifically for PyRadiomics
# Mostly based on specific MR Settings: see https://github.com/Radiomics/pyradiomics/blob/master/examples/exampleSettings/exampleMR_NoResampling.yaml
config['PyRadiomics'] = dict()
config['PyRadiomics']['geometryTolerance'] = '0.0001'
config['PyRadiomics']['normalize'] = 'False'
config['PyRadiomics']['normalizeScale'] = '100'
config['PyRadiomics']['resampledPixelSpacing'] = 'None'
config['PyRadiomics']['interpolator'] = 'sitkBSpline'
config['PyRadiomics']['preCrop'] = 'True'
config['PyRadiomics']['binCount'] = config['ImageFeatures']['GLCM_levels'] # BinWidth too sensitive for normalization, thus use binCount
config['PyRadiomics']['binWidth'] = 'None'
config['PyRadiomics']['force2D'] = 'False'
config['PyRadiomics']['force2Ddimension'] = '0' # axial slices, for coronal slices, use dimension 1 and for sagittal, dimension 2.
config['PyRadiomics']['voxelArrayShift'] = '300'
config['PyRadiomics']['Original'] = 'True'
config['PyRadiomics']['Wavelet'] = 'False'
config['PyRadiomics']['LoG'] = 'False'
if config['General']['Segmentix'] == 'True':
config['PyRadiomics']['label'] = '1'
else:
config['PyRadiomics']['label'] = '255'
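# NOTE (assumption): segmentix is expected to output masks with foreground
# label 1, whereas unprocessed binary masks are assumed to use 255, hence
# the switch above.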
# Enabled PyRadiomics features
config['PyRadiomics']['extract_firstorder'] = 'False'
config['PyRadiomics']['extract_shape'] = 'True'
config['PyRadiomics']['texture_GLCM'] = 'False'
config['PyRadiomics']['texture_GLRLM'] = 'True'
config['PyRadiomics']['texture_GLSZM'] = 'True'
config['PyRadiomics']['texture_GLDM'] = 'True'
config['PyRadiomics']['texture_NGTDM'] = 'True'
# ComBat Feature Harmonization
config['ComBat'] = dict()
config['ComBat']['language'] = 'python'
config['ComBat']['batch'] = 'Hospital'
config['ComBat']['mod'] = '[]'
config['ComBat']['par'] = '1'
config['ComBat']['eb'] = '1'
config['ComBat']['per_feature'] = '0'
config['ComBat']['excluded_features'] = 'sf_, of_, semf_, pf_'
config['ComBat']['matlab'] = 'C:\\Program Files\\MATLAB\\R2015b\\bin\\matlab.exe'
# Feature OneHotEncoding
config['OneHotEncoding'] = dict()
config['OneHotEncoding']['Use'] = 'False'
config['OneHotEncoding']['feature_labels_tofit'] = ''
# Feature imputation
config['Imputation'] = dict()
config['Imputation']['use'] = 'True'
config['Imputation']['strategy'] = 'mean, median, most_frequent, constant, knn'
config['Imputation']['n_neighbors'] = '5, 5'
# Feature scaling options
config['FeatureScaling'] = dict()
config['FeatureScaling']['scaling_method'] = 'robust_z_score'
config['FeatureScaling']['skip_features'] = 'semf_, pf_'
# Feature preprocessing before the whole HyperOptimization
config['FeatPreProcess'] = dict()
config['FeatPreProcess']['Use'] = 'False'
config['FeatPreProcess']['Combine'] = 'False'
config['FeatPreProcess']['Combine_method'] = 'mean'
# Feature selection
config['Featsel'] = dict()
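# NOTE (assumption): the fractional values below (e.g. '0.275') appear to be
# the probability that the corresponding selection step is enabled in a
# randomly sampled workflow, rather than a threshold on the features.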
config['Featsel']['Variance'] = '1.0'
config['Featsel']['GroupwiseSearch'] = 'True'
config['Featsel']['SelectFromModel'] = '0.275'
config['Featsel']['SelectFromModel_estimator'] = 'Lasso, LR, RF'
config['Featsel']['SelectFromModel_lasso_alpha'] = '0.1, 1.4'
config['Featsel']['SelectFromModel_n_trees'] = '10, 90'
config['Featsel']['UsePCA'] = '0.275'
config['Featsel']['PCAType'] = '95variance, 10, 50, 100'
config['Featsel']['StatisticalTestUse'] = '0.275'
config['Featsel']['StatisticalTestMetric'] = 'MannWhitneyU'
config['Featsel']['StatisticalTestThreshold'] = '-3, 2.5'
config['Featsel']['ReliefUse'] = '0.275'
config['Featsel']['ReliefNN'] = '2, 4'
config['Featsel']['ReliefSampleSize'] = '0.75, 0.2'
config['Featsel']['ReliefDistanceP'] = '1, 3'
config['Featsel']['ReliefNumFeatures'] = '10, 40'
# Groupwise Featureselection options
config['SelectFeatGroup'] = dict()
config['SelectFeatGroup']['shape_features'] = 'True, False'
config['SelectFeatGroup']['histogram_features'] = 'True, False'
config['SelectFeatGroup']['orientation_features'] = 'True, False'
config['SelectFeatGroup']['texture_Gabor_features'] = 'True, False'
config['SelectFeatGroup']['texture_GLCM_features'] = 'True, False'
config['SelectFeatGroup']['texture_GLDM_features'] = 'True, False'
config['SelectFeatGroup']['texture_GLCMMS_features'] = 'True, False'
config['SelectFeatGroup']['texture_GLRLM_features'] = 'True, False'
config['SelectFeatGroup']['texture_GLSZM_features'] = 'True, False'
config['SelectFeatGroup']['texture_GLDZM_features'] = 'True, False'
config['SelectFeatGroup']['texture_NGTDM_features'] = 'True, False'
config['SelectFeatGroup']['texture_NGLDM_features'] = 'True, False'
config['SelectFeatGroup']['texture_LBP_features'] = 'True, False'
config['SelectFeatGroup']['dicom_features'] = 'False'
config['SelectFeatGroup']['semantic_features'] = 'False'
config['SelectFeatGroup']['coliage_features'] = 'False'
config['SelectFeatGroup']['vessel_features'] = 'True, False'
config['SelectFeatGroup']['phase_features'] = 'True, False'
config['SelectFeatGroup']['fractal_features'] = 'True, False'
config['SelectFeatGroup']['location_features'] = 'True, False'
config['SelectFeatGroup']['rgrd_features'] = 'True, False'
# Select features per toolbox, or simply all
config['SelectFeatGroup']['toolbox'] = 'All, PREDICT, PyRadiomics'
# Select original features, or after transformation of feature space
config['SelectFeatGroup']['original_features'] = 'True'
config['SelectFeatGroup']['wavelet_features'] = 'True, False'
config['SelectFeatGroup']['log_features'] = 'True, False'
# Resampling options
config['Resampling'] = dict()
config['Resampling']['Use'] = '0.20'
config['Resampling']['Method'] =\
'RandomUnderSampling, RandomOverSampling, NearMiss, ' +\
'NeighbourhoodCleaningRule, ADASYN, BorderlineSMOTE, SMOTE, ' +\
'SMOTEENN, SMOTETomek'
config['Resampling']['sampling_strategy'] = 'auto, majority, minority, not minority, not majority, all'
config['Resampling']['n_neighbors'] = '3, 12'
config['Resampling']['k_neighbors'] = '5, 15'
config['Resampling']['threshold_cleaning'] = '0.25, 0.5'
# Classification
config['Classification'] = dict()
config['Classification']['fastr'] = 'True'
config['Classification']['fastr_plugin'] = self.fastr_plugin
config['Classification']['classifiers'] =\
'SVM, RF, LR, LDA, QDA, GaussianNB, ' +\
'AdaBoostClassifier, ' +\
'XGBClassifier'
config['Classification']['max_iter'] = '100000'
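# NOTE (assumption): two-valued entries below (e.g. '0, 6') appear to define
# search ranges for the random hyperparameter search rather than fixed values.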
config['Classification']['SVMKernel'] = 'linear, poly, rbf'
config['Classification']['SVMC'] = '0, 6'
config['Classification']['SVMdegree'] = '1, 6'
config['Classification']['SVMcoef0'] = '0, 1'
config['Classification']['SVMgamma'] = '-5, 5'
config['Classification']['RFn_estimators'] = '10, 90'
config['Classification']['RFmin_samples_split'] = '2, 3'
config['Classification']['RFmax_depth'] = '5, 5'
config['Classification']['LRpenalty'] = 'l1, l2, elasticnet'
config['Classification']['LRC'] = '0.01, 0.99'
config['Classification']['LR_solver'] = 'lbfgs, saga'
config['Classification']['LR_l1_ratio'] = '0, 1'
config['Classification']['LDA_solver'] = 'svd, lsqr, eigen'
config['Classification']['LDA_shrinkage'] = '-5, 5'
config['Classification']['QDA_reg_param'] = '-5, 5'
config['Classification']['ElasticNet_alpha'] = '-5, 5'
config['Classification']['ElasticNet_l1_ratio'] = '0, 1'
config['Classification']['SGD_alpha'] = '-5, 5'
config['Classification']['SGD_l1_ratio'] = '0, 1'
config['Classification']['SGD_loss'] = 'squared_loss, huber, epsilon_insensitive, squared_epsilon_insensitive'
config['Classification']['SGD_penalty'] = 'none, l2, l1'
config['Classification']['CNB_alpha'] = '0, 1'
config['Classification']['AdaBoost_n_estimators'] = config['Classification']['RFn_estimators']
config['Classification']['AdaBoost_learning_rate'] = '0.01, 0.99'
# Based on https://towardsdatascience.com/doing-xgboost-hyper-parameter-tuning-the-smart-way-part-1-of-2-f6d255a45dde
# and https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
# and https://medium.com/data-design/xgboost-hi-im-gamma-what-can-i-do-for-you-and-the-tuning-of-regularization-a42ea17e6ab6
config['Classification']['XGB_boosting_rounds'] = config['Classification']['RFn_estimators']
config['Classification']['XGB_max_depth'] = '3, 12'
config['Classification']['XGB_learning_rate'] = config['Classification']['AdaBoost_learning_rate']
config['Classification']['XGB_gamma'] = '0.01, 9.99'
config['Classification']['XGB_min_child_weight'] = '1, 6'
config['Classification']['XGB_colsample_bytree'] = '0.3, 0.7'
# CrossValidation
config['CrossValidation'] = dict()
config['CrossValidation']['Type'] = 'random_split'
config['CrossValidation']['N_iterations'] = '100'
config['CrossValidation']['test_size'] = '0.2'
config['CrossValidation']['fixed_seed'] = 'False'
# Hyperparameter optimization options
config['HyperOptimization'] = dict()
config['HyperOptimization']['scoring_method'] = 'f1_weighted'
config['HyperOptimization']['test_size'] = '0.2'
config['HyperOptimization']['n_splits'] = '5'
config['HyperOptimization']['N_iterations'] = '1000'
config['HyperOptimization']['n_jobspercore'] = '200' # only relevant when using fastr in classification
config['HyperOptimization']['maxlen'] = '100'
config['HyperOptimization']['ranking_score'] = 'test_score'
config['HyperOptimization']['memory'] = '3G'
config['HyperOptimization']['refit_workflows'] = 'False'
# Ensemble options
config['Ensemble'] = dict()
config['Ensemble']['Use'] = '100'
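# NOTE (assumption): a numeric 'Use' presumably denotes the number of
# best-ranked workflows that are combined into the ensemble.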
config['Ensemble']['Metric'] = 'Default'
# Evaluation options
config['Evaluation'] = dict()
config['Evaluation']['OverfitScaler'] = 'False'
# Bootstrap options
config['Bootstrap'] = dict()
config['Bootstrap']['Use'] = 'False'
config['Bootstrap']['N_iterations'] = '1000'
return config
def add_tools(self):
"""Add several tools to the WORC object."""
self.Tools = Tools()
def build(self, wtype='training'):
"""Build the network based on the given attributes.
Parameters
----------
wtype: string, default 'training'
Specify the WORC execution type.
- testing: use if you have a trained classifier and want to
apply it to new images.
- training: use if you want to train a classifier from a dataset.
"""
self.wtype = wtype
if wtype == 'training':
self.build_training()
elif wtype == 'testing':
self.build_testing()
def build_training(self):
"""Build the training network based on the given attributes."""
# We either need images or features for Radiomics
if self.images_test or self.features_test:
self.TrainTest = True
if self.images_train or self.features_train:
print('Building training network...')
# We currently require labels for supervised learning
if self.labels_train:
if not self.configs:
print("No configuration given, assuming default")
if self.images_train:
self.configs = [self.defaultconfig()] * len(self.images_train)
else:
self.configs = [self.defaultconfig()] * len(self.features_train)
self.network = fastr.create_network(self.name)
# BUG: We currently use the first configuration as general config
image_types = list()
for c in range(len(self.configs)):
if type(self.configs[c]) == str:
# Probably, c is a configuration file
self.configs[c] = config_io.load_config(self.configs[c])
image_types.append(self.configs[c]['ImageFeatures']['image_type'])
# Create config source
self.source_class_config = self.network.create_source('ParameterFile', id='config_classification_source', node_group='conf', step_id='general_sources')
# Classification tool and label source
self.source_patientclass_train = self.network.create_source('PatientInfoFile', id='patientclass_train', node_group='pctrain', step_id='train_sources')
if self.labels_test:
self.source_patientclass_test = self.network.create_source('PatientInfoFile', id='patientclass_test', node_group='pctest', step_id='test_sources')
memory = self.fastr_memory_parameters['Classification']
self.classify = self.network.create_node('worc/TrainClassifier:1.0',
tool_version='1.0',
id='classify',
resources=ResourceLimit(memory=memory),
step_id='WorkflowOptimization')
if self.fixedsplits:
self.fixedsplits_node = self.network.create_source('CSVFile', id='fixedsplits_source', node_group='conf', step_id='general_sources')
self.classify.inputs['fixedsplits'] = self.fixedsplits_node.output
self.source_Ensemble =\
self.network.create_constant('String', [self.configs[0]['Ensemble']['Use']],
id='Ensemble',
step_id='Evaluation')
self.source_LabelType =\
self.network.create_constant('String', [self.configs[0]['Labels']['label_names']],
id='LabelType',
step_id='Evaluation')
memory = self.fastr_memory_parameters['PlotEstimator']
self.plot_estimator =\
self.network.create_node('worc/PlotEstimator:1.0', tool_version='1.0',
id='plot_Estimator',
resources=ResourceLimit(memory=memory),
step_id='Evaluation')
# Outputs
self.sink_classification = self.network.create_sink('HDF5', id='classification', step_id='general_sinks')
self.sink_performance = self.network.create_sink('JsonFile', id='performance', step_id='general_sinks')
self.sink_class_config = self.network.create_sink('ParameterFile', id='config_classification_sink', node_group='conf', step_id='general_sinks')
# Links
self.sink_class_config.input = self.source_class_config.output
self.link_class_1 = self.network.create_link(self.source_class_config.output, self.classify.inputs['config'])
self.link_class_2 = self.network.create_link(self.source_patientclass_train.output, self.classify.inputs['patientclass_train'])
self.link_class_1.collapse = 'conf'
self.link_class_2.collapse = 'pctrain'
self.plot_estimator.inputs['ensemble'] = self.source_Ensemble.output
self.plot_estimator.inputs['label_type'] = self.source_LabelType.output
if self.labels_test:
pinfo = self.source_patientclass_test.output
else:
pinfo = self.source_patientclass_train.output
self.plot_estimator.inputs['prediction'] = self.classify.outputs['classification']
self.plot_estimator.inputs['pinfo'] = pinfo
if self.TrainTest:
# FIXME: the naming here is ugly
self.link_class_3 = self.network.create_link(self.source_patientclass_test.output, self.classify.inputs['patientclass_test'])
self.link_class_3.collapse = 'pctest'
self.sink_classification.input = self.classify.outputs['classification']
self.sink_performance.input = self.plot_estimator.outputs['output_json']
if self.masks_normalize_train:
self.sources_masks_normalize_train = dict()
if self.masks_normalize_test:
self.sources_masks_normalize_test = dict()
# -----------------------------------------------------
# Optionally, add ComBat Harmonization. Currently done
# on full dataset, not in a cross-validation
if self.configs[0]['General']['ComBat'] == 'True':
self.add_ComBat()
if not self.features_train:
# Create nodes to compute features
# General
self.sources_parameters = dict()
self.source_config_pyradiomics = dict()
self.source_toolbox_name = dict()
# Training only
self.calcfeatures_train = dict()
self.featureconverter_train = dict()
self.preprocessing_train = dict()
self.sources_images_train = dict()
self.sinks_features_train = dict()
self.converters_im_train = dict()
self.converters_seg_train = dict()
self.links_C1_train = dict()
self.featurecalculators = dict()
if self.TrainTest:
# A test set is supplied, for which nodes also need to be created
self.calcfeatures_test = dict()
self.featureconverter_test = dict()
self.preprocessing_test = dict()
self.sources_images_test = dict()
self.sinks_features_test = dict()
self.converters_im_test = dict()
self.converters_seg_test = dict()
self.links_C1_test = dict()
# Check which nodes are necessary
if not self.segmentations_train:
message = "No automatic segmentation method is yet implemented."
raise WORCexceptions.WORCNotImplementedError(message)
elif len(self.segmentations_train) == len(image_types):
# Segmentations provided
self.sources_segmentations_train = dict()
self.sources_segmentations_test = dict()
self.segmode = 'Provided'
elif len(self.segmentations_train) == 1:
# Assume segmentations need to be registered to other modalities
print('\t - Adding Elastix node for image registration.')
self.add_elastix_sourcesandsinks()
pass
else:
nseg = len(self.segmentations_train)
nim = len(image_types)
m = f'Length of segmentations for training is ' +\
f'{nseg}: should be equal to number of images' +\
f' ({nim}) or 1 when using registration.'
raise WORCexceptions.WORCValueError(m)
# BUG: We assume that first type defines if we use segmentix
if self.configs[0]['General']['Segmentix'] == 'True':
# Use the segmentix toolbox for segmentation processing
print('\t - Adding segmentix node for segmentation preprocessing.')
self.sinks_segmentations_segmentix_train = dict()
self.sources_masks_train = dict()
self.converters_masks_train = dict()
self.nodes_segmentix_train = dict()
if self.TrainTest:
# Also use segmentix on the test set
self.sinks_segmentations_segmentix_test = dict()
self.sources_masks_test = dict()
self.converters_masks_test = dict()
self.nodes_segmentix_test = dict()
if self.semantics_train:
# Semantic features are supplied
self.sources_semantics_train = dict()
if self.metadata_train:
# Metadata to extract patient features from is supplied
self.sources_metadata_train = dict()
if self.semantics_test:
# Semantic features are supplied
self.sources_semantics_test = dict()
if self.metadata_test:
# Metadata to extract patient features from is supplied
self.sources_metadata_test = dict()
# Create a part of the pipeline for each modality
self.modlabels = list()
for nmod, mod in enumerate(image_types):
# Create label for each modality/image
num = 0
label = mod + '_' + str(num)
while label in self.calcfeatures_train.keys():
# if label already exists, add number to label
num += 1
label = mod + '_' + str(num)
self.modlabels.append(label)
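# e.g. two CT modalities yield the labels 'CT_0' and 'CT_1'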
# Create required sources and sinks
self.sources_parameters[label] = self.network.create_source('ParameterFile', id='config_' + label, step_id='general_sources')
self.sources_images_train[label] = self.network.create_source('ITKImageFile', id='images_train_' + label, node_group='train', step_id='train_sources')
if self.TrainTest:
self.sources_images_test[label] = self.network.create_source('ITKImageFile', id='images_test_' + label, node_group='test', step_id='test_sources')
if self.metadata_train and len(self.metadata_train) >= nmod + 1:
self.sources_metadata_train[label] = self.network.create_source('DicomImageFile', id='metadata_train_' + label, node_group='train', step_id='train_sources')
if self.metadata_test and len(self.metadata_test) >= nmod + 1:
self.sources_metadata_test[label] = self.network.create_source('DicomImageFile', id='metadata_test_' + label, node_group='test', step_id='test_sources')
if self.masks_train and len(self.masks_train) >= nmod + 1:
# Create mask source and convert
self.sources_masks_train[label] = self.network.create_source('ITKImageFile', id='mask_train_' + label, node_group='train', step_id='train_sources')
memory = self.fastr_memory_parameters['WORCCastConvert']
self.converters_masks_train[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_mask_train_' + label,
node_group='train',
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
self.converters_masks_train[label].inputs['image'] = self.sources_masks_train[label].output
if self.masks_test and len(self.masks_test) >= nmod + 1:
# Create mask source and convert
self.sources_masks_test[label] = self.network.create_source('ITKImageFile', id='mask_test_' + label, node_group='test', step_id='test_sources')
memory = self.fastr_memory_parameters['WORCCastConvert']
self.converters_masks_test[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_mask_test_' + label,
node_group='test',
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
self.converters_masks_test[label].inputs['image'] = self.sources_masks_test[label].output
# First convert the images
if any(modality in mod for modality in ['MR', 'CT', 'MG', 'PET']):
# Use WORCCastConvert for converting image formats
memory = self.fastr_memory_parameters['WORCCastConvert']
self.converters_im_train[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_im_train_' + label,
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
if self.TrainTest:
self.converters_im_test[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_im_test_' + label,
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
else:
raise WORCexceptions.WORCTypeError(('No valid image type for modality {}: {} provided.').format(str(nmod), mod))
# Create required links
self.converters_im_train[label].inputs['image'] = self.sources_images_train[label].output
if self.TrainTest:
self.converters_im_test[label].inputs['image'] = self.sources_images_test[label].output
# -----------------------------------------------------
# Preprocessing
preprocess_node = str(self.configs[nmod]['General']['Preprocessing'])
print('\t - Adding preprocessing node for image preprocessing.')
self.add_preprocessing(preprocess_node, label, nmod)
# -----------------------------------------------------
# Feature calculation
feature_calculators =\
self.configs[nmod]['General']['FeatureCalculators']
feature_calculators = feature_calculators.strip('][').split(', ')
self.featurecalculators[label] = [f.split('/')[0] for f in feature_calculators]
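# e.g. '[predict/CalcFeatures:1.0, pyradiomics/Pyradiomics:1.0]' becomes
# ['predict/CalcFeatures:1.0', 'pyradiomics/Pyradiomics:1.0'], of which only
# the toolbox prefixes ('predict', 'pyradiomics') are kept in featurecalculators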
# Add lists for feature calculation and converter objects
self.calcfeatures_train[label] = list()
self.featureconverter_train[label] = list()
if self.TrainTest:
self.calcfeatures_test[label] = list()
self.featureconverter_test[label] = list()
for f in feature_calculators:
print(f'\t - Adding feature calculation node: {f}.')
self.add_feature_calculator(f, label, nmod)
# -----------------------------------------------------
# Create the necessary nodes for the segmentation
if self.segmode == 'Provided':
# Segmentation ----------------------------------------------------
# Use the provided segmentations for each modality
memory = self.fastr_memory_parameters['WORCCastConvert']
self.sources_segmentations_train[label] =\
self.network.create_source('ITKImageFile',
id='segmentations_train_' + label,
node_group='train',
step_id='train_sources')
self.converters_seg_train[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_seg_train_' + label,
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
self.converters_seg_train[label].inputs['image'] =\
self.sources_segmentations_train[label].output
if self.TrainTest:
self.sources_segmentations_test[label] =\
self.network.create_source('ITKImageFile',
id='segmentations_test_' + label,
node_group='test',
step_id='test_sources')
self.converters_seg_test[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_seg_test_' + label,
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
self.converters_seg_test[label].inputs['image'] =\
self.sources_segmentations_test[label].output
elif self.segmode == 'Register':
# ---------------------------------------------
# Registration nodes: Align segmentation of first
# modality to others using registration with Elastix
self.add_elastix(label, nmod)
# -----------------------------------------------------
# Optionally, add segmentix, the in-house segmentation
# processor of WORC
if self.configs[nmod]['General']['Segmentix'] == 'True':
self.add_segmentix(label, nmod)
elif self.configs[nmod]['Preprocessing']['Resampling'] == 'True':
raise WORCexceptions.WORCValueError('If you use resampling, ' +
'you also have to use segmentix ' +
'to make sure the mask is ' +
'resampled as well. Please set ' +
'config["General"]["Segmentix"] ' +
'to "True".')
else:
# Provide source or elastix segmentations to
# feature calculator
for i_node in range(len(self.calcfeatures_train[label])):
if self.segmode == 'Provided':
self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
self.converters_seg_train[label].outputs['image']
elif self.segmode == 'Register':
if nmod > 0:
self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
self.transformix_seg_nodes_train[label].outputs['image']
else:
self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
self.converters_seg_train[label].outputs['image']
if self.TrainTest:
if self.segmode == 'Provided':
self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
self.converters_seg_test[label].outputs['image']
elif self.segmode == 'Register':
if nmod > 0:
self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
self.transformix_seg_nodes_test[label].outputs['image']
else:
self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
self.converters_seg_test[label].outputs['image']
# -----------------------------------------------------
# Optionally, add ComBat Harmonization
if self.configs[0]['General']['ComBat'] == 'True':
# Link features to ComBat
self.links_Combat1_train[label] = list()
for i_node, fname in enumerate(self.featurecalculators[label]):
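# NOTE: fastr's '<<' operator is shorthand for creating a link from the
# output on the right-hand side to the input on the left-hand side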
self.links_Combat1_train[label].append(self.ComBat.inputs['features_train'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_train[label][i_node].outputs['feat_out'])
self.links_Combat1_train[label][i_node].collapse = 'train'
if self.TrainTest:
self.links_Combat1_test[label] = list()
for i_node, fname in enumerate(self.featurecalculators[label]):
self.links_Combat1_test[label].append(self.ComBat.inputs['features_test'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_test[label][i_node].outputs['feat_out'])
self.links_Combat1_test[label][i_node].collapse = 'test'
# -----------------------------------------------------
# Classification nodes
# Add the features from this modality to the classifier node input
self.links_C1_train[label] = list()
self.sinks_features_train[label] = list()
if self.TrainTest:
self.links_C1_test[label] = list()
self.sinks_features_test[label] = list()
for i_node, fname in enumerate(self.featurecalculators[label]):
# Create sink for feature outputs
self.sinks_features_train[label].append(self.network.create_sink('HDF5', id='features_train_' + label + '_' + fname, step_id='train_sinks'))
# Append features to the classification
if not self.configs[0]['General']['ComBat'] == 'True':
self.links_C1_train[label].append(self.classify.inputs['features_train'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_train[label][i_node].outputs['feat_out'])
self.links_C1_train[label][i_node].collapse = 'train'
# Save output
self.sinks_features_train[label][i_node].input = self.featureconverter_train[label][i_node].outputs['feat_out']
# Similar for testing workflow
if self.TrainTest:
# Create sink for feature outputs
self.sinks_features_test[label].append(self.network.create_sink('HDF5', id='features_test_' + label + '_' + fname, step_id='test_sinks'))
# Append features to the classification
if not self.configs[0]['General']['ComBat'] == 'True':
self.links_C1_test[label].append(self.classify.inputs['features_test'][f'{label}_{self.featurecalculators[label][i_node]}'] << self.featureconverter_test[label][i_node].outputs['feat_out'])
self.links_C1_test[label][i_node].collapse = 'test'
# Save output
self.sinks_features_test[label][i_node].input = self.featureconverter_test[label][i_node].outputs['feat_out']
else:
# Features already provided: hence we can skip numerous nodes
self.sources_features_train = dict()
self.links_C1_train = dict()
if self.features_test:
self.sources_features_test = dict()
self.links_C1_test = dict()
# Create label for each modality/image
self.modlabels = list()
for num, mod in enumerate(image_types):
num = 0
label = mod + str(num)
while label in self.sources_features_train.keys():
# if label exists, add number to label
num += 1
label = mod + str(num)
self.modlabels.append(label)
# Create a source for the precomputed features
self.sources_features_train[label] = self.network.create_source('HDF5', id='features_train_' + label, node_group='train', step_id='train_sources')
# Add the features from this modality to the classifier node input
self.links_C1_train[label] = self.classify.inputs['features_train'][str(label)] << self.sources_features_train[label].output
self.links_C1_train[label].collapse = 'train'
if self.features_test:
# Create a source for the precomputed features
self.sources_features_test[label] = self.network.create_source('HDF5', id='features_test_' + label, node_group='test', step_id='test_sources')
# Add the features from this modality to the classifier node input
self.links_C1_test[label] = self.classify.inputs['features_test'][str(label)] << self.sources_features_test[label].output
self.links_C1_test[label].collapse = 'test'
else:
raise WORCexceptions.WORCIOError("Please provide labels.")
else:
raise WORCexceptions.WORCIOError("Please provide either images or features.")
def add_ComBat(self):
"""Add ComBat harmonization to the network.
Note: applied on all objects, not in a train-test or cross-val setting.
"""
memory = self.fastr_memory_parameters['ComBat']
self.ComBat =\
self.network.create_node('combat/ComBat:1.0',
tool_version='1.0',
id='ComBat',
resources=ResourceLimit(memory=memory),
step_id='ComBat')
# Create sink for ComBat output
self.sinks_features_train_ComBat = self.network.create_sink('HDF5', id='features_train_ComBat', step_id='ComBat')
# Create links for inputs
self.link_combat_1 = self.network.create_link(self.source_class_config.output, self.ComBat.inputs['config'])
self.link_combat_2 = self.network.create_link(self.source_patientclass_train.output, self.ComBat.inputs['patientclass_train'])
self.link_combat_1.collapse = 'conf'
self.link_combat_2.collapse = 'pctrain'
self.links_Combat1_train = dict()
self.links_Combat1_test = dict()
# Link Combat output to both sink and classify node
self.links_Combat_out_train = self.network.create_link(self.ComBat.outputs['features_train_out'], self.classify.inputs['features_train'])
self.links_Combat_out_train.collapse = 'ComBat'
self.sinks_features_train_ComBat.input = self.ComBat.outputs['features_train_out']
if self.TrainTest:
# Create sink for ComBat output
self.sinks_features_test_ComBat = self.network.create_sink('HDF5', id='features_test_ComBat', step_id='ComBat')
# Create links for inputs
self.link_combat_3 = self.network.create_link(self.source_patientclass_test.output, self.ComBat.inputs['patientclass_test'])
self.link_combat_3.collapse = 'pctest'
# Link Combat output to both sink and classify node
self.links_Combat_out_test = self.network.create_link(self.ComBat.outputs['features_test_out'], self.classify.inputs['features_test'])
self.links_Combat_out_test.collapse = 'ComBat'
self.sinks_features_test_ComBat.input = self.ComBat.outputs['features_test_out']
def add_preprocessing(self, preprocess_node, label, nmod):
"""Add nodes required for preprocessing of images."""
memory = self.fastr_memory_parameters['Preprocessing']
self.preprocessing_train[label] = self.network.create_node(preprocess_node, tool_version='1.0', id='preprocessing_train_' + label, resources=ResourceLimit(memory=memory), step_id='Preprocessing')
if self.TrainTest:
self.preprocessing_test[label] = self.network.create_node(preprocess_node, tool_version='1.0', id='preprocessing_test_' + label, resources=ResourceLimit(memory=memory), step_id='Preprocessing')
# Create required links
self.preprocessing_train[label].inputs['parameters'] = self.sources_parameters[label].output
self.preprocessing_train[label].inputs['image'] = self.converters_im_train[label].outputs['image']
if self.TrainTest:
self.preprocessing_test[label].inputs['parameters'] = self.sources_parameters[label].output
self.preprocessing_test[label].inputs['image'] = self.converters_im_test[label].outputs['image']
if self.metadata_train and len(self.metadata_train) >= nmod + 1:
self.preprocessing_train[label].inputs['metadata'] = self.sources_metadata_train[label].output
if self.metadata_test and len(self.metadata_test) >= nmod + 1:
self.preprocessing_test[label].inputs['metadata'] = self.sources_metadata_test[label].output
# If there are masks to use in normalization, add them here
if self.masks_normalize_train:
self.sources_masks_normalize_train[label] = self.network.create_source('ITKImageFile', id='masks_normalize_train_' + label, node_group='train', step_id='Preprocessing')
self.preprocessing_train[label].inputs['mask'] = self.sources_masks_normalize_train[label].output
if self.masks_normalize_test:
self.sources_masks_normalize_test[label] = self.network.create_source('ITKImageFile', id='masks_normalize_test_' + label, node_group='test', step_id='Preprocessing')
self.preprocessing_test[label].inputs['mask'] = self.sources_masks_normalize_test[label].output
def add_feature_calculator(self, calcfeat_node, label, nmod):
"""Add a feature calculation node to the network."""
# The fastr node id may not contain certain symbols, so replace them in the
# tool name before combining it with the modality label
node_ID = '_'.join([calcfeat_node.replace(':', '_').replace('.', '_').replace('/', '_'),
label])
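# e.g. 'predict/CalcFeatures:1.0' with label 'CT_0' yields the node id
# 'calcfeatures_train_predict_CalcFeatures_1_0_CT_0'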
memory = self.fastr_memory_parameters['FeatureCalculator']
node_train =\
self.network.create_node(calcfeat_node,
tool_version='1.0',
id='calcfeatures_train_' + node_ID,
resources=ResourceLimit(memory=memory),
step_id='Feature_Extraction')
if self.TrainTest:
node_test =\
self.network.create_node(calcfeat_node,
tool_version='1.0',
id='calcfeatures_test_' + node_ID,
resources=ResourceLimit(memory=memory),
step_id='Feature_Extraction')
# Check if we need to add pyradiomics specific sources
if 'pyradiomics' in calcfeat_node.lower():
# Add a config source
self.source_config_pyradiomics[label] =\
self.network.create_source('YamlFile',
id='config_pyradiomics_' + label,
node_group='train',
step_id='Feature_Extraction')
# Add a format source, which we are going to set to a constant
# And attach to the tool node
self.source_format_pyradiomics =\
self.network.create_constant('String', 'csv',
id='format_pyradiomics_' + label,
node_group='train',
step_id='Feature_Extraction')
node_train.inputs['format'] =\
self.source_format_pyradiomics.output
if self.TrainTest:
node_test.inputs['format'] =\
self.source_format_pyradiomics.output
# Create required links
# We can have a different config for different tools
if 'pyradiomics' in calcfeat_node.lower():
node_train.inputs['parameters'] =\
self.source_config_pyradiomics[label].output
else:
node_train.inputs['parameters'] =\
self.sources_parameters[label].output
node_train.inputs['image'] =\
self.preprocessing_train[label].outputs['image']
if self.TrainTest:
if 'pyradiomics' in calcfeat_node.lower():
node_test.inputs['parameters'] =\
self.source_config_pyradiomics[label].output
else:
node_test.inputs['parameters'] =\
self.sources_parameters[label].output
node_test.inputs['image'] =\
self.preprocessing_test[label].outputs['image']
# PREDICT can extract semantic and metadata features
if 'predict' in calcfeat_node.lower():
if self.metadata_train and len(self.metadata_train) >= nmod + 1:
node_train.inputs['metadata'] =\
self.sources_metadata_train[label].output
if self.metadata_test and len(self.metadata_test) >= nmod + 1:
node_test.inputs['metadata'] =\
self.sources_metadata_test[label].output
# If a semantics file is provided, connect to feature extraction tool
if self.semantics_train and len(self.semantics_train) >= nmod + 1:
self.sources_semantics_train[label] =\
self.network.create_source('CSVFile',
id='semantics_train_' + label,
step_id='train_sources')
node_train.inputs['semantics'] =\
self.sources_semantics_train[label].output
if self.semantics_test and len(self.semantics_test) >= nmod + 1:
self.sources_semantics_test[label] =\
self.network.create_source('CSVFile',
id='semantics_test_' + label,
step_id='test_sources')
node_test.inputs['semantics'] =\
self.sources_semantics_test[label].output
# Add feature converter to make features WORC compatible
conv_train =\
self.network.create_node('worc/FeatureConverter:1.0',
tool_version='1.0',
id='featureconverter_train_' + node_ID,
resources=ResourceLimit(memory='4G'),
step_id='Feature_Extraction')
conv_train.inputs['feat_in'] = node_train.outputs['features']
# Add source to tell converter which toolbox we use
if 'pyradiomics' in calcfeat_node.lower():
toolbox = 'PyRadiomics'
elif 'predict' in calcfeat_node.lower():
toolbox = 'PREDICT'
else:
message = f'Toolbox {calcfeat_node} not recognized!'
raise WORCexceptions.WORCKeyError(message)
self.source_toolbox_name[label] =\
self.network.create_constant('String', toolbox,
id=f'toolbox_name_{toolbox}_{label}',
step_id='Feature_Extraction')
conv_train.inputs['toolbox'] = self.source_toolbox_name[label].output
conv_train.inputs['config'] = self.sources_parameters[label].output
if self.TrainTest:
conv_test =\
self.network.create_node('worc/FeatureConverter:1.0',
tool_version='1.0',
id='featureconverter_test_' + node_ID,
resources=ResourceLimit(memory='4G'),
step_id='Feature_Extraction')
conv_test.inputs['feat_in'] = node_test.outputs['features']
conv_test.inputs['toolbox'] = self.source_toolbox_name[label].output
conv_test.inputs['config'] = self.sources_parameters[label].output
# Append to nodes to list
self.calcfeatures_train[label].append(node_train)
self.featureconverter_train[label].append(conv_train)
if self.TrainTest:
self.calcfeatures_test[label].append(node_test)
self.featureconverter_test[label].append(conv_test)
def add_elastix_sourcesandsinks(self):
"""Add sources and sinks required for image registration."""
self.sources_segmentation = dict()
self.segmode = 'Register'
self.source_Elastix_Parameters = dict()
self.elastix_nodes_train = dict()
self.transformix_seg_nodes_train = dict()
self.sources_segmentations_train = dict()
self.sinks_transformations_train = dict()
self.sinks_segmentations_elastix_train = dict()
self.sinks_images_elastix_train = dict()
self.converters_seg_train = dict()
self.edittransformfile_nodes_train = dict()
self.transformix_im_nodes_train = dict()
self.elastix_nodes_test = dict()
self.transformix_seg_nodes_test = dict()
self.sources_segmentations_test = dict()
self.sinks_transformations_test = dict()
self.sinks_segmentations_elastix_test = dict()
self.sinks_images_elastix_test = dict()
self.converters_seg_test = dict()
self.edittransformfile_nodes_test = dict()
self.transformix_im_nodes_test = dict()
def add_elastix(self, label, nmod):
""" Add image registration through elastix to network."""
# Create sources and converter only for the given segmentation,
# which should be on the first modality
if nmod == 0:
memory = self.fastr_memory_parameters['WORCCastConvert']
self.sources_segmentations_train[label] =\
self.network.create_source('ITKImageFile',
id='segmentations_train_' + label,
node_group='input',
step_id='train_sources')
self.converters_seg_train[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_seg_train_' + label,
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
self.converters_seg_train[label].inputs['image'] =\
self.sources_segmentations_train[label].output
if self.TrainTest:
self.sources_segmentations_test[label] =\
self.network.create_source('ITKImageFile',
id='segmentations_test_' + label,
node_group='input',
step_id='test_sources')
self.converters_seg_test[label] =\
self.network.create_node('worc/WORCCastConvert:0.3.2',
tool_version='0.1',
id='convert_seg_test_' + label,
resources=ResourceLimit(memory=memory),
step_id='FileConversion')
self.converters_seg_test[label].inputs['image'] =\
self.sources_segmentations_test[label].output
# Assume provided segmentation is on first modality
if nmod > 0:
# Use elastix and transformix for registration
# NOTE: Assume elastix node type is on first configuration
elastix_node =\
str(self.configs[0]['General']['RegistrationNode'])
transformix_node =\
str(self.configs[0]['General']['TransformationNode'])
memory_elastix = self.fastr_memory_parameters['Elastix']
self.elastix_nodes_train[label] =\
self.network.create_node(elastix_node,
tool_version='0.2',
id='elastix_train_' + label,
resources=ResourceLimit(memory=memory_elastix),
step_id='Image_Registration')
memory_transformix = self.fastr_memory_parameters['Elastix']
self.transformix_seg_nodes_train[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_seg_train_' + label,
resources=ResourceLimit(memory=memory_transformix),
step_id='Image_Registration')
self.transformix_im_nodes_train[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_im_train_' + label,
resources=ResourceLimit(memory=memory_transformix),
step_id='Image_Registration')
if self.TrainTest:
self.elastix_nodes_test[label] =\
self.network.create_node(elastix_node,
tool_version='0.2',
id='elastix_test_' + label,
resources=ResourceLimit(memory=memory_elastix),
step_id='Image_Registration')
self.transformix_seg_nodes_test[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_seg_test_' + label,
resources=ResourceLimit(memory=memory_transformix),
step_id='Image_Registration')
self.transformix_im_nodes_test[label] =\
self.network.create_node(transformix_node,
tool_version='0.2',
id='transformix_im_test_' + label,
resources=ResourceLimit(memory=memory_transformix),
step_id='Image_Registration')
# Link the fixed and moving images for registration
# M1 = moving, others = fixed
self.elastix_nodes_train[label].inputs['fixed_image'] =\
self.converters_im_train[label].outputs['image']
self.elastix_nodes_train[label].inputs['moving_image'] =\
self.converters_im_train[self.modlabels[0]].outputs['image']
# Add node that copies metadata from the image to the
# segmentation if required
if self.CopyMetadata:
# Copy metadata from the image which was registered to
# the segmentation, if it is not created yet
if not hasattr(self, "copymetadata_nodes_train"):
# NOTE: Do this for first modality, as we assume
# the segmentation is on that one
self.copymetadata_nodes_train = dict()
self.copymetadata_nodes_train[self.modlabels[0]] =\
self.network.create_node('itktools/0.3.2/CopyMetadata:1.0',
tool_version='1.0',
id='CopyMetadata_train_' + self.modlabels[0],
step_id='Image_Registration')
self.copymetadata_nodes_train[self.modlabels[0]].inputs["source"] =\
self.converters_im_train[self.modlabels[0]].outputs['image']
self.copymetadata_nodes_train[self.modlabels[0]].inputs["destination"] =\
self.converters_seg_train[self.modlabels[0]].outputs['image']
self.transformix_seg_nodes_train[label].inputs['image'] =\
self.copymetadata_nodes_train[self.modlabels[0]].outputs['output']
else:
self.transformix_seg_nodes_train[label].inputs['image'] =\
self.converters_seg_train[self.modlabels[0]].outputs['image']
if self.TrainTest:
self.elastix_nodes_test[label].inputs['fixed_image'] =\
self.converters_im_test[label].outputs['image']
self.elastix_nodes_test[label].inputs['moving_image'] =\
self.converters_im_test[self.modlabels[0]].outputs['image']
if self.CopyMetadata:
# Copy metadata from the image which was registered
# to the segmentation
if not hasattr(self, "copymetadata_nodes_test"):
# NOTE: Do this for first modality, as we assume
# the segmentation is on that one
self.copymetadata_nodes_test = dict()
self.copymetadata_nodes_test[self.modlabels[0]] =\
self.network.create_node('itktools/0.3.2/CopyMetadata:1.0',
tool_version='1.0',
id='CopyMetadata_test_' + self.modlabels[0],
step_id='Image_Registration')
self.copymetadata_nodes_test[self.modlabels[0]].inputs["source"] =\
self.converters_im_test[self.modlabels[0]].outputs['image']
self.copymetadata_nodes_test[self.modlabels[0]].inputs["destination"] =\
self.converters_seg_test[self.modlabels[0]].outputs['image']
self.transformix_seg_nodes_test[label].inputs['image'] =\
self.copymetadata_nodes_test[self.modlabels[0]].outputs['output']
else:
self.transformix_seg_nodes_test[label].inputs['image'] =\
self.converters_seg_test[self.modlabels[0]].outputs['image']
# Apply registration to input modalities
self.source_Elastix_Parameters[label] =\
self.network.create_source('ElastixParameterFile',
id='Elastix_Para_' + label,
node_group='elpara',
step_id='Image_Registration')
self.link_elparam_train =\
self.network.create_link(self.source_Elastix_Parameters[label].output,
self.elastix_nodes_train[label].inputs['parameters'])
self.link_elparam_train.collapse = 'elpara'
if self.TrainTest:
self.link_elparam_test =\
self.network.create_link(self.source_Elastix_Parameters[label].output,
self.elastix_nodes_test[label].inputs['parameters'])
self.link_elparam_test.collapse = 'elpara'
if self.masks_train:
self.elastix_nodes_train[label].inputs['fixed_mask'] =\
self.converters_masks_train[label].outputs['image']
self.elastix_nodes_train[label].inputs['moving_mask'] =\
self.converters_masks_train[self.modlabels[0]].outputs['image']
if self.TrainTest:
if self.masks_test:
self.elastix_nodes_test[label].inputs['fixed_mask'] =\
self.converters_masks_test[label].outputs['image']
self.elastix_nodes_test[label].inputs['moving_mask'] =\
self.converters_masks_test[self.modlabels[0]].outputs['image']
# Change the FinalBSpline Interpolation order to 0 as required for binary images: see https://github.com/SuperElastix/elastix/wiki/FAQ
self.edittransformfile_nodes_train[label] =\
self.network.create_node('elastixtools/EditElastixTransformFile:0.1',
tool_version='0.1',
id='EditElastixTransformFile_train_' + label,
step_id='Image_Registration')
self.edittransformfile_nodes_train[label].inputs['set'] =\
["FinalBSplineInterpolationOrder=0"]
self.edittransformfile_nodes_train[label].inputs['transform'] =\
self.elastix_nodes_train[label].outputs['transform'][-1]
if self.TrainTest:
self.edittransformfile_nodes_test[label] =\
self.network.create_node('elastixtools/EditElastixTransformFile:0.1',
tool_version='0.1',
id='EditElastixTransformFile_test_' + label,
step_id='Image_Registration')
self.edittransformfile_nodes_test[label].inputs['set'] =\
["FinalBSplineInterpolationOrder=0"]
self.edittransformfile_nodes_test[label].inputs['transform'] =\
self.elastix_nodes_test[label].outputs['transform'][-1]
# Link data and transformation to transformix and source
self.transformix_seg_nodes_train[label].inputs['transform'] =\
self.edittransformfile_nodes_train[label].outputs['transform']
self.transformix_im_nodes_train[label].inputs['transform'] =\
self.elastix_nodes_train[label].outputs['transform'][-1]
self.transformix_im_nodes_train[label].inputs['image'] =\
self.converters_im_train[self.modlabels[0]].outputs['image']
if self.TrainTest:
self.transformix_seg_nodes_test[label].inputs['transform'] =\
self.edittransformfile_nodes_test[label].outputs['transform']
self.transformix_im_nodes_test[label].inputs['transform'] =\
self.elastix_nodes_test[label].outputs['transform'][-1]
self.transformix_im_nodes_test[label].inputs['image'] =\
self.converters_im_test[self.modlabels[0]].outputs['image']
if self.configs[nmod]['General']['Segmentix'] != 'True':
# These segmentations serve as input for the feature calculation
for i_node in range(len(self.calcfeatures_train[label])):
self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
self.transformix_seg_nodes_train[label].outputs['image']
if self.TrainTest:
self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
self.transformix_seg_nodes_test[label].outputs['image']
# Save output for the training set
self.sinks_transformations_train[label] =\
self.network.create_sink('ElastixTransformFile',
id='transformations_train_' + label,
step_id='train_sinks')
self.sinks_segmentations_elastix_train[label] =\
self.network.create_sink('ITKImageFile',
id='segmentations_out_elastix_train_' + label,
step_id='train_sinks')
self.sinks_images_elastix_train[label] =\
self.network.create_sink('ITKImageFile',
id='images_out_elastix_train_' + label,
step_id='train_sinks')
self.sinks_transformations_train[label].input =\
self.elastix_nodes_train[label].outputs['transform']
self.sinks_segmentations_elastix_train[label].input =\
self.transformix_seg_nodes_train[label].outputs['image']
self.sinks_images_elastix_train[label].input =\
self.transformix_im_nodes_train[label].outputs['image']
# Save output for the test set
if self.TrainTest:
self.sinks_transformations_test[label] =\
self.network.create_sink('ElastixTransformFile',
id='transformations_test_' + label,
step_id='test_sinks')
self.sinks_segmentations_elastix_test[label] =\
self.network.create_sink('ITKImageFile',
id='segmentations_out_elastix_test_' + label,
step_id='test_sinks')
self.sinks_images_elastix_test[label] =\
self.network.create_sink('ITKImageFile',
id='images_out_elastix_test_' + label,
step_id='test_sinks')
self.sinks_transformations_test[label].input =\
self.elastix_nodes_test[label].outputs['transform']
self.sinks_segmentations_elastix_test[label].input =\
self.transformix_seg_nodes_test[label].outputs['image']
self.sinks_images_elastix_test[label].input =\
self.transformix_im_nodes_test[label].outputs['image']
def add_segmentix(self, label, nmod):
"""Add segmentix to the network."""
# Segmentix nodes -------------------------------------------------
# Use segmentix node to convert input segmentation into
# correct contour
if label not in self.sinks_segmentations_segmentix_train:
self.sinks_segmentations_segmentix_train[label] =\
self.network.create_sink('ITKImageFile',
id='segmentations_out_segmentix_train_' + label,
step_id='train_sinks')
memory = self.fastr_memory_parameters['Segmentix']
self.nodes_segmentix_train[label] =\
self.network.create_node('segmentix/Segmentix:1.0',
tool_version='1.0',
id='segmentix_train_' + label,
resources=ResourceLimit(memory=memory),
step_id='Preprocessing')
# Input the image
self.nodes_segmentix_train[label].inputs['image'] =\
self.converters_im_train[label].outputs['image']
# Input the metadata
if self.metadata_train and len(self.metadata_train) >= nmod + 1:
self.nodes_segmentix_train[label].inputs['metadata'] = self.sources_metadata_train[label].output
# Input the segmentation
if hasattr(self, 'transformix_seg_nodes_train'):
if label in self.transformix_seg_nodes_train.keys():
# Use output of registration in segmentix
self.nodes_segmentix_train[label].inputs['segmentation_in'] =\
self.transformix_seg_nodes_train[label].outputs['image']
else:
# Use original segmentation
self.nodes_segmentix_train[label].inputs['segmentation_in'] =\
self.converters_seg_train[label].outputs['image']
else:
# Use original segmentation
self.nodes_segmentix_train[label].inputs['segmentation_in'] =\
self.converters_seg_train[label].outputs['image']
# Input the parameters
self.nodes_segmentix_train[label].inputs['parameters'] =\
self.sources_parameters[label].output
self.sinks_segmentations_segmentix_train[label].input =\
self.nodes_segmentix_train[label].outputs['segmentation_out']
if self.TrainTest:
self.sinks_segmentations_segmentix_test[label] =\
self.network.create_sink('ITKImageFile',
id='segmentations_out_segmentix_test_' + label,
step_id='test_sinks')
self.nodes_segmentix_test[label] =\
self.network.create_node('segmentix/Segmentix:1.0',
tool_version='1.0',
id='segmentix_test_' + label,
resources=ResourceLimit(memory=memory),
step_id='Preprocessing')
# Input the image
self.nodes_segmentix_test[label].inputs['image'] =\
self.converters_im_test[label].outputs['image']
# Input the metadata
if self.metadata_test and len(self.metadata_test) >= nmod + 1:
self.nodes_segmentix_test[label].inputs['metadata'] = self.sources_metadata_test[label].output
if hasattr(self, 'transformix_seg_nodes_test'):
if label in self.transformix_seg_nodes_test.keys():
# Use output of registration in segmentix
self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
self.transformix_seg_nodes_test[label].outputs['image']
else:
# Use original segmentation
self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
self.converters_seg_test[label].outputs['image']
else:
# Use original segmentation
self.nodes_segmentix_test[label].inputs['segmentation_in'] =\
self.converters_seg_test[label].outputs['image']
self.nodes_segmentix_test[label].inputs['parameters'] =\
self.sources_parameters[label].output
self.sinks_segmentations_segmentix_test[label].input =\
self.nodes_segmentix_test[label].outputs['segmentation_out']
for i_node in range(len(self.calcfeatures_train[label])):
self.calcfeatures_train[label][i_node].inputs['segmentation'] =\
self.nodes_segmentix_train[label].outputs['segmentation_out']
if self.TrainTest:
self.calcfeatures_test[label][i_node].inputs['segmentation'] =\
self.nodes_segmentix_test[label].outputs['segmentation_out']
if self.masks_train and len(self.masks_train) >= nmod + 1:
# Use masks
self.nodes_segmentix_train[label].inputs['mask'] =\
self.converters_masks_train[label].outputs['image']
if self.masks_test and len(self.masks_test) >= nmod + 1:
# Use masks
self.nodes_segmentix_test[label].inputs['mask'] =\
self.converters_masks_test[label].outputs['image']
def set(self):
"""Set the FASTR source and sink data based on the given attributes."""
self.fastrconfigs = list()
self.source_data = dict()
self.sink_data = dict()
# Save the configurations as files
self.save_config()
# fixed splits
if self.fixedsplits:
self.source_data['fixedsplits_source'] = self.fixedsplits
# Generate gridsearch parameter files if required
self.source_data['config_classification_source'] = self.fastrconfigs[0]
# Set source and sink data
self.source_data['patientclass_train'] = self.labels_train
self.source_data['patientclass_test'] = self.labels_test
self.sink_data['classification'] = ("vfs://output/{}/estimator_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
self.sink_data['performance'] = ("vfs://output/{}/performance_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
self.sink_data['config_classification_sink'] = ("vfs://output/{}/config_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
self.sink_data['features_train_ComBat'] = ("vfs://output/{}/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
self.sink_data['features_test_ComBat'] = ("vfs://output/{}/ComBat/features_ComBat_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name)
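        # The vfs:// sink paths above are fastr URL templates; fastr is expected to substitute
        # {sample_id}, {cardinality} and {ext} for every sample written to a sink.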
# Set the source data from the WORC objects you created
for num, label in enumerate(self.modlabels):
self.source_data['config_' + label] = self.fastrconfigs[num]
if self.pyradiomics_configs:
self.source_data['config_pyradiomics_' + label] = self.pyradiomics_configs[num]
# Add train data sources
if self.images_train and len(self.images_train) - 1 >= num:
self.source_data['images_train_' + label] = self.images_train[num]
if self.masks_train and len(self.masks_train) - 1 >= num:
self.source_data['mask_train_' + label] = self.masks_train[num]
if self.masks_normalize_train and len(self.masks_normalize_train) - 1 >= num:
self.source_data['masks_normalize_train_' + label] = self.masks_normalize_train[num]
if self.metadata_train and len(self.metadata_train) - 1 >= num:
self.source_data['metadata_train_' + label] = self.metadata_train[num]
if self.segmentations_train and len(self.segmentations_train) - 1 >= num:
self.source_data['segmentations_train_' + label] = self.segmentations_train[num]
if self.semantics_train and len(self.semantics_train) - 1 >= num:
self.source_data['semantics_train_' + label] = self.semantics_train[num]
if self.features_train and len(self.features_train) - 1 >= num:
self.source_data['features_train_' + label] = self.features_train[num]
if self.Elastix_Para:
# First modality does not need to be registered
if num > 0:
if len(self.Elastix_Para) > 1:
# Each modality has its own registration parameters
self.source_data['Elastix_Para_' + label] = self.Elastix_Para[num]
else:
# Use one fileset for all modalities
self.source_data['Elastix_Para_' + label] = self.Elastix_Para[0]
# Add test data sources
if self.images_test and len(self.images_test) - 1 >= num:
self.source_data['images_test_' + label] = self.images_test[num]
if self.masks_test and len(self.masks_test) - 1 >= num:
self.source_data['mask_test_' + label] = self.masks_test[num]
if self.masks_normalize_test and len(self.masks_normalize_test) - 1 >= num:
self.source_data['masks_normalize_test_' + label] = self.masks_normalize_test[num]
if self.metadata_test and len(self.metadata_test) - 1 >= num:
self.source_data['metadata_test_' + label] = self.metadata_test[num]
if self.segmentations_test and len(self.segmentations_test) - 1 >= num:
self.source_data['segmentations_test_' + label] = self.segmentations_test[num]
if self.semantics_test and len(self.semantics_test) - 1 >= num:
self.source_data['semantics_test_' + label] = self.semantics_test[num]
if self.features_test and len(self.features_test) - 1 >= num:
self.source_data['features_test_' + label] = self.features_test[num]
self.sink_data['segmentations_out_segmentix_train_' + label] = ("vfs://output/{}/Segmentations/seg_{}_segmentix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
self.sink_data['segmentations_out_elastix_train_' + label] = ("vfs://output/{}/Elastix/seg_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
self.sink_data['images_out_elastix_train_' + label] = ("vfs://output/{}/Elastix/im_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
if hasattr(self, 'featurecalculators'):
for f in self.featurecalculators[label]:
self.sink_data['features_train_' + label + '_' + f] = ("vfs://output/{}/Features/features_{}_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, f, label)
if self.labels_test:
self.sink_data['segmentations_out_segmentix_test_' + label] = ("vfs://output/{}/Segmentations/seg_{}_segmentix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
self.sink_data['segmentations_out_elastix_test_' + label] = ("vfs://output/{}/Elastix/seg_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
self.sink_data['images_out_elastix_test_' + label] = ("vfs://output/{}/Images/im_{}_elastix_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
if hasattr(self, 'featurecalculators'):
for f in self.featurecalculators[label]:
self.sink_data['features_test_' + label + '_' + f] = ("vfs://output/{}/Features/features_{}_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, f, label)
# Add elastix sinks if used
if self.segmode:
# Segmode is only non-empty if segmentations are provided
if self.segmode == 'Register':
self.sink_data['transformations_train_' + label] = ("vfs://output/{}/Elastix/transformation_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
if self.TrainTest:
self.sink_data['transformations_test_' + label] = ("vfs://output/{}/Elastix/transformation_{}_{{sample_id}}_{{cardinality}}{{ext}}").format(self.name, label)
if self._add_evaluation:
self.Evaluate.set()
def execute(self):
"""Execute the network through the fastr.network.execute command."""
        # Draw and execute network
try:
self.network.draw(file_path=self.network.id + '.svg', draw_dimensions=True)
except graphviz.backend.ExecutableNotFound:
print('[WORC WARNING] Graphviz executable not found: not drawing network diagram. Make sure the Graphviz executables are on your systems PATH.')
except graphviz.backend.CalledProcessError as e:
print(f'[WORC WARNING] Graphviz executable gave an error: not drawing network diagram. Original error: {e}')
# export hyper param. search space to LaTeX table
for config in self.fastrconfigs:
config_path = Path(url2pathname(urlparse(config).path))
tex_path = f'{config_path.parent.absolute() / config_path.stem}_hyperparams_space.tex'
export_hyper_params_to_latex(config_path, tex_path)
if DebugDetector().do_detection():
print("Source Data:")
for k in self.source_data.keys():
print(f"\t {k}: {self.source_data[k]}.")
print("\n Sink Data:")
for k in self.sink_data.keys():
print(f"\t {k}: {self.sink_data[k]}.")
# When debugging, set the tempdir to the default of fastr + name
self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'],
self.name)
self.network.execute(self.source_data, self.sink_data, execution_plugin=self.fastr_plugin, tmpdir=self.fastr_tmpdir)
def add_evaluation(self, label_type, modus='binary_classification'):
"""Add branch for evaluation of performance to network.
Note: should be done after build, before set:
WORC.build()
WORC.add_evaluation(label_type)
WORC.set()
WORC.execute()
"""
self.Evaluate =\
Evaluate(label_type=label_type, parent=self, modus=modus)
self._add_evaluation = True
def save_config(self):
"""Save the config files to physical files and add to network."""
        # If the configuration files are configparser objects, write to file
self.pyradiomics_configs = list()
# Make sure we can dump blank values for PyRadiomics
yaml.SafeDumper.add_representer(type(None),
lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:null', ''))
for num, c in enumerate(self.configs):
if type(c) != configparser.ConfigParser:
# A filepath (not a fastr source) is provided. Hence we read
# the config file and convert it to a configparser object
config = configparser.ConfigParser()
config.read(c)
c = config
cfile = os.path.join(self.fastr_tmpdir, f"config_{self.name}_{num}.ini")
if not os.path.exists(os.path.dirname(cfile)):
os.makedirs(os.path.dirname(cfile))
with open(cfile, 'w') as configfile:
c.write(configfile)
# If PyRadiomics is used, also write a config for PyRadiomics
if 'pyradiomics' in c['General']['FeatureCalculators']:
cfile_pyradiomics = os.path.join(self.fastr_tmpdir, f"config_pyradiomics_{self.name}_{num}.yaml")
config_pyradiomics = io.convert_config_pyradiomics(c)
with open(cfile_pyradiomics, 'w') as file:
yaml.safe_dump(config_pyradiomics, file)
cfile_pyradiomics = Path(self.fastr_tmpdir) / f"config_pyradiomics_{self.name}_{num}.yaml"
self.pyradiomics_configs.append(cfile_pyradiomics.as_uri().replace('%20', ' '))
# BUG: Make path with pathlib to create windows double slashes
cfile = Path(self.fastr_tmpdir) / f"config_{self.name}_{num}.ini"
self.fastrconfigs.append(cfile.as_uri().replace('%20', ' '))
class Tools(object):
"""
Create other pipelines besides the default radiomics executions.
Currently includes:
1. Registration pipeline
2. Evaluation pipeline
    3. Slicer pipeline, to create PNGs of the middle slice of images.
"""
def __init__(self):
"""Initialize object with all pipelines."""
self.Elastix = Elastix()
self.Evaluate = Evaluate()
self.Slicer = Slicer()
| 53.822572 | 228 | 0.559897 | 97,869 | 0.986632 | 0 | 0 | 0 | 0 | 0 | 0 | 33,134 | 0.334029 |
8d832e77f438b0dd65c0dff2da0ca039538bc5cd | 2,019 | py | Python | utils/tweets_to_txt.py | magsol/datascibun | bb118eac59dc238c42f659871e25619d994f8575 | [
"Apache-2.0"
]
| null | null | null | utils/tweets_to_txt.py | magsol/datascibun | bb118eac59dc238c42f659871e25619d994f8575 | [
"Apache-2.0"
]
| null | null | null | utils/tweets_to_txt.py | magsol/datascibun | bb118eac59dc238c42f659871e25619d994f8575 | [
"Apache-2.0"
]
| 1 | 2022-03-01T01:45:47.000Z | 2022-03-01T01:45:47.000Z | import argparse
import json
import re


def remove_urls(tweet):
    return re.sub(r"http\S+", "", tweet)


def fix_amp(tweet):
    # Twitter archives HTML-escape ampersands; undo that.
    return tweet.replace("&amp;", "&")


def remove_hashtags(tweet):
    return re.sub(r"#\w+\s*", "", tweet)


def remove_mentions(tweet):
    return re.sub(r"@\w+\s*", "", tweet)


def remove_rt(tweet):
    return tweet[2:] if tweet[:2] == 'RT' else tweet


def remove_emojis(tweet):
    return re.sub(r"[^\x00-\x7F]+", "", tweet)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description = 'JSON tweet converter',
        epilog = 'lol tw33tz', add_help = 'How to use',
        prog = 'python json_to_txt.py <options>')

    # Required arguments.
    parser.add_argument("-i", "--input", required = True,
        help = "JSON file to convert.")

    # Optional arguments.
    parser.add_argument("-o", "--output", default = "output.txt",
        help = "Output file containing tweet content, one per line. [DEFAULT: output.txt]")

    # Parse out the arguments.
    args = vars(parser.parse_args())

    content = json.load(open(args['input'], "r"))
    fp = open(args['output'], "w")
    item = 0
    for obj in content:
        tweet = obj['tweet']['full_text']

        # STEP 1: Strip out RT.
        tweet = remove_rt(tweet)

        # STEP 2: Remove URLs, mentions, hashtags, emojis.
        tweet = remove_urls(tweet)
        tweet = remove_mentions(tweet)
        tweet = remove_hashtags(tweet)
        tweet = remove_emojis(tweet)

        # STEP 3: Other random fixes.
        tweet = tweet.strip()
        tweet = fix_amp(tweet)
        if len(tweet) == 0 or len(tweet) == 1: continue
        tweet = tweet.replace("\"\"", "")
        if tweet[0] == ":":
            tweet = tweet[1:]
        tweet = tweet.replace("\n", " ")
        tweet = tweet.strip()

        # Write out!
        fp.write(f"{tweet}\n")
        item += 1
        if item % 1000 == 0:
            print(f"{item} of {len(content)} done.")

    fp.close()
    print(f"{item} tweets processed!")
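# Example invocation (illustrative file names): python tweets_to_txt.py -i tweet.json -o output.txt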
| 27.657534 | 91 | 0.574542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.299653 |
8d85dffad6d22403418ce3ef5e06280cc317b3e4 | 528 | py | Python | truechat/chat/migrations/0007_auto_20191026_2020.py | TrueChat/Backend | 7d2bc73d5b7f157d7499a65af4157e1ddeb7a0ac | [
"MIT"
]
| 1 | 2019-12-19T19:04:33.000Z | 2019-12-19T19:04:33.000Z | truechat/chat/migrations/0007_auto_20191026_2020.py | TrueChat/Backend | 7d2bc73d5b7f157d7499a65af4157e1ddeb7a0ac | [
"MIT"
]
| 6 | 2020-06-05T23:42:41.000Z | 2022-02-10T13:32:59.000Z | truechat/chat/migrations/0007_auto_20191026_2020.py | TrueChat/Backend | 7d2bc73d5b7f157d7499a65af4157e1ddeb7a0ac | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.5 on 2019-10-26 20:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chat', '0006_auto_20191022_1218'),
]
operations = [
migrations.RemoveField(
model_name='membership',
name='is_admin',
),
migrations.AddField(
model_name='membership',
name='is_banned',
field=models.BooleanField(default=False, verbose_name='Забанен ли?'),
),
]
| 22.956522 | 81 | 0.587121 | 444 | 0.826816 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.270019 |
8d86a97241dd9580e12d59014523e0d42f09b38e | 354 | py | Python | libs/baseclass/about_screen.py | wildscsi/ecopos | 9922bb5160227777401eb33fa9a01cfba5730781 | [
"MIT"
]
| null | null | null | libs/baseclass/about_screen.py | wildscsi/ecopos | 9922bb5160227777401eb33fa9a01cfba5730781 | [
"MIT"
]
| 1 | 2021-11-04T20:43:03.000Z | 2021-11-04T20:43:03.000Z | libs/baseclass/about_screen.py | wildscsi/ecopos | 9922bb5160227777401eb33fa9a01cfba5730781 | [
"MIT"
]
| 1 | 2021-11-04T19:43:53.000Z | 2021-11-04T19:43:53.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020 CPV.BY
#
# For suggestions and questions:
# <[email protected]>
#
# LICENSE: Commercial
import webbrowser
from kivymd.theming import ThemableBehavior
from kivymd.uix.screen import MDScreen
class AboutScreen(ThemableBehavior, MDScreen):
    def open_url(self, instance, url):
        webbrowser.open(url)
| 19.666667 | 46 | 0.728814 | 114 | 0.322034 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.358757 |
8d86c9a6526d8d524710fa780972b087a3f46ac3 | 7,715 | py | Python | causal_rl/environments/multi_typed.py | vluzko/causal_rl | 92ee221bdf1932fa83955441baabb5e28b78ab9d | [
"MIT"
]
| 2 | 2021-04-02T12:06:13.000Z | 2022-02-09T06:57:26.000Z | causal_rl/environments/multi_typed.py | vluzko/causal_rl | 92ee221bdf1932fa83955441baabb5e28b78ab9d | [
"MIT"
]
| 11 | 2020-12-28T14:51:31.000Z | 2021-03-29T19:53:24.000Z | causal_rl/environments/multi_typed.py | vluzko/causal_rl | 92ee221bdf1932fa83955441baabb5e28b78ab9d | [
"MIT"
]
| null | null | null | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from gym import Env
from scipy.spatial import distance
from typing import Optional, Tuple, Any
from causal_rl.environments import CausalEnv
class MultiTyped(CausalEnv):
    """A simulation of balls bouncing with gravity and elastic collisions.

    Attributes:
        num_obj (int): Number of balls in the simulation.
        obj_dim (int): The dimension of the balls. Will always be 2 * dimension_of_space
        masses (np.ndarray): The masses of the balls.
        radii (np.ndarray): The radii of the balls.
        space (pymunk.Space): The actual simulation space.
    """
    cls_name = 'multi_typed'

    def __init__(self, num_obj: int=5, mass: float=10, radii: float=10, width: float=400):
        self.num_obj = 2 * num_obj
        self.obj_dim = 4
        self.mass = mass
        self.radius = radii
        self.masses = mass * np.ones(self.num_obj)
        self.radii = radii * np.ones(self.num_obj)
        self.width = width
        self.location_indices = (0, 1)

    @property
    def name(self) -> str:
        return '{}_{}_{}_{}'.format(self.cls_name, self.mass, self.radius, self.width)

    def reset(self):
        import pymunk
        self.space = pymunk.Space()
        self.space.gravity = (0.0, 0.0)
        self.objects = []

        x_pos = np.random.rand(self.num_obj, 1) * (self.width - 40) + 20
        y_pos = np.random.rand(self.num_obj, 1) * (self.width - 40) + 20
        x_vel = np.random.rand(self.num_obj, 1) * 300 - 150
        y_vel = np.random.rand(self.num_obj, 1) * 300 - 150

        # Create circles
        for i in range(self.num_obj):
            mass = self.masses[i]
            radius = self.radii[i]
            moment = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
            body = pymunk.Body(mass, moment)
            body.position = (x_pos[i], y_pos[i])
            body.velocity = (x_vel[i], y_vel[i])
            shape = pymunk.Circle(body, radius, (0, 0))
            shape.elasticity = 1.0
            self.space.add(body, shape)
            self.objects.append(body)

        # Create squares
        for i in range(self.num_obj):
            mass = self.masses[i] * 6
            radius = self.radii[i] * 1.2
            size = (radius, radius)
            moment = pymunk.moment_for_box(mass, size)
            body = pymunk.Body(mass, moment)
            body.position = (x_pos[i], y_pos[i])
            body.velocity = (x_vel[i], y_vel[i])
            shape = pymunk.Poly.create_box(body, size)
            shape.elasticity = 1.0
            self.space.add(body, shape)
            self.objects.append(body)

        static_lines = [
            pymunk.Segment(self.space.static_body, (0.0, 0.0), (0.0, self.width), 0),
            pymunk.Segment(self.space.static_body, (0.0, 0.0), (self.width, 0.0), 0),
            pymunk.Segment(self.space.static_body, (self.width, 0.0), (self.width, self.width), 0),
            pymunk.Segment(self.space.static_body, (0.0, self.width), (self.width, self.width), 0)
        ]
        for line in static_lines:
            line.elasticity = 1.
        self.space.add(static_lines)

        return self.get_state(), 0, False, None

    def get_state(self) -> np.ndarray:
        """Get the current state.

        Returns:
            A tensor representing the state. Each row is a single ball, columns are [*position, *velocity]
        """
        state = np.zeros((self.num_obj, 4))
        for i in range(self.num_obj):
            state[i, :2] = np.array([self.objects[i].position[0], self.objects[i].position[1]])
            state[i, 2:] = np.array([self.objects[i].velocity[0], self.objects[i].velocity[1]])
        return state

    def step(self, dt=0.01) -> Tuple[np.ndarray, float, bool, Any]:
        self.space.step(dt)
        return self.get_state(), 0, False, None

    def generate_data(self, epochs: int=10000, dt: float=0.01) -> Tuple[np.ndarray, np.ndarray]:
        states = np.zeros((epochs, self.num_obj, 4))
        rewards = np.zeros((epochs, 1))
        self.reset()
        for t in range(epochs):
            states[t] = self.get_state()
            if t > 0:
                states[t, :, 2:] = (states[t, :, :2] - states[t - 1, :, :2]) / dt
            self.step(dt=dt)
        return states, rewards

    def visualize(self, state: np.ndarray, save_path: Optional[str]=None):
        """Visualize a single state.

        Args:
            state: The full state array to draw (positions and velocities).
            save_path: Path to save the image to.
        """
        colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
        pos = state[:, :2]
        momenta = state[:, 2:]
        fig, ax = plt.subplots(figsize=(6, 6))
        box = plt.Rectangle((0, 0), self.width, self.width, linewidth=5, edgecolor='k', facecolor='none')
        ax.add_patch(box)
        for i in range(self.num_obj // 2):
            circle = plt.Circle((pos[i, 0], pos[i, 1]), radius=self.radii[i], edgecolor='b')
            label = ax.annotate('{}'.format(i), xy=(pos[i, 0], pos[i, 1]), fontsize=8, ha='center')
            # Plot the momentum
            plt.arrow(pos[i, 0], pos[i, 1], momenta[i, 0], momenta[i, 1])
            ax.add_patch(circle)

        for i in range(self.num_obj // 2, self.num_obj):
            circle = plt.Rectangle((pos[i, 0], pos[i, 1]), self.radii[i], self.radii[i], edgecolor='b')
            label = ax.annotate('{}'.format(i), xy=(pos[i, 0], pos[i, 1]), fontsize=8, ha='center')
            # Plot the momentum
            plt.arrow(pos[i, 0], pos[i, 1], momenta[i, 0], momenta[i, 1])
            ax.add_patch(circle)

        plt.axis([0, self.width, 0, self.width])
        plt.axis('off')
        if save_path is not None:
            plt.savefig(save_path)
        else:
            plt.show()
        plt.close()
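    # Collision test below: any pair whose centre distance falls under the sum of their effective
    # radii counts as colliding; square bodies get an inflated radius of radius * sqrt(2).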
    def detect_collisions(self, trajectories: np.ndarray) -> np.ndarray:
        n = trajectories.shape[0]
        k = self.num_obj
        radii = np.copy(self.radii)
        radii[self.num_obj // 2:] = radii[self.num_obj // 2:] * np.sqrt(2)
        min_dist = radii.reshape(k, 1) + radii.reshape(1, k)
        np.fill_diagonal(min_dist, 0)
        collisions = np.zeros((n, k, k))
        for i in range(1, n):
            # The (x, y) coordinates of all balls at t=i
            locs = trajectories[i, :, :2]
            distances = distance.squareform(distance.pdist(locs))
            collided = np.nonzero(distances < min_dist)
            collisions[i-1][collided] = 1
            collisions[i][collided] = 1
        return collisions

    def wall_collisions(self, states: np.ndarray) -> np.ndarray:
        min_coord = 0 + self.radius
        max_coord = self.width - self.radius

        # Just the position coordinates
        locs = states[:, :, :2]
        has_collision = (locs < min_coord) | (locs > max_coord)
        return has_collision


class WithTypes(MultiTyped):
    """Include the type of the object in the state."""
    cls_name = 'with_types'

    def __init__(self, num_obj=5, mass: float=10, radii: float=10, width: float=400):
        super().__init__(num_obj, mass, radii, width)
        self.obj_dim = 5
        self.location_indices = (0, 1)

    def generate_data(self, epochs: int=10000, dt: float=0.01) -> Tuple[np.ndarray, np.ndarray]:
        states, rewards = super().generate_data(epochs, dt)
        with_types = np.zeros((epochs, self.num_obj, self.obj_dim))
        with_types[:, :, :-1] = states
        with_types[:, self.num_obj//2:, -1] = 1
        return with_types, rewards

    def detect_collisions(self, trajectories: np.ndarray) -> np.ndarray:
        return super().detect_collisions(trajectories[:, :, :-1])
| 36.738095 | 106 | 0.568762 | 7,491 | 0.970966 | 0 | 0 | 123 | 0.015943 | 0 | 0 | 1,027 | 0.133117 |
8d86e19a0f7bf48d0eb61da351363ace81caa8fc | 353 | py | Python | greetings.py | ucsd-cse-spis-2016/spis16-lecture-0815 | 24e0a8ea9726f969eb357db33eb2925aabd25e43 | [
"MIT"
]
| null | null | null | greetings.py | ucsd-cse-spis-2016/spis16-lecture-0815 | 24e0a8ea9726f969eb357db33eb2925aabd25e43 | [
"MIT"
]
| null | null | null | greetings.py | ucsd-cse-spis-2016/spis16-lecture-0815 | 24e0a8ea9726f969eb357db33eb2925aabd25e43 | [
"MIT"
]
| null | null | null | from flask import Flask
app = Flask(__name__)
@app.route("/en")
def hello():
    return "Hello SPIS 2016!"

@app.route("/cn")
def ni_hao():
    return "Ni Hao SPIS 2016!"

@app.route("/es")
def hola():
    return "Hola SPIS 2016!"

@app.route("/fa")
def sobh_bx():
    return "Sobh Bexair SPIS 2016!"

if __name__ == "__main__":
    app.run(port=5000)
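# With the server running, http://localhost:5000/en returns the English greeting; /cn, /es and /fa serve the other languages.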
| 16.045455 | 35 | 0.628895 | 0 | 0 | 0 | 0 | 248 | 0.70255 | 0 | 0 | 108 | 0.305949 |
8d88e96d4a71ca08ce8d66eee14e65dd7c02396c | 3,189 | py | Python | bin/makeReport.py | oxfordmmm/SARS-CoV2_workflows | a84cb0a7142684414b2f285dd27cc2ea287eecb9 | [
"MIT"
]
| null | null | null | bin/makeReport.py | oxfordmmm/SARS-CoV2_workflows | a84cb0a7142684414b2f285dd27cc2ea287eecb9 | [
"MIT"
]
| null | null | null | bin/makeReport.py | oxfordmmm/SARS-CoV2_workflows | a84cb0a7142684414b2f285dd27cc2ea287eecb9 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import pandas as pd
import sys
import json
from Bio import SeqIO
sample_name=sys.argv[1]
pango=pd.read_csv('pango.csv')
nextclade=pd.read_csv('nextclade.tsv', sep='\t')
aln2type=pd.read_csv('aln2type.csv')
pango['sampleName']=sample_name
nextclade['sampleName']=sample_name
aln2type['sampleName']=sample_name
df=pango.merge(nextclade, on='sampleName', how='left', suffixes=("_pango","_nextclade"))
df=df.merge(aln2type, on='sampleName', how='left', suffixes=(None,"_aln2type"))
# versions
wf=open('workflow_commit.txt').read()
df['workflowCommit']=str(wf).strip()
df['manifestVersion']=sys.argv[2]
nextclade_version=open('nextclade_files/version.txt').read()
df['nextcladeVersion']=str(nextclade_version).strip()
aln2type_variant_commit=open('variant_definitions/aln2type_variant_git_commit.txt').read()
aln2type_variant_version=open('variant_definitions/aln2type_variant_version.txt').read()
aln2type_source_commit=open('variant_definitions/aln2type_commit.txt').read()
df['aln2typeVariantCommit']=str(aln2type_variant_commit).strip()
df['aln2typeVariantVersion']=str(aln2type_variant_version).strip()
df['aln2typeSourceVommit']=str(aln2type_source_commit).strip()
df.to_csv('{0}_report.tsv'.format(sys.argv[1]), sep='\t', index=False)
### convert to json
pango['program']='pango'
pango.set_index('program',inplace=True)
p=pango.to_dict(orient='index')
nextclade['program']='nextclade'
nextclade['nextcladeVersion']=str(nextclade_version).strip()
nextclade.set_index('program',inplace=True)
n=nextclade.to_dict(orient='index')
with open('nextclade.json','rt', encoding= 'utf-8') as inf:
    nj=json.load(inf)
n['nextcladeOutputJson']=nj
aln2type['program']='aln2type'
aln2type['label']=aln2type['phe-label']
aln2type['aln2typeVariantCommit']=str(aln2type_variant_commit).strip()
aln2type['aln2typeSourceCommit']=str(aln2type_source_commit).strip()
aln2type.set_index(['program','phe-label'],inplace=True)
a={level: aln2type.xs(level).to_dict('index') for level in aln2type.index.levels[0]}
w={'WorkflowInformation':{}}
w['WorkflowInformation']['workflowCommit']=str(wf).strip()
w['WorkflowInformation']['manifestVersion']=sys.argv[2]
w['WorkflowInformation']['sampleIdentifier']=sample_name
# add fasta to json
record = SeqIO.read('ref.fasta', "fasta")
w['WorkflowInformation']['referenceIdentifier']=record.id
#f={'FastaRecord':{'SeqId':record.id,
# 'SeqDescription': record.description,
# 'Sequence':str(record.seq),
# 'sampleName':sample_name}}
def completeness(nextcladeOutputJson):
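    # 29903 bp is the length of the SARS-CoV-2 reference genome (Wuhan-Hu-1),
    # so the value returned below is the percentage of the genome covered.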
    ref_len = 29903
    total_missing = nextcladeOutputJson['results'][0]['qc']['missingData']['totalMissing']
    completeness_prop = (ref_len - total_missing) / ref_len
    completeness_pc = round(completeness_prop * 100, 1)
    return completeness_pc
s={'summary':{}}
s['summary']['completeness']=completeness(n['nextcladeOutputJson'])
d={sample_name:{}}
d[sample_name].update(p)
d[sample_name].update(n)
d[sample_name].update(a)
d[sample_name].update(w)
#d[sample_name].update(f)
d[sample_name].update(s)
with open('{0}_report.json'.format(sample_name), 'w', encoding='utf-8') as f:
    json.dump(d, f, indent=4, sort_keys=True, ensure_ascii=False)
| 35.831461 | 90 | 0.756977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,276 | 0.400125 |
8d8a5d72d65e690dc4c82341ed975187662e4c48 | 1,484 | py | Python | webhooks/statuscake/alerta_statuscake.py | frekel/alerta-contrib | d8f5c93a4ea735085b3689c2c852ecae94924d08 | [
"MIT"
]
| 114 | 2015-02-05T00:22:16.000Z | 2021-11-25T13:02:44.000Z | webhooks/statuscake/alerta_statuscake.py | NeilOrley/alerta-contrib | 69d271ef9fe6542727ec4aa39fc8e0f797f1e8b1 | [
"MIT"
]
| 245 | 2016-01-09T22:29:09.000Z | 2022-03-16T10:37:02.000Z | webhooks/statuscake/alerta_statuscake.py | NeilOrley/alerta-contrib | 69d271ef9fe6542727ec4aa39fc8e0f797f1e8b1 | [
"MIT"
]
| 193 | 2015-01-30T21:22:49.000Z | 2022-03-28T05:37:14.000Z | from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
from alerta.exceptions import RejectException
import os
import hashlib
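# The webhook's Token field is validated as md5(username + API key), recomputed here from the
# STATUSCAKE_USERNAME / STATUSCAKE_APIKEY environment variables when both are set.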
class StatusCakeWebhook(WebhookBase):
    def incoming(self, query_string, payload):
        alert_severity = os.environ.get('STATUSCAKE_DEFAULT_ALERT_SEVERITY', 'major')

        # If the statuscake username and apikey are provided
        # We can validate that the webhook call is valid
        statuscake_username = os.environ.get('STATUSCAKE_USERNAME')
        statuscake_apikey = os.environ.get('STATUSCAKE_APIKEY')
        if statuscake_username and statuscake_apikey:
            decoded_token = statuscake_username + statuscake_apikey
            statuscake_token = hashlib.md5(decoded_token.encode()).hexdigest()
            if statuscake_token != payload['Token']:
                raise RejectException("Provided Token couldn't be verified")

        if payload['Status'] == 'UP':
            severity = 'normal'
        else:
            severity = alert_severity

        return Alert(
            resource=payload['Name'],
            event='AppDown',
            environment='Production',
            severity=severity,
            service=['StatusCake'],
            group='Application',
            value=payload['StatusCode'],
            text="%s is down" % payload['URL'],
            tags=payload['Tags'].split(','),
            origin='statuscake',
            raw_data=str(payload)
        )
| 35.333333 | 85 | 0.624663 | 1,334 | 0.898316 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.235017 |
8d8b51eaca246cacfde939fcbc4a16b39dba720e | 3,738 | py | Python | ironic_discoverd/main.py | enovance/ironic-discoverd | d3df6178ca5c95943c93ff80723c86b7080bca0b | [
"Apache-2.0"
]
| null | null | null | ironic_discoverd/main.py | enovance/ironic-discoverd | d3df6178ca5c95943c93ff80723c86b7080bca0b | [
"Apache-2.0"
]
| null | null | null | ironic_discoverd/main.py | enovance/ironic-discoverd | d3df6178ca5c95943c93ff80723c86b7080bca0b | [
"Apache-2.0"
]
| null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
eventlet.monkey_patch(thread=False)
import json
import logging
import sys
from flask import Flask, request # noqa
from keystoneclient import exceptions
from ironic_discoverd import conf
from ironic_discoverd import discoverd
from ironic_discoverd import firewall
from ironic_discoverd import node_cache
from ironic_discoverd import utils
app = Flask(__name__)
LOG = discoverd.LOG
@app.route('/v1/continue', methods=['POST'])
def post_continue():
    data = request.get_json(force=True)
    LOG.debug("Got JSON %s, going into processing thread", data)
    try:
        res = discoverd.process(data)
    except utils.DiscoveryFailed as exc:
        return str(exc), exc.http_code
    else:
        return json.dumps(res), 200, {'Content-Type': 'applications/json'}


@app.route('/v1/discover', methods=['POST'])
def post_discover():
    if conf.getboolean('discoverd', 'authenticate'):
        if not request.headers.get('X-Auth-Token'):
            LOG.debug("No X-Auth-Token header, rejecting")
            return 'Authentication required', 401
        try:
            utils.get_keystone(token=request.headers['X-Auth-Token'])
        except exceptions.Unauthorized:
            LOG.debug("Keystone denied access, rejecting")
            return 'Access denied', 403
        # TODO(dtanstur): check for admin role

    data = request.get_json(force=True)
    LOG.debug("Got JSON %s", data)
    try:
        discoverd.discover(data)
    except utils.DiscoveryFailed as exc:
        return str(exc), exc.http_code
    else:
        return "", 202


def periodic_update(period):
    while True:
        LOG.debug('Running periodic update of filters')
        try:
            firewall.update_filters()
        except Exception:
            LOG.exception('Periodic update failed')
        eventlet.greenthread.sleep(period)


def periodic_clean_up(period):
    while True:
        LOG.debug('Running periodic clean up of timed out nodes')
        try:
            if node_cache.clean_up():
                firewall.update_filters()
        except Exception:
            LOG.exception('Periodic clean up failed')
        eventlet.greenthread.sleep(period)


def main():
    if len(sys.argv) < 2:
        sys.exit("Usage: %s config-file" % sys.argv[0])

    conf.read(sys.argv[1])
    debug = conf.getboolean('discoverd', 'debug')
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
    logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
        logging.WARNING)
    if not conf.getboolean('discoverd', 'authenticate'):
        LOG.warning('Starting unauthenticated, please check configuration')
    node_cache.init()
    firewall.init()
    utils.check_ironic_available()
    period = conf.getint('discoverd', 'firewall_update_period')
    eventlet.greenthread.spawn_n(periodic_update, period)
    period = conf.getint('discoverd', 'clean_up_period')
    eventlet.greenthread.spawn_n(periodic_clean_up, period)
    app.run(debug=debug,
            host=conf.get('discoverd', 'listen_address'),
            port=conf.getint('discoverd', 'listen_port'))
| 31.411765 | 75 | 0.688604 | 0 | 0 | 0 | 0 | 1,161 | 0.310594 | 0 | 0 | 1,303 | 0.348582 |
8d8c7b2102958e3a921b5b5a1f32ed6750cd5ff4 | 964 | py | Python | config_translator.py | Charahiro-tan/Jurubot_Translator | d0d0db137f3ddfe06d7cd9457d22c418bdeff94c | [
"MIT"
]
| 1 | 2021-07-26T11:14:05.000Z | 2021-07-26T11:14:05.000Z | config_translator.py | Charahiro-tan/Jurubot_Translator | d0d0db137f3ddfe06d7cd9457d22c418bdeff94c | [
"MIT"
]
| null | null | null | config_translator.py | Charahiro-tan/Jurubot_Translator | d0d0db137f3ddfe06d7cd9457d22c418bdeff94c | [
"MIT"
]
| null | null | null | ##################################################
# Translation settings
# Changed settings take effect the next time the bot is started
##################################################
# Items wrapped in [] can take any number of entries: quote each one with "" and separate them with commas.

# Users to ignore
ignore_user = ["Nightbot","Streamelements","Moobot"]

# Words to remove before translating. Regular expressions are supported.
# URLs and repeated words are already removed by default, so add entries here only if something is missing.
del_word = ["88+","88+"]

# Languages to ignore.
# See https://cloud.google.com/translate/docs/languages for the language codes
ignore_lang = ["",""]

# Language used by the streamer. Every other language is translated into this one.
home_lang = "ja"

# Target language for messages posted in home_lang above
default_to_lang = "en"

# Suffix of the translate.google URL. If you are in Japan, leave this as is.
url_suffix = "co.jp"

# True to include the sender's name in the translated output, False to omit it
sender = True

# Name shown when the option above is True
# "displayname" for the display name
# "loginid" for the login ID
sender_name = "displayname"

# True to append language info (en ⇒ ja) to the translated output, False to omit it
language = True

# True to use an API created with Google Apps Script, False otherwise
# Be sure to read the Readme before using Google Apps Script.
gas = False

# URL of the API created with Google Apps Script
gas_url = ""
8d8cd77924dc533eeabb54595050045f0fb725d3 | 1,489 | py | Python | wxcloudrun/dao.py | lubupang/resume_flask1 | 1ea18e88c0b667e92710096f57973a77d19e8fc6 | [
"MIT"
]
| null | null | null | wxcloudrun/dao.py | lubupang/resume_flask1 | 1ea18e88c0b667e92710096f57973a77d19e8fc6 | [
"MIT"
]
| null | null | null | wxcloudrun/dao.py | lubupang/resume_flask1 | 1ea18e88c0b667e92710096f57973a77d19e8fc6 | [
"MIT"
]
| null | null | null | import logging
from sqlalchemy.exc import OperationalError
from wxcloudrun import db
from wxcloudrun.model import Counters
# Initialize the logger
logger = logging.getLogger('log')
logger.info("aaaaaaa")


def query_counterbyid(id):
    """
    Query a Counter entity by ID
    :param id: ID of the Counter
    :return: Counter entity
    """
    logger.info("bbbbbbbbb")
    try:
        return Counters.query.filter(Counters.id == id).first()
    except OperationalError as e:
        logger.info("query_counterbyid errorMsg= {} ".format(e))
        return None


def delete_counterbyid(id):
    """
    Delete a Counter entity by ID
    :param id: ID of the Counter
    """
    try:
        counter = Counters.query.get(id)
        if counter is None:
            return
        db.session.delete(counter)
        db.session.commit()
    except OperationalError as e:
        logger.info("delete_counterbyid errorMsg= {} ".format(e))


def insert_counter(counter):
    """
    Insert a Counter entity
    :param counter: Counters entity
    """
    try:
        db.session.add(counter)
        db.session.commit()
    except OperationalError as e:
        logger.info("insert_counter errorMsg= {} ".format(e))


def update_counterbyid(counter):
    """
    Update the value of a counter by ID
    :param counter: Counter entity
    """
    try:
        counter = query_counterbyid(counter.id)
        if counter is None:
            return
        db.session.flush()
        db.session.commit()
    except OperationalError as e:
        logger.info("update_counterbyid errorMsg= {} ".format(e))
| 22.560606 | 65 | 0.633983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.31094 |
8d8db8eca4cacfeb8ce07aa8011f8a4b558400b4 | 7,411 | py | Python | src/bpp/tests/tests_legacy/test_views/test_raporty.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
]
| 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/bpp/tests/tests_legacy/test_views/test_raporty.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
]
| 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/bpp/tests/tests_legacy/test_views/test_raporty.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
]
| null | null | null | # -*- encoding: utf-8 -*-
import os
import sys
import uuid
import pytest
from django.apps import apps
from django.contrib.auth.models import Group
from django.core.files.base import ContentFile
try:
    from django.core.urlresolvers import reverse
except ImportError:
    from django.urls import reverse
from django.db import transaction
from django.http import Http404
from django.test.utils import override_settings
from django.utils import timezone
from model_mommy import mommy
from bpp.models import Typ_KBN, Jezyk, Charakter_Formalny, Typ_Odpowiedzialnosci
from bpp.tests.tests_legacy.testutil import UserTestCase, UserTransactionTestCase
from bpp.tests.util import any_jednostka, any_autor, any_ciagle
from bpp.util import rebuild_contenttypes
from bpp.views.raporty import RaportSelector, PodgladRaportu, KasowanieRaportu
from celeryui.models import Report
class TestRaportSelector(UserTestCase):
    def test_raportselector(self):
        p = RaportSelector()
        p.request = self.factory.get('/')
        p.get_context_data()

    def test_raportselector_with_reports(self):
        for x, kiedy_ukonczono in enumerate([timezone.now(), None]):
            mommy.make(
                Report, arguments={},
                file=None, finished_on=kiedy_ukonczono)
        self.client.get(reverse('bpp:raporty'))

    def test_tytuly_raportow_kronika_uczelni(self):
        any_ciagle(rok=2000)
        rep = Report.objects.create(
            ordered_by=self.user,
            function="kronika-uczelni",
            arguments={"rok": "2000"})

        res = self.client.get(reverse('bpp:raporty'))
        self.assertContains(
            res,
            "Kronika Uczelni dla roku 2000",
            status_code=200)

    def test_tytuly_raportow_raport_dla_komisji_centralnej(self):
        a = any_autor("Kowalski", "Jan")
        rep = Report.objects.create(
            ordered_by=self.user,
            function="raport-dla-komisji-centralnej",
            arguments={"autor": a.pk})

        res = self.client.get(reverse('bpp:raporty'))
        self.assertContains(
            res,
            "Raport dla Komisji Centralnej - %s" % str(a),
            status_code=200)


class RaportMixin:
    def zrob_raport(self):
        r = mommy.make(
            Report, file=None,
            function="kronika-uczelni",
            arguments='{"rok":"2013"}')
        return r


class TestPobranieRaportu(RaportMixin, UserTestCase):
    def setUp(self):
        UserTestCase.setUp(self)
        self.r = self.zrob_raport()

        error_class = OSError
        if sys.platform.startswith('win'):
            error_class = WindowsError

        try:
            os.unlink(
                os.path.join(settings.MEDIA_ROOT, 'raport', 'test_raport'))
        except error_class:
            pass

        self.r.file.save("test_raport", ContentFile("hej ho"))

    def test_pobranie_nginx(self):
        # The report must be finished, otherwise there is nothing to download
        self.r.finished_on = timezone.now()
        self.r.save()

        with override_settings(SENDFILE_BACKEND='sendfile.backends.nginx'):
            url = reverse('bpp:pobranie-raportu', kwargs=dict(uid=self.r.uid))
            resp = self.client.get(url)
            self.assertEqual(resp.status_code, 200)
            self.assertIn('x-accel-redirect', resp._headers)


class TestPodgladRaportu(RaportMixin, UserTestCase):
    def setUp(self):
        UserTestCase.setUp(self)
        self.r = self.zrob_raport()

    def test_podgladraportu(self):
        p = PodgladRaportu()
        p.kwargs = {}
        p.kwargs['uid'] = self.r.uid

        self.assertEqual(p.get_object(), self.r)

        p.kwargs['uid'] = str(uuid.uuid4())
        self.assertRaises(Http404, p.get_object)

    def test_podgladraportu_client(self):
        url = reverse('bpp:podglad-raportu', kwargs=dict(uid=self.r.uid))
        resp = self.client.get(url)
        self.assertContains(resp, 'Kronika Uczelni', status_code=200)


class KasowanieRaportuMixin:
    def setUp(self):
        self.r = self.zrob_raport()
        self.r.ordered_by = self.user
        self.r.save()


class TestKasowanieRaportu(KasowanieRaportuMixin, RaportMixin, UserTestCase):
    def setUp(self):
        UserTestCase.setUp(self)
        KasowanieRaportuMixin.setUp(self)

    def test_kasowanieraportu(self):
        k = KasowanieRaportu()
        k.kwargs = dict(uid=self.r.uid)

        class FakeRequest:
            user = self.user

        k.request = FakeRequest()
        k.request.user = None
        self.assertRaises(Http404, k.get_object)

        k.request.user = self.user
        self.assertEqual(k.get_object(), self.r)

    def test_kasowanieraportu_client(self):
        self.assertEqual(Report.objects.count(), 1)
        url = reverse('bpp:kasowanie-raportu', kwargs=dict(uid=self.r.uid))
        resp = self.client.get(url)
        self.assertRedirects(resp, reverse("bpp:raporty"))
        self.assertEqual(Report.objects.count(), 0)


from django.conf import settings


class TestWidokiRaportJednostek2012(UserTestCase):
    # fixtures = ['charakter_formalny.json',
    #             'jezyk.json',
    #             'typ_kbn.json',
    #             'typ_odpowiedzialnosci.json']

    def setUp(self):
        UserTestCase.setUp(self)
        self.j = any_jednostka()
        Typ_KBN.objects.get_or_create(skrot="PW", nazwa="Praca wieloośrodkowa")
        Jezyk.objects.get_or_create(skrot='pol.', nazwa='polski')
        Charakter_Formalny.objects.get_or_create(skrot='KSZ', nazwa='Książka w języku obcym')
        Charakter_Formalny.objects.get_or_create(skrot='KSP', nazwa='Książka w języku polskim')
        Charakter_Formalny.objects.get_or_create(skrot='KS', nazwa='Książka')
        Charakter_Formalny.objects.get_or_create(skrot='ROZ', nazwa='Rozdział książki')
        Group.objects.get_or_create(name="wprowadzanie danych")

    def test_jeden_rok(self):
        url = reverse("bpp:raport-jednostek-rok-min-max",
                      args=(self.j.pk, 2010, 2013))
        res = self.client.get(url)
        self.assertContains(
            res,
            "Dane o publikacjach za okres 2010 - 2013",
            status_code=200)

    def test_zakres_lat(self):
        url = reverse("bpp:raport-jednostek", args=(self.j.pk, 2013))
        res = self.client.get(url)
        self.assertContains(
            res,
            "Dane o publikacjach za rok 2013",
            status_code=200)


class TestRankingAutorow(UserTestCase):
    def setUp(self):
        UserTestCase.setUp(self)
        rebuild_contenttypes()
        Typ_Odpowiedzialnosci.objects.get_or_create(skrot='aut.', nazwa='autor')
        Group.objects.get_or_create(name="wprowadzanie danych")
        j = any_jednostka()
        a = any_autor(nazwisko="Kowalski")
        c = any_ciagle(impact_factor=200, rok=2000)
        c.dodaj_autora(a, j)

    def test_renderowanie(self):
        url = reverse("bpp:ranking-autorow", args=(2000, 2000))
        res = self.client.get(url)
        self.assertContains(
            res, "Ranking autorów", status_code=200)
        self.assertContains(res, "Kowalski")

    def test_renderowanie_csv(self):
        url = reverse("bpp:ranking-autorow", args=(2000, 2000))
        res = self.client.get(url, data={"_export": "csv"})
        self.assertContains(
            res,
            '"Kowalski Jan Maria, dr",Jednostka')
| 32.221739 | 95 | 0.643233 | 6,497 | 0.875135 | 0 | 0 | 0 | 0 | 0 | 0 | 1,100 | 0.148168 |
8d8dfcd12be52225c59666f19fa694cef189e9ea | 1,373 | py | Python | bot/utilities/api/helpers/score.py | AiratK/kaishnik-bot | c42351611a40a04d78c8ae481b97339adbd321e5 | [
"MIT"
]
| null | null | null | bot/utilities/api/helpers/score.py | AiratK/kaishnik-bot | c42351611a40a04d78c8ae481b97339adbd321e5 | [
"MIT"
]
| null | null | null | bot/utilities/api/helpers/score.py | AiratK/kaishnik-bot | c42351611a40a04d78c8ae481b97339adbd321e5 | [
"MIT"
]
| null | null | null | from typing import List
from typing import Tuple
from bot.utilities.api.constants import SCORE_TEMPLATE
def beautify_score(raw_score_table_data: List[List[str]]) -> List[Tuple[str, str]]:
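    # Index 16 of each row is the traditional-grade column of the upstream score table;
    # the magic index comes from that table's fixed layout.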
    # Slightly refining traditional assessment to be written starting with lower case letter
    for (subject_index, subject_score_data) in enumerate(raw_score_table_data):
        # Making traditional grade to be viewed in the lower case
        subject_score_data[16] = subject_score_data[16].lower()

        # Putting strikethrough text decoration on non-grade value
        if subject_score_data[16] == "ведомость не закрыта":
            subject_score_data[16] = f"~{subject_score_data[16]}~"

        # Finishing traditional grade processing
        raw_score_table_data[subject_index][16] = subject_score_data[16]

    score: List[Tuple[str, str]] = []
    for subject_score_data in raw_score_table_data:
        formatted_subject_score_data: str = SCORE_TEMPLATE.format(*subject_score_data[1:])

        # Preparing for parsing by Markdown Parser of Version 2
        for reserved_character in [ "-", "(", ")", "." ]:
            formatted_subject_score_data = formatted_subject_score_data.replace(reserved_character, f"\{reserved_character}")

        # Enhancing some words' appearance
        formatted_subject_score_data = formatted_subject_score_data.replace("н/я", "неявка")

        score.append((subject_score_data[1], formatted_subject_score_data))

    return score
| 39.228571 | 116 | 0.780772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.327377 |
8d8ebb77655b687ce95045239bb38a91c19a2901 | 1,192 | py | Python | manager_app/serializers/carousel_serializers.py | syz247179876/e_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
]
| 7 | 2021-04-10T13:20:56.000Z | 2022-03-29T15:00:29.000Z | manager_app/serializers/carousel_serializers.py | syz247179876/E_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
]
| 9 | 2021-05-11T03:53:31.000Z | 2022-03-12T00:58:03.000Z | manager_app/serializers/carousel_serializers.py | syz247179876/E_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
]
| 2 | 2020-11-24T08:59:22.000Z | 2020-11-24T14:10:59.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/4/6 9:21 PM
# @Author : 司云中
# @File : carousel_serializers.py
# @Software: Pycharm
from rest_framework import serializers
from Emall.exceptions import DataFormatError
from shop_app.models.commodity_models import Carousel
class ManagerCarouselSerializer(serializers.ModelSerializer):
"""管理轮播图序列化器"""
class Meta:
model = Carousel
fields = ('pk', 'picture', 'url', 'sort', 'type')
read_only_fields = ('pk',)
def add(self):
"""增加轮播图"""
self.Meta.model.objects.create(**self.validated_data)
def modify(self):
"""修改轮播图"""
pk = self.context.get('request').data.get('pk')
if not pk:
raise DataFormatError('缺少数据')
return self.Meta.model.objects.filter(pk=pk).update(**self.validated_data)
class DeleteCarouselSerializer(serializers.ModelSerializer):
pk_list = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)
class Meta:
model = Carousel
fields = ('pk_list',)
def delete(self):
"""删除轮播图"""
return self.Meta.model.objects.filter(pk__in=self.validated_data.pop('pk_list')).delete()
| 27.090909 | 97 | 0.654362 | 979 | 0.778219 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.240064 |
8d9135e1864bf2b1336ddc05e72617edb4057d7b | 7,312 | py | Python | xfbin/structure/nud.py | SutandoTsukai181/xfbin_lib | 8e2c56f354bfd868f9162f816cc528e6f830cdbc | [
"MIT"
]
| 3 | 2021-07-20T09:13:13.000Z | 2021-09-06T18:08:15.000Z | xfbin/structure/nud.py | SutandoTsukai181/xfbin_lib | 8e2c56f354bfd868f9162f816cc528e6f830cdbc | [
"MIT"
]
| 1 | 2021-09-06T18:07:48.000Z | 2021-09-06T18:07:48.000Z | xfbin/structure/nud.py | SutandoTsukai181/xfbin_lib | 8e2c56f354bfd868f9162f816cc528e6f830cdbc | [
"MIT"
]
| null | null | null | from itertools import chain
from typing import List, Tuple
from .br.br_nud import *
class Nud:
    name: str  # chunk name
    mesh_groups: List['NudMeshGroup']

    def init_data(self, name, br_nud: BrNud):
        self.name = name
        self.bounding_sphere = br_nud.boundingSphere

        self.mesh_groups = list()
        for br_mesh_group in br_nud.meshGroups:
            mesh_group = NudMeshGroup()
            mesh_group.init_data(br_mesh_group)
            self.mesh_groups.append(mesh_group)

    def get_bone_range(self) -> Tuple[int, int]:
        if not (self.mesh_groups and
                self.mesh_groups[0].meshes and
                self.mesh_groups[0].meshes[0].bone_type != NudBoneType.NoBones):
            return (0, 0)

        lower = 0xFF_FF
        higher = 0
        for mesh in [m for m in self.mesh_groups[0].meshes if m.vertices and m.vertices[0].bone_ids]:
            lower = min(lower, min(chain(*map(lambda x: x.bone_ids, mesh.vertices))))
            higher = max(higher, max(chain(*map(lambda x: x.bone_ids, mesh.vertices))))

        if lower > higher:
            return (0, 0)

        return (lower, higher)
class NudMeshGroup:
    name: str
    meshes: List['NudMesh']

    def init_data(self, br_mesh_group: BrNudMeshGroup):
        self.name = br_mesh_group.name
        self.bone_flags = br_mesh_group.boneFlags
        self.bounding_sphere = br_mesh_group.boundingSphere

        self.meshes = list()
        for br_mesh in br_mesh_group.meshes:
            mesh = NudMesh()
            mesh.init_data(br_mesh)
            self.meshes.append(mesh)
class NudMesh:
    MAX_VERTICES = 32_767
    MAX_FACES = 16_383

    vertices: List['NudVertex']
    faces: List[Tuple[int, int, int]]
    materials: List['NudMaterial']

    vertex_type: NudVertexType
    bone_type: NudBoneType
    uv_type: NudUvType

    def init_data(self, br_mesh: BrNudMesh):
        self.add_vertices(br_mesh.vertices)
        self.add_faces(br_mesh.faces, br_mesh.faceSize)
        self.add_materials(br_mesh.materials)

        self.vertex_type = NudVertexType(br_mesh.vertexSize & 0x0F)
        self.bone_type = NudBoneType(br_mesh.vertexSize & 0xF0)
        self.uv_type = NudUvType(br_mesh.uvSize & 0x0F)
        self.face_flag = br_mesh.faceFlag

    def has_bones(self):
        return bool(self.vertices and self.vertices[0].bone_ids)

    def has_color(self):
        return bool(self.vertices and self.vertices[0].color)

    def get_uv_channel_count(self):
        return len(self.vertices[0].uv) if bool(self.vertices and self.vertices[0].uv) else 0

    def add_vertices(self, vertices: List[BrNudVertex]):
        self.vertices = list()
        for br_vertex in vertices:
            vertex = NudVertex()
            vertex.init_data(br_vertex)
            self.vertices.append(vertex)
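    # Face indices are stored as a triangle strip: an index of -1 restarts the strip and the
    # winding direction flips on every triangle; add_faces() below unrolls that into explicit triangles.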
    def add_faces(self, faces: List[int], faceSize: int):
        faces = iter(faces)

        if faceSize & 0x40:
            # 0x40 format does not have -1 indices nor changing directions
            self.faces = zip(faces, faces, faces)
            return

        self.faces = list()
        start_dir = 1
        f1 = next(faces)
        f2 = next(faces)
        face_dir = start_dir

        try:
            while True:
                f3 = next(faces)
                if f3 == -1:
                    f1 = next(faces)
                    f2 = next(faces)
                    face_dir = start_dir
                else:
                    face_dir = -face_dir
                    if f1 != f2 != f3:
                        if face_dir > 0:
                            self.faces.append((f3, f2, f1))
                        else:
                            self.faces.append((f2, f3, f1))
                    f1 = f2
                    f2 = f3
        except StopIteration:
            pass

    def add_materials(self, materials: List[BrNudMaterial]):
        self.materials = list()
        for br_material in materials:
            material = NudMaterial()
            material.init_data(br_material)
            self.materials.append(material)
class NudVertex:
    position: Tuple[float, float, float]
    normal: Tuple[float, float, float]
    bitangent: Tuple[float, float, float]
    tangent: Tuple[float, float, float]

    color: Tuple[int, int, int, int]
    uv: List[Tuple[float, float]]

    bone_ids: Tuple[int, int, int, int]
    bone_weights: Tuple[float, float, float, float]

    def init_data(self, br_vertex: BrNudVertex):
        self.position = br_vertex.position
        self.normal = br_vertex.normals
        self.bitangent = br_vertex.biTangents if br_vertex.biTangents else None
        self.tangent = br_vertex.tangents if br_vertex.tangents else None

        self.color = tuple(map(lambda x: int(x), br_vertex.color)) if br_vertex.color else None
        self.uv = br_vertex.uv

        self.bone_ids = br_vertex.boneIds
        self.bone_weights = br_vertex.boneWeights

    def __eq__(self, o: 'NudVertex') -> bool:
        return all(map(lambda x, y: x == y, self.position, o.position)) \
            and all(map(lambda x, y: x == y, self.normal, o.normal)) \
            and all(map(lambda x, y: all(map(lambda a, b: a == b, x, y)), self.uv, o.uv)) \
            and all(map(lambda x, y: x == y, self.tangent, o.tangent)) \
            and all(map(lambda x, y: x == y, self.bitangent, o.bitangent)) \
            and all(map(lambda x, y: x == y, self.color, o.color)) \
            and all(map(lambda x, y: x == y, self.bone_ids, o.bone_ids)) \
            and all(map(lambda x, y: x == y, self.bone_weights, o.bone_weights))

    def __hash__(self) -> int:
        return hash(tuple(self.position)) ^ hash(tuple(self.normal)) ^ hash(tuple(self.color)) ^ hash(tuple(self.uv))
class NudMaterial:
    def init_data(self, material: BrNudMaterial):
        self.flags = material.flags
        self.sourceFactor = material.sourceFactor
        self.destFactor = material.destFactor
        self.alphaTest = material.alphaTest
        self.alphaFunction = material.alphaFunction
        self.refAlpha = material.refAlpha
        self.cullMode = material.cullMode
        self.unk1 = material.unk1
        self.unk2 = material.unk2
        self.zBufferOffset = material.zBufferOffset

        self.textures = list()
        for br_texture in material.textures:
            texture = NudMaterialTexture()
            texture.init_data(br_texture)
            self.textures.append(texture)

        self.properties = list()
        for br_property in [p for p in material.properties if p.name]:
            property = NudMaterialProperty()
            property.init_data(br_property)
            self.properties.append(property)
class NudMaterialTexture:
    def init_data(self, texture: BrNudMaterialTexture):
        self.unk0 = texture.unk0
        self.mapMode = texture.mapMode
        self.wrapModeS = texture.wrapModeS
        self.wrapModeT = texture.wrapModeT
        self.minFilter = texture.minFilter
        self.magFilter = texture.magFilter
        self.mipDetail = texture.mipDetail
        self.unk1 = texture.unk1
        self.unk2 = texture.unk2
class NudMaterialProperty:
    def init_data(self, property: BrNudMaterialProperty):
        self.name = property.name
        self.values: List[float] = property.values
| 32.642857 | 117 | 0.607358 | 7,206 | 0.985503 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.018053 |
8d92051bcbbae105ab8b259c257c80d404e8f4eb | 2,389 | py | Python | src/attack_surface_pypy/__main__.py | ccrvs/attack_surface_pypy | f2bc9998cf42f4764f1c495e6243d970e01bd176 | [
"CC0-1.0"
]
| null | null | null | src/attack_surface_pypy/__main__.py | ccrvs/attack_surface_pypy | f2bc9998cf42f4764f1c495e6243d970e01bd176 | [
"CC0-1.0"
]
| null | null | null | src/attack_surface_pypy/__main__.py | ccrvs/attack_surface_pypy | f2bc9998cf42f4764f1c495e6243d970e01bd176 | [
"CC0-1.0"
]
| null | null | null | import argparse
import gc
import pathlib
import sys
import typing
import uvicorn # type: ignore
from attack_surface_pypy import __service_name__, __version__, asgi
from attack_surface_pypy import logging as app_logging
from attack_surface_pypy import settings
# logger = structlog.get_logger()
gc.disable()
parser = argparse.ArgumentParser(description="App initial arguments.", prog=__service_name__)
parser.add_argument(
    "-f",
    "--file-path",
    help="provide path to a file with initial data.",
    type=pathlib.Path,
    metavar=".fixtures/xxx.json",
    required=True,
    choices=[
        pathlib.Path(".fixtures/input-1.json"),
        pathlib.Path(".fixtures/input-2.json"),
        pathlib.Path(".fixtures/input-3.json"),
        pathlib.Path(".fixtures/input-4.json"),
        pathlib.Path(".fixtures/input-5.json"),
    ],
)
parser.add_argument(
    "-n",
    "--host",
    help="set host for the service.",
    type=str,
    metavar="localhost",
)
parser.add_argument(
    "-p",
    "--port",
    type=int,
    help="set port for the service.",
)
parser.add_argument(
    "-v",
    "--version",
    action="version",
    version=f"%(prog)s {__version__}",
)


def run_uvicorn(app_settings: settings.Settings, log_config: typing.Optional[dict] = None):
    uvicorn.run(
        asgi.create_app(app_settings),
        # loop='uvloop',
        http="httptools",
        host=app_settings.service.host,
        port=app_settings.service.port,
        log_config=log_config or {},
        reload=app_settings.autoreload,
        debug=app_settings.debug,
        access_log=app_settings.debug,
        backlog=app_settings.backlog,
        factory=True,
    )


if __name__ == "__main__":
    ns = parser.parse_args()
    domain_settings = settings.Domain(file_path=ns.file_path)
    service_settings = settings.Service()
    if ns.host or ns.port:
        service_settings = settings.Service(host=ns.host, port=ns.port)
    app_settings = settings.Settings(domain=domain_settings, service=service_settings)
    log_config = app_logging.LoggingConfig(
        log_level=app_settings.log_level, traceback_depth=app_settings.traceback_depth
    ).prepare_logger()
    # context = types.Context(file_path=ns.file_path, host=ns.host, port=ns.port)  # TODO: update settings from args?
    sys.exit(run_uvicorn(app_settings, log_config))  # TODO: hardcoded name, awry fabric
| 29.493827 | 117 | 0.686061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 594 | 0.24864 |
8d92eb64df1700c877aeea998c716029d6df8ce0 | 391 | py | Python | subscriptions/migrations/0004_auto_20200630_1157.py | Naveendata-ux/tor_redesign | e4b5135f8b4134527ad04a097bdffd9d956d9858 | [
"BSD-2-Clause"
]
| null | null | null | subscriptions/migrations/0004_auto_20200630_1157.py | Naveendata-ux/tor_redesign | e4b5135f8b4134527ad04a097bdffd9d956d9858 | [
"BSD-2-Clause"
]
| null | null | null | subscriptions/migrations/0004_auto_20200630_1157.py | Naveendata-ux/tor_redesign | e4b5135f8b4134527ad04a097bdffd9d956d9858 | [
"BSD-2-Clause"
]
| null | null | null | # Generated by Django 2.2.5 on 2020-06-30 11:57
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('subscriptions', '0003_auto_20200630_1156'),
    ]

    operations = [
        migrations.RenameField(
            model_name='subscriptionplan',
            old_name='Account_type',
            new_name='account_type',
        ),
    ]
| 20.578947 | 53 | 0.613811 | 306 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.340153 |
8d9365bf3bc3b96e70ffdbc229d46a96e3d6b3fd | 545 | py | Python | Random_Colored/main.py | usamaahsan93/mischief-managed | 824022ecaeda46450ca1029bceb39f194c363138 | [
"MIT"
]
| null | null | null | Random_Colored/main.py | usamaahsan93/mischief-managed | 824022ecaeda46450ca1029bceb39f194c363138 | [
"MIT"
]
| null | null | null | Random_Colored/main.py | usamaahsan93/mischief-managed | 824022ecaeda46450ca1029bceb39f194c363138 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 16:54:10 2021
@author: sdn1
"""
class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
import numpy as np
import os
i=0
while(i<1):
    1/0
    print(bcolors.OKGREEN + chr(np.random.randint(250,400)) + bcolors.ENDC, end='')
    os.system('python $(pwd)/main.py')
    i=i+1
    print(i)
| 17.03125 | 83 | 0.552294 | 226 | 0.414679 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.394495 |
8d93c9fb2121a519402ceb1deef23ae520c7fdfe | 1,717 | py | Python | utils/event_store_rebuilder_for_segments.py | initialed85/eds-cctv-system | fcdb7e7e23327bf3a901d23d506b3915833027d1 | [
"MIT"
]
| null | null | null | utils/event_store_rebuilder_for_segments.py | initialed85/eds-cctv-system | fcdb7e7e23327bf3a901d23d506b3915833027d1 | [
"MIT"
]
| null | null | null | utils/event_store_rebuilder_for_segments.py | initialed85/eds-cctv-system | fcdb7e7e23327bf3a901d23d506b3915833027d1 | [
"MIT"
]
| null | null | null | import datetime
from pathlib import Path
from typing import Optional, Tuple
from .common import _IMAGE_SUFFIXES, _PERMITTED_EXTENSIONS, PathDetails, rebuild_event_store
def parse_path(path: Path, tzinfo: datetime.tzinfo) -> Optional[PathDetails]:
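    # Segment file names are assumed to follow <prefix>_<YYYY-MM-DD>_<HH-MM-SS>_<camera>[-lowres].<ext>;
    # the parts[] indexing below relies on that layout.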
    if path.suffix.lower() not in _PERMITTED_EXTENSIONS:
        return None

    if path.name.lower().startswith("event"):
        raise ValueError("cannot process events; only segments")

    parts = path.name.split('_')

    timestamp = datetime.datetime.strptime(f'{parts[1]}_{parts[2]}', "%Y-%m-%d_%H-%M-%S")
    timestamp = timestamp.replace(tzinfo=tzinfo)

    camera_name = parts[3].split('.')[0]
    if camera_name.endswith('-lowres'):
        camera_name = camera_name.split('-lowres')[0]

    return PathDetails(
        path=path,
        event_id=None,
        camera_id=None,
        timestamp=timestamp,
        camera_name=camera_name,
        is_image=path.suffix.lower() in _IMAGE_SUFFIXES,
        is_lowres="-lowres" in path.name.lower(),
    )


def _get_key(path_details: PathDetails) -> Tuple[str, str]:
    return (
        path_details.camera_name,
        path_details.timestamp.strftime("%Y-%m-%d %H:%M:%S")
    )


if __name__ == "__main__":
    import argparse

    from dateutil.tz import tzoffset

    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--root-path", type=str, required=True)
    parser.add_argument("-j", "--json-path", type=str, required=True)
    args = parser.parse_args()

    rebuild_event_store(
        root_path=args.root_path,
        tzinfo=tzoffset(name="WST-8", offset=8 * 60 * 60),
        json_path=args.json_path,
        parse_method=parse_path,
        get_key_methods=[_get_key]
    )
| 28.616667 | 92 | 0.663366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.111241 |
8d9464e17bd59a5730ae1d8d76d451408c780a27 | 4,049 | py | Python | python/src/main/python/common/threadctl.py | esnet/netshell | 4cb010b63e72610cf81112b29587d3e980612333 | [
"BSD-3-Clause-LBNL"
]
| 6 | 2016-02-17T16:31:55.000Z | 2021-03-16T20:17:41.000Z | python/src/main/python/common/threadctl.py | esnet/netshell | 4cb010b63e72610cf81112b29587d3e980612333 | [
"BSD-3-Clause-LBNL"
]
| 27 | 2016-04-11T19:49:04.000Z | 2016-07-14T06:05:52.000Z | python/src/main/python/common/threadctl.py | esnet/netshell | 4cb010b63e72610cf81112b29587d3e980612333 | [
"BSD-3-Clause-LBNL"
]
| 1 | 2017-07-31T19:30:50.000Z | 2017-07-31T19:30:50.000Z | # ESnet Network Operating System (ENOS) Copyright (c) 2015, The Regents
# of the University of California, through Lawrence Berkeley National
# Laboratory (subject to receipt of any required approvals from the
# U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this
# software, please contact Berkeley Lab's Innovation & Partnerships
# Office at [email protected].
#
# NOTICE. This Software was developed under funding from the
# U.S. Department of Energy and the U.S. Government consequently retains
# certain rights. As such, the U.S. Government has been granted for
# itself and others acting on its behalf a paid-up, nonexclusive,
# irrevocable, worldwide license in the Software to reproduce,
# distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit other to do so.
from java.lang import Thread, ThreadGroup
import jarray
import sys
rootThreadGroup = None
def getRootThreadGroup():
global rootThreadGroup
if rootThreadGroup != None:
return rootThreadGroup
tg = Thread.currentThread().getThreadGroup()
ptg = tg.getParent()
while ptg != None:
tg = ptg
ptg = tg.getParent()
    rootThreadGroup = tg
    return tg
def getThreadGroup(name):
groups = getAllThreadGroups()
for group in groups:
if group.getName().equals(name):
            return group
    return None
def getAllThreadGroups():
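    # ThreadGroup.enumerate() copies at most len(array) elements, so keep growing
    # the array until the returned count fits, then trim the unused tail.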
root = getRootThreadGroup()
nbGroups = root.activeGroupCount()
groups = None
while True:
groups = jarray.zeros(nbGroups, ThreadGroup)
n = root.enumerate(groups, True)
if n == nbGroups:
nbGroups *= 2
else:
nbGroups = n
break
return groups[0:(len(groups) - nbGroups) * -1]
def getAllThreads(match=None):
root = getRootThreadGroup()
nbThreads = root.activeGroupCount()
threads = None
while True:
threads = jarray.zeros(nbThreads, Thread)
n = root.enumerate(threads, True)
if n == nbThreads:
nbThreads *= 2
else:
nbThreads = n
break
threads = threads[0:(len(threads) - nbThreads) * -1]
if match != None:
filtered = []
for thread in threads:
if match in thread.getName():
filtered.append(thread)
threads = filtered
return threads
def getThread(id):
threads = getAllThreads()
for thread in threads:
if thread.getId() == id:
return thread
return None
def displayThread(thread, stack=True):
name = thread.getName()
print "Thread id=",thread.getId(),name
print " Stack Trace:\n"
if stack:
st = thread.getStackTrace()
index = len(st)
for trace in st:
print " ",index, trace
def print_syntax():
print
print "threadctl <cmd> <cmds options>"
print "Java Threads tool"
print " Commands are:"
print "\thelp"
print "\tPrints this help."
print "\tshow-thread <tid | all> [grep <string>] Displays a thread by its id or all threads"
print "\t\tAn optional string to match can be provided."
print "\tshow-threadgroup all [grep <string>] Displays all thread groups."
print "\t\tAn optional string to match can be provided."
if __name__ == '__main__':
argv = sys.argv
if len(argv) == 1:
print_syntax()
sys.exit()
cmd = argv[1]
if cmd == "help":
print_syntax()
elif cmd == "show-thread":
gri = argv[2]
if gri == 'all':
match = None
if 'grep' in argv:
match = argv[4]
threads = getAllThreads(match=match)
if threads != None:
for thread in threads:
displayThread(thread=thread)
print
else:
thread = getThread(long(argv[2]))
if (thread == None):
print "unknown",argv[2]
sys.exit()
displayThread(thread)
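    elif cmd == "show-threadgroup":
        # Minimal handler for the show-threadgroup command described in print_syntax (sketch only).
        for group in getAllThreadGroups():
            print "ThreadGroup:", group.getName()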
| 29.772059 | 96 | 0.613732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,306 | 0.322549 |
8d94db8d2bb9acc8dbec349c6766ca408545196a | 599 | py | Python | python/distance/HaversineDistanceInMiles.py | jigneshoo7/AlgoBook | 8aecc9698447c0ee561a1c90d5c5ab87c4a07b79 | [
"MIT"
]
| 191 | 2020-09-28T10:00:20.000Z | 2022-03-06T14:36:55.000Z | python/distance/HaversineDistanceInMiles.py | jigneshoo7/AlgoBook | 8aecc9698447c0ee561a1c90d5c5ab87c4a07b79 | [
"MIT"
]
| 210 | 2020-09-28T10:06:36.000Z | 2022-03-05T03:44:24.000Z | python/distance/HaversineDistanceInMiles.py | jigneshoo7/AlgoBook | 8aecc9698447c0ee561a1c90d5c5ab87c4a07b79 | [
"MIT"
]
| 320 | 2020-09-28T09:56:14.000Z | 2022-02-12T16:45:57.000Z | import math
def distanceInMilesOrKilos(milesOrKilos,originLat,originLon,destinationLat,destinationLon):
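    """Great-circle distance between two lat/lon points via the haversine formula.

    radius is the Earth radius in the requested unit (3959 miles or 6371 km);
    a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2) and
    distance = radius * 2*atan2(sqrt(a), sqrt(1 - a)).
    """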
radius = 3959 if milesOrKilos == "miles" else 6371
lat1 = originLat
lat2 = destinationLat
lon1 = originLon
lon2 = destinationLon
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = radius * c
return distance | 33.277778 | 153 | 0.651085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.011686 |
8d95a5da0117840ab07b75457380a92375c5347d | 8,837 | py | Python | i2i/util.py | thorwhalen/i2i | f967aaba28793029e3fe643c5e17ae9bc7a77732 | [
"Apache-2.0"
]
| 1 | 2019-08-29T01:35:12.000Z | 2019-08-29T01:35:12.000Z | i2i/util.py | thorwhalen/i2i | f967aaba28793029e3fe643c5e17ae9bc7a77732 | [
"Apache-2.0"
]
| null | null | null | i2i/util.py | thorwhalen/i2i | f967aaba28793029e3fe643c5e17ae9bc7a77732 | [
"Apache-2.0"
]
| null | null | null | from __future__ import division
import os
import inspect
import types
from functools import wraps
function_type = type(lambda x: x) # using this instead of callable() because classes are callable, for instance
class NoDefault(object):
def __repr__(self):
return 'no_default'
no_default = NoDefault()
class imdict(dict):
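    """Immutable dict (hash based on object id); every mutating method raises TypeError."""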
def __hash__(self):
return id(self)
def _immutable(self, *args, **kws):
raise TypeError('object is immutable')
__setitem__ = _immutable
__delitem__ = _immutable
clear = _immutable
update = _immutable
setdefault = _immutable
pop = _immutable
popitem = _immutable
def inject_method(self, method_function, method_name=None):
"""
method_function could be:
* a function
* a {method_name: function, ...} dict (for multiple injections)
* a list of functions or (function, method_name) pairs
"""
if isinstance(method_function, function_type):
if method_name is None:
method_name = method_function.__name__
setattr(self,
method_name,
types.MethodType(method_function, self))
else:
if isinstance(method_function, dict):
method_function = [(func, func_name) for func_name, func in method_function.items()]
for method in method_function:
if isinstance(method, tuple) and len(method) == 2:
self = inject_method(self, method[0], method[1])
else:
self = inject_method(self, method)
return self
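# Example usage (illustrative only):
#   d = imdict({'a': 1})
#   inject_method(d, lambda self: list(self.keys()), 'key_list')
#   d.key_list()  # -> ['a']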
def transform_args(**trans_func_for_arg):
"""
Make a decorator that transforms function arguments before calling the function.
For example:
* original argument: a relative path --> used argument: a full path
* original argument: a pickle filepath --> used argument: the loaded object
:param rootdir: rootdir to be used for all name arguments of target function
:param name_arg: the position (int) or argument name of the argument containing the name
:return: a decorator
>>> def f(a, b, c):
... return "a={a}, b={b}, c={c}".format(a=a, b=b, c=c)
>>>
>>> print(f('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = transform_args()(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = transform_args(a=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', 'bar', 3))
a=ROOT/foo, b=bar, c=3
>>> ff = transform_args(b=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=ROOT/bar, c=3
>>> ff = transform_args(a=lambda x: 'ROOT/' + x, b=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', b='bar', c=3))
a=ROOT/foo, b=ROOT/bar, c=3
"""
def transform_args_decorator(func):
if len(trans_func_for_arg) == 0: # if no transformations were specified...
return func # just return the function itself
else:
@wraps(func)
def transform_args_wrapper(*args, **kwargs):
# get a {argname: argval, ...} dict from *args and **kwargs
# Note: Didn't really need an if/else here but...
# Note: ... assuming getcallargs gives us an overhead that can be avoided if there's only keyword args.
if len(args) > 0:
val_of_argname = inspect.getcallargs(func, *args, **kwargs)
else:
val_of_argname = kwargs
# apply transform functions to argument values
for argname, trans_func in trans_func_for_arg.items():
val_of_argname[argname] = trans_func(val_of_argname[argname])
# call the function with transformed values
return func(**val_of_argname)
return transform_args_wrapper
return transform_args_decorator
def resolve_filepath_of_name(name_arg=None, rootdir=''):
"""
Make a decorator that applies a function to an argument before using it.
For example:
* original argument: a relative path --> used argument: a full path
* original argument: a pickle filepath --> used argument: the loaded object
:param rootdir: rootdir to be used for all name arguments of target function
:param name_arg: the position (int) or argument name of the argument containing the name
:return: a decorator
>>> def f(a, b, c):
... return "a={a}, b={b}, c={c}".format(a=a, b=b, c=c)
>>>
>>> print(f('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = resolve_filepath_of_name()(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = resolve_filepath_of_name('a', 'ROOT')(f)
>>> print(ff('foo', 'bar', 3))
a=ROOT/foo, b=bar, c=3
>>> ff = resolve_filepath_of_name('b', 'ROOT')(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=ROOT/bar, c=3
"""
if name_arg is not None:
return transform_args(**{name_arg: lambda x: os.path.join(rootdir, x)})
else:
return lambda x: x
def arg_dflt_dict_of_callable(f):
"""
Get a {arg_name: default_val, ...} dict from a callable.
See also :py:mint_of_callable:
:param f: A callable (function, method, ...)
:return:
"""
argspec = inspect.getfullargspec(f)
args = argspec.args or []
defaults = argspec.defaults or []
return {arg: dflt for arg, dflt in zip(args, [no_default] * (len(args) - len(defaults)) + list(defaults))}
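# Example (illustrative): arg_dflt_dict_of_callable(lambda a, b=2: None) -> {'a': no_default, 'b': 2}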
def add_self_as_first_argument(func):
@wraps(func)
def wrapped_func(self, *args, **kwargs):
return func(*args, **kwargs)
return wrapped_func
def add_cls_as_first_argument(func):
@wraps(func)
def wrapped_func(cls, *args, **kwargs):
return func(*args, **kwargs)
return wrapped_func
def infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func):
"""
Tries to infer if the input function is a 'classmethod' or 'staticmethod' (or just 'normal')
When is that? When:
* the function's first argument is called 'cls' and has no default: 'classmethod'
* the function's first argument is called 'self' and has no default: 'staticmethod'
* otherwise: 'normal'
>>> def a_normal_func(x, y=None):
... pass
>>> def a_func_that_is_probably_a_classmethod(cls, y=None):
... pass
>>> def a_func_that_is_probably_a_staticmethod(self, y=None):
... pass
>>> def a_func_that_is_probably_a_classmethod_but_is_not(cls=3, y=None):
... pass
>>> def a_func_that_is_probably_a_staticmethod_but_is_not(self=None, y=None):
... pass
>>> list_of_functions = [
... a_normal_func,
... a_func_that_is_probably_a_classmethod,
... a_func_that_is_probably_a_staticmethod,
... a_func_that_is_probably_a_classmethod_but_is_not,
... a_func_that_is_probably_a_staticmethod_but_is_not,
... ]
>>>
>>> for func in list_of_functions:
... print("{}: {}".format(func.__name__,
... infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func)))
...
a_normal_func: normal
a_func_that_is_probably_a_classmethod: classmethod
a_func_that_is_probably_a_staticmethod: staticmethod
a_func_that_is_probably_a_classmethod_but_is_not: normal_with_cls
a_func_that_is_probably_a_staticmethod_but_is_not: normal_with_self
"""
argsspec = inspect.getfullargspec(func)
if len(argsspec.args) > 0:
        first_element_has_no_defaults = bool(len(argsspec.args) > len(argsspec.defaults or ()))
if argsspec.args[0] == 'cls':
if first_element_has_no_defaults:
return 'classmethod'
else:
return 'normal_with_cls'
elif argsspec.args[0] == 'self':
if first_element_has_no_defaults:
return 'staticmethod'
else:
return 'normal_with_self'
return 'normal'
def decorate_as_staticmethod_or_classmethod_if_needed(func):
type_of_func = infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func)
if type_of_func == 'classmethod':
return classmethod(func)
elif type_of_func == 'staticmethod':
return staticmethod(func)
elif type_of_func == 'normal':
return func
if __name__ == '__main__':
import os
import re
key_file_re = re.compile('setup.py')
def dir_is_a_pip_installable_dir(dirpath):
return any(filter(key_file_re.match, os.listdir(dirpath)))
rootdir = '/D/Dropbox/dev/py/proj'
cumul = list()
for f in filter(lambda x: not x.startswith('.'), os.listdir(rootdir)):
filepath = os.path.join(rootdir, f)
if os.path.isdir(filepath):
if dir_is_a_pip_installable_dir(filepath):
cumul.append(filepath)
for f in cumul:
print(f)
| 34.928854 | 119 | 0.629965 | 411 | 0.046509 | 0 | 0 | 1,020 | 0.115424 | 0 | 0 | 4,598 | 0.520312 |
8d97b86230f6560f3cd37b723cba275b3f968cb2 | 1,635 | py | Python | setup.py | robflintham/mippy | e642c697202acc5b96b42f62204786bf5e705c9a | [
"BSD-3-Clause"
]
| null | null | null | setup.py | robflintham/mippy | e642c697202acc5b96b42f62204786bf5e705c9a | [
"BSD-3-Clause"
]
| null | null | null | setup.py | robflintham/mippy | e642c697202acc5b96b42f62204786bf5e705c9a | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def get_version():
import mippy
return mippy.__version__
def test_version():
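    """Abort setup if any mippy source/resource file is newer than the compiled __init__.pyc (version bump likely forgotten)."""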
version = get_version()
import datetime
import os
# Get timestamp for __init__.pyc
version_time = datetime.datetime.fromtimestamp(os.path.getmtime(r'mippy\__init__.pyc'))
other_times = []
for root, dirs, files in os.walk('mippy'):
for f in files:
fpath = os.path.join(root,f)
lastdir = os.path.split(os.path.split(fpath)[0])[1]
if os.path.splitext(fpath)[1]=='.py' or lastdir=='resources':
other_times.append(datetime.datetime.fromtimestamp(os.path.getmtime(fpath)))
code_changed = False
for tstamp in other_times:
## print tstamp, version_time, tstamp>version_time
if tstamp>version_time:
code_changed = True
break
if code_changed:
print "CANNOT COMPILE - VERSION NUMBER OUTDATED"
import sys
sys.exit()
return
# Test version numbering before running setup
test_version()
setup( name='MIPPY',
version=get_version(),
description='Modular Image Processing in Python',
author='Robert Flintham',
author_email='[email protected]',
install_requires=['numpy','scipy','dicom','pillow','nibabel','matplotlib'],
license='BSD-3-Clause',
classifiers=[
'Programming Language :: Python :: 2.7',
],
packages=['mippy','mippy.mdicom','mippy.mviewer'],
package_data={'':['resources/*','mviewer/config']}
)
| 29.727273 | 92 | 0.666055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.305199 |
8d98eec2f752514e211b3f9e607274f2de78ffd9 | 3,543 | py | Python | physprog/tests/test_sample_problem.py | partofthething/physprog | 8bbeb8d84697469417577c76c924dcb3a855cd2d | [
"Apache-2.0"
]
| 3 | 2018-03-25T16:13:53.000Z | 2021-06-29T14:30:20.000Z | physprog/tests/test_sample_problem.py | partofthething/physprog | 8bbeb8d84697469417577c76c924dcb3a855cd2d | [
"Apache-2.0"
]
| null | null | null | physprog/tests/test_sample_problem.py | partofthething/physprog | 8bbeb8d84697469417577c76c924dcb3a855cd2d | [
"Apache-2.0"
]
| 2 | 2021-09-18T08:38:32.000Z | 2022-03-01T07:43:52.000Z | """Run a sample problem to test full system."""
# pylint: disable=invalid-name,missing-docstring
import unittest
from collections import namedtuple
import math
import os
from physprog import classfunctions
from physprog import optimize
THIS_DIR = os.path.dirname(__file__)
SAMPLE_INPUT = os.path.join(THIS_DIR, 'sample-input.yaml')
class TestInput(unittest.TestCase):
"""Test that input can be read."""
def test_read_class_functions(self):
functions = classfunctions.from_input(SAMPLE_INPUT)
self.assertTrue('frequency' in functions)
class Test_Sample_Problem(unittest.TestCase):
"""Test by optimizing a beam problem from the literature."""
def test_optimization(self):
beam = SampleProblemBeam()
# check initial conditions
self.assertAlmostEqual(beam.frequency(), 113.0, delta=0.5)
self.assertAlmostEqual(beam.cost(), 1060.0)
self.assertAlmostEqual(beam.mass(), 2230.0)
prefs = classfunctions.from_input(SAMPLE_INPUT)
optimize.optimize(beam, prefs, plot=False)
# not rigorous, but happens in this problem
self.assertLess(beam.cost(), 1060.0)
SampleDesign = namedtuple('SampleDesign', ['d1', 'd2', 'd3', 'b', 'L'])
class SampleProblemBeam(object):
"""Sample beam design problem from Messac, 1996."""
E1 = 1.6e9
C1 = 500.0
RHO1 = 100.0
E2 = 70e9
C2 = 1500.0
RHO2 = 2770.0
E3 = 200e9
C3 = 800.0
RHO3 = 7780.0
def __init__(self):
self._design = SampleDesign(0.3, 0.35, 0.40, 0.40, 5.0) # initial
def evaluate(self, x=None):
"""Convert input design into output design parameters."""
if x is not None:
self.design = x
return [self.frequency(), self.cost(), self.width(), self.length(),
self.mass(), self.semiheight(), self.width_layer1(),
self.width_layer2(), self.width_layer3()]
@property
def design(self):
return self._design
@design.setter
def design(self, val):
self._design = SampleDesign(*val)
@property
def ei(self):
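        """Equivalent bending stiffness (EI) of the three-layer sandwich beam."""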
ds = self.design
return 2.0 / 3.0 * ds.b * (self.E1 * ds.d1 ** 3 +
self.E2 * (ds.d2 ** 3 - ds.d1 ** 3) +
self.E3 * (ds.d3 ** 3 - ds.d2 ** 3))
@property
def mu(self):
ds = self.design
return 2 * ds.b * (self.RHO1 * ds.d1 +
self.RHO2 * (ds.d2 - ds.d1) +
self.RHO3 * (ds.d3 - ds.d2))
def frequency(self):
return math.pi / (2 * self.design.L ** 2) * math.sqrt(self.ei / self.mu)
def cost(self):
ds = self.design
# cost in the paper says 1060 but I'm getting 212, exactly a
# factor of 5 off. But why?? Ah, because cost should have L in it!
# That's a typo in the paper.
return 2 * ds.b * ds.L * (self.C1 * ds.d1 +
self.C2 * (ds.d2 - ds.d1) +
self.C3 * (ds.d3 - ds.d2))
def width(self):
return self.design.b
def length(self):
return self.design.L
def mass(self):
return self.mu * self.design.L
def semiheight(self):
return self.design.d3
def width_layer1(self):
return self.design.d1
def width_layer2(self):
return self.design.d2 - self.design.d1
def width_layer3(self):
return self.design.d3 - self.design.d2
if __name__ == '__main__':
unittest.main()
| 28.804878 | 80 | 0.582275 | 3,075 | 0.867909 | 0 | 0 | 609 | 0.171888 | 0 | 0 | 602 | 0.169913 |
8d99f51b98aee394d6e4b4f62dcc6cdca1b6db1f | 10,131 | py | Python | tutorials/seq2seq_sated/seq2seq_sated_meminf.py | rizwandel/ml_privacy_meter | 5dc4c300eadccceadd0e664a7e46099f65728628 | [
"MIT"
]
| 294 | 2020-04-13T18:32:45.000Z | 2022-03-31T10:32:34.000Z | tutorials/seq2seq_sated/seq2seq_sated_meminf.py | kypomon/ml_privacy_meter | c0324e8f74cbd0cde0643a7854fa66eab47bbe53 | [
"MIT"
]
| 26 | 2020-04-29T19:56:21.000Z | 2022-03-31T10:42:24.000Z | tutorials/seq2seq_sated/seq2seq_sated_meminf.py | kypomon/ml_privacy_meter | c0324e8f74cbd0cde0643a7854fa66eab47bbe53 | [
"MIT"
]
| 50 | 2020-04-16T02:16:24.000Z | 2022-03-16T00:37:40.000Z | import os
import sys
from collections import defaultdict
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from sklearn.linear_model import LogisticRegression
from utils import process_texts, load_texts, load_users, load_sated_data_by_user, \
build_nmt_model, words_to_indices, \
SATED_TRAIN_USER, SATED_TRAIN_FR, SATED_TRAIN_ENG
MODEL_PATH = 'checkpoints/model/'
OUTPUT_PATH = 'checkpoints/output/'
tf.compat.v1.disable_eager_execution()
# ================================ GENERATE RANKS ================================ #
# Code adapted from https://github.com/csong27/auditing-text-generation
def load_train_users_heldout_data(train_users, src_vocabs, trg_vocabs, user_data_ratio=0.5):
src_users = load_users(SATED_TRAIN_USER)
train_src_texts = load_texts(SATED_TRAIN_ENG)
train_trg_texts = load_texts(SATED_TRAIN_FR)
user_src_texts = defaultdict(list)
user_trg_texts = defaultdict(list)
for u, s, t in zip(src_users, train_src_texts, train_trg_texts):
if u in train_users:
user_src_texts[u].append(s)
user_trg_texts[u].append(t)
assert 0. < user_data_ratio < 1.
# Hold out some fraction of data for testing
for u in user_src_texts:
l = len(user_src_texts[u])
l = int(l * user_data_ratio)
user_src_texts[u] = user_src_texts[u][l:]
user_trg_texts[u] = user_trg_texts[u][l:]
for u in train_users:
process_texts(user_src_texts[u], src_vocabs)
process_texts(user_trg_texts[u], trg_vocabs)
return user_src_texts, user_trg_texts
def rank_lists(lists):
ranks = np.empty_like(lists)
for i, l in enumerate(lists):
ranks[i] = ss.rankdata(l, method='min') - 1
return ranks
def get_ranks(user_src_data, user_trg_data, pred_fn, save_probs=False):
indices = np.arange(len(user_src_data))
"""
Get ranks from prediction vectors.
"""
ranks = []
labels = []
probs = []
for idx in indices:
src_text = np.asarray(user_src_data[idx], dtype=np.float32).reshape(1, -1)
trg_text = np.asarray(user_trg_data[idx], dtype=np.float32)
trg_input = trg_text[:-1].reshape(1, -1)
trg_label = trg_text[1:].reshape(1, -1)
prob = pred_fn([src_text, trg_input, trg_label, 0])[0][0]
if save_probs:
probs.append(prob)
all_ranks = rank_lists(-prob)
sent_ranks = all_ranks[np.arange(len(all_ranks)), trg_label.flatten().astype(int)]
ranks.append(sent_ranks)
labels.append(trg_label.flatten())
if save_probs:
return probs
return ranks, labels
def save_users_rank_results(users, user_src_texts, user_trg_texts, src_vocabs, trg_vocabs, prob_fn, save_dir,
member_label=1, cross_domain=False, save_probs=False, mask=False, rerun=False):
"""
Save user ranks in the appropriate format for attacks.
"""
for i, u in enumerate(users):
save_path = save_dir + 'rank_u{}_y{}{}.npz'.format(i, member_label, '_cd' if cross_domain else '')
prob_path = save_dir + 'prob_u{}_y{}{}.npz'.format(i, member_label, '_cd' if cross_domain else '')
if os.path.exists(save_path) and not save_probs and not rerun:
continue
user_src_data = words_to_indices(user_src_texts[u], src_vocabs, mask=mask)
user_trg_data = words_to_indices(user_trg_texts[u], trg_vocabs, mask=mask)
rtn = get_ranks(user_src_data, user_trg_data, prob_fn, save_probs=save_probs)
if save_probs:
probs = rtn
np.savez(prob_path, probs)
else:
ranks, labels = rtn[0], rtn[1]
np.savez(save_path, ranks, labels)
if (i + 1) % 500 == 0:
sys.stderr.write('Finishing saving ranks for {} users'.format(i + 1))
def get_target_ranks(num_users=200, num_words=5000, mask=False, h=128, emb_h=128, user_data_ratio=0.,
tied=False, save_probs=False):
"""
Get ranks of target machine translation model.
"""
user_src_texts, user_trg_texts, test_user_src_texts, test_user_trg_texts, src_vocabs, trg_vocabs \
= load_sated_data_by_user(num_users, num_words, test_on_user=True, user_data_ratio=user_data_ratio)
train_users = sorted(user_src_texts.keys())
test_users = sorted(test_user_src_texts.keys())
# Get model
save_dir = OUTPUT_PATH + 'target_{}{}/'.format(num_users, '_dr' if 0. < user_data_ratio < 1. else '')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
model_path = 'sated_nmt'.format(num_users)
if 0. < user_data_ratio < 1.:
model_path += '_dr{}'.format(user_data_ratio)
heldout_src_texts, heldout_trg_texts = load_train_users_heldout_data(train_users, src_vocabs, trg_vocabs)
for u in train_users:
user_src_texts[u] += heldout_src_texts[u]
user_trg_texts[u] += heldout_trg_texts[u]
model = build_nmt_model(Vs=num_words, Vt=num_words, mask=mask, drop_p=0., h=h, demb=emb_h, tied=tied)
model.load_weights(MODEL_PATH + '{}_{}.h5'.format(model_path, num_users))
src_input_var, trg_input_var = model.inputs
prediction = model.output
trg_label_var = K.placeholder((None, None), dtype='float32')
# Get predictions
prediction = K.softmax(prediction)
prob_fn = K.function([src_input_var, trg_input_var, trg_label_var, K.learning_phase()], [prediction])
# Save user ranks for train and test dataset
save_users_rank_results(users=train_users, save_probs=save_probs,
user_src_texts=user_src_texts, user_trg_texts=user_trg_texts,
src_vocabs=src_vocabs, trg_vocabs=trg_vocabs, cross_domain=False,
prob_fn=prob_fn, save_dir=save_dir, member_label=1)
save_users_rank_results(users=test_users, save_probs=save_probs,
user_src_texts=test_user_src_texts, user_trg_texts=test_user_trg_texts,
src_vocabs=src_vocabs, trg_vocabs=trg_vocabs, cross_domain=False,
prob_fn=prob_fn, save_dir=save_dir, member_label=0)
# ================================ ATTACK ================================ #
def avg_rank_feats(ranks):
"""
Averages ranks to get features for deciding the threshold for membership inference.
"""
avg_ranks = []
for r in ranks:
avg = np.mean(np.concatenate(r))
avg_ranks.append(avg)
return avg_ranks
def load_ranks_by_label(save_dir, num_users=300, cross_domain=False, label=1):
"""
Helper method to load ranks by train/test dataset.
If label = 1, train set ranks are loaded. If label = 0, test set ranks are loaded.
Ranks are generated by running sated_nmt_ranks.py.
"""
ranks = []
labels = []
y = []
for i in range(num_users):
save_path = save_dir + 'rank_u{}_y{}{}.npz'.format(i, label, '_cd' if cross_domain else '')
if os.path.exists(save_path):
f = np.load(save_path, allow_pickle=True)
train_rs, train_ls = f['arr_0'], f['arr_1']
ranks.append(train_rs)
labels.append(train_ls)
y.append(label)
return ranks, labels, y
def load_all_ranks(save_dir, num_users=5000, cross_domain=False):
"""
Loads all ranks generated by the target model.
Ranks are generated by running sated_nmt_ranks.py.
"""
ranks = []
labels = []
y = []
# Load train ranks
train_label = 1
train_ranks, train_labels, train_y = load_ranks_by_label(save_dir, num_users, cross_domain, train_label)
ranks = ranks + train_ranks
labels = labels + train_labels
y = y + train_y
# Load test ranks
test_label = 0
test_ranks, test_labels, test_y = load_ranks_by_label(save_dir, num_users, cross_domain, test_label)
ranks = ranks + test_ranks
labels = labels + test_labels
y = y + test_y
return ranks, labels, np.asarray(y)
def run_average_rank_thresholding(num_users=300, dim=100, prop=1.0, user_data_ratio=0.,
top_words=5000, cross_domain=False, rerun=False):
"""
Runs average rank thresholding attack on the target model.
"""
result_path = OUTPUT_PATH
if dim > top_words:
dim = top_words
attack1_results_save_path = result_path + 'mi_data_dim{}_prop{}_{}{}_attack1.npz'.format(
dim, prop, num_users, '_cd' if cross_domain else '')
if not rerun and os.path.exists(attack1_results_save_path):
f = np.load(attack1_results_save_path)
X, y = [f['arr_{}'.format(i)] for i in range(4)]
else:
save_dir = result_path + 'target_{}{}/'.format(num_users, '_dr' if 0. < user_data_ratio < 1. else '')
# Load ranks
train_ranks, _, train_y = load_ranks_by_label(save_dir, num_users, label=1)
test_ranks, _, test_y = load_ranks_by_label(save_dir, num_users, label=0)
# Convert to average rank features
train_feat = avg_rank_feats(train_ranks)
test_feat = avg_rank_feats(test_ranks)
# Create dataset
X, y = np.concatenate([train_feat, test_feat]), np.concatenate([train_y, test_y])
np.savez(attack1_results_save_path, X, y)
# print(X.shape, y.shape)
# Find threshold using ROC
clf = LogisticRegression()
clf.fit(X.reshape(-1, 1), y)
probs = clf.predict_proba(X.reshape(-1, 1))
fpr, tpr, thresholds = roc_curve(y, probs[:, 1])
plt.figure(1)
plt.plot(fpr, tpr, label='Attack 1')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.savefig('sateduser_attack1_roc_curve.png')
if __name__ == '__main__':
num_users = 300
save_probs = False
rerun = True
print("Getting target ranks...")
get_target_ranks(num_users=num_users, save_probs=save_probs)
print("Running average rank thresholding attack...")
run_average_rank_thresholding(num_users=num_users, rerun=True)
| 35.672535 | 113 | 0.660251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,661 | 0.163952 |
8d9d264830cab7159205ed06b41898abec3b84f4 | 2,685 | py | Python | app/recipe/tests/test_tags_api.py | MohamedAbdelmagid/django-recipe-api | 229d3a7cff483b3cad76c70aefde6a51250b9bc8 | [
"MIT"
]
| null | null | null | app/recipe/tests/test_tags_api.py | MohamedAbdelmagid/django-recipe-api | 229d3a7cff483b3cad76c70aefde6a51250b9bc8 | [
"MIT"
]
| null | null | null | app/recipe/tests/test_tags_api.py | MohamedAbdelmagid/django-recipe-api | 229d3a7cff483b3cad76c70aefde6a51250b9bc8 | [
"MIT"
]
| null | null | null | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse("recipe:tag-list")
class PublicTagsApiTests(TestCase):
""" Test the publicly available tags API """
def setUp(self):
self.client = APIClient()
def test_login_required(self):
""" Test that login is required for retrieving tags """
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
""" Test the authorized user tags API """
def setUp(self):
self.user = get_user_model().objects.create_user(
"[email protected]", "testpassword"
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
""" Test retrieving tags """
Tag.objects.create(user=self.user, name="Dessert")
Tag.objects.create(user=self.user, name="Salad")
response = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by("-name")
serializer = TagSerializer(tags, many=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_tags_limited_to_user(self):
""" Test that tags returned are for the authenticated user """
user2 = get_user_model().objects.create_user(
"[email protected]", "test2password"
)
Tag.objects.create(user=user2, name="Candied Yams")
tag = Tag.objects.create(user=self.user, name="Soul Food")
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]["name"], tag.name)
def test_create_tag_successful(self):
""" Test creating a new tag """
payload = {'name': 'Test Tag Name'}
notExists = Tag.objects.filter(user=self.user, name=payload['name']).exists()
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload['name']).exists()
self.assertTrue(exists)
self.assertFalse(notExists)
def test_create_tag_invalid(self):
""" Test creating a new tag with invalid payload """
payload = {'name': ''}
response = self.client.post(TAGS_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 32.349398 | 85 | 0.672998 | 2,376 | 0.884916 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.181378 |
8d9d7d5c7ee0f28e0c8877291fb904e2d8ace2db | 5,736 | py | Python | dtlpy/entities/annotation_definitions/cube_3d.py | dataloop-ai/dtlpy | 2c73831da54686e047ab6aefd8f12a8e53ea97c2 | [
"Apache-2.0"
]
| 10 | 2020-05-21T06:25:35.000Z | 2022-01-07T20:34:03.000Z | dtlpy/entities/annotation_definitions/cube_3d.py | dataloop-ai/dtlpy | 2c73831da54686e047ab6aefd8f12a8e53ea97c2 | [
"Apache-2.0"
]
| 22 | 2019-11-17T17:25:16.000Z | 2022-03-10T15:14:28.000Z | dtlpy/entities/annotation_definitions/cube_3d.py | dataloop-ai/dtlpy | 2c73831da54686e047ab6aefd8f12a8e53ea97c2 | [
"Apache-2.0"
]
| 8 | 2020-03-05T16:23:55.000Z | 2021-12-27T11:10:42.000Z | import numpy as np
# import open3d as o3d
from . import BaseAnnotationDefinition
# from scipy.spatial.transform import Rotation as R
import logging
logger = logging.getLogger(name=__name__)
class Cube3d(BaseAnnotationDefinition):
"""
Cube annotation object
"""
type = "cube_3d"
def __init__(self, label, position, scale, rotation,
attributes=None, description=None):
"""
:param label:
:param position: the XYZ position of the ‘center’ of the annotation.
:param scale: the scale of the object by each axis (XYZ).
:param rotation: an euler representation of the object rotation on each axis (with rotation order ‘XYZ’). (rotation in radians)
:param attributes:
:param description:
"""
super().__init__(description=description, attributes=attributes)
self.position = position
self.scale = scale
self.rotation = rotation
self.label = label
def _translate(self, points, translate_x, translate_y, translate_z):
translation_matrix = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[translate_x, translate_y, translate_z, 1]])
matrix = [(list(i) + [1]) for i in points]
pts2 = np.dot(matrix, translation_matrix)
return [pt[:3] for pt in pts2]
# def make_points(self):
# simple = [
# [self.scale[0] / 2, self.scale[1] / 2, self.scale[2] / 2],
# [-self.scale[0] / 2, self.scale[1] / 2, self.scale[2] / 2],
# [self.scale[0] / 2, -self.scale[1] / 2, self.scale[2] / 2],
# [self.scale[0] / 2, self.scale[1] / 2, -self.scale[2] / 2],
# [-self.scale[0] / 2, -self.scale[1] / 2, self.scale[2] / 2],
# [self.scale[0] / 2, -self.scale[1] / 2, -self.scale[2] / 2],
# [-self.scale[0] / 2, self.scale[1] / 2, -self.scale[2] / 2],
# [-self.scale[0] / 2, -self.scale[1] / 2, -self.scale[2] / 2],
# ]
#
# # matrix = R.from_euler('xyz', self.rotation, degrees=False)
#
# vecs = [np.array(p) for p in simple]
# rotated = matrix.apply(vecs)
# translation = np.array(self.position)
# dX = translation[0]
# dY = translation[1]
# dZ = translation[2]
# points = self._translate(rotated, dX, dY, dZ)
# return points
@property
def geo(self):
return np.asarray([
list(self.position),
list(self.scale),
list(self.rotation)
])
def show(self, image, thickness, with_text, height, width, annotation_format, color):
"""
Show annotation as ndarray
:param image: empty or image to draw on
:param thickness:
:param with_text: not required
:param height: item height
:param width: item width
:param annotation_format: options: list(dl.ViewAnnotationOptions)
:param color: color
:return: ndarray
"""
try:
import cv2
except (ImportError, ModuleNotFoundError):
self.logger.error(
'Import Error! Cant import cv2. Annotations operations will be limited. import manually and fix errors')
raise
points = self.make_points()
front_bl = points[0]
front_br = points[1]
front_tr = points[2]
front_tl = points[3]
back_bl = points[4]
back_br = points[5]
back_tr = points[6]
back_tl = points[7]
logger.warning('the show for 3d_cube is not supported.')
return image
# image = np.zeros((100, 100, 100), dtype=np.uint8)
# pcd = o3d.io.read_point_cloud(r"C:\Users\97250\PycharmProjects\tt\qw\3D\D34049418_0000635.las.pcd")
# # o3d.visualization.draw_geometries([pcd])
# # points = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1],
# # [0, 1, 1], [1, 1, 1]]
# lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],
# [0, 4], [1, 5], [2, 6], [3, 7]]
# colors = [[1, 0, 0] for i in range(len(lines))]
# points = [back_bl, back_br, back_tl, back_tr, front_bl, front_br, front_tl, front_tr]
# line_set = o3d.geometry.LineSet()
# line_set.points = o3d.utility.Vector3dVector(points)
# line_set.lines = o3d.utility.Vector2iVector(lines)
# line_set.colors = o3d.utility.Vector3dVector(colors)
# o3d.visualization.draw_geometries([line_set])
# return image
def to_coordinates(self, color=None):
keys = ["position", "scale", "rotation"]
coordinates = {keys[idx]: {"x": float(x), "y": float(y), "z": float(z)}
for idx, [x, y, z] in enumerate(self.geo)}
return coordinates
@staticmethod
def from_coordinates(coordinates):
geo = list()
for key, pt in coordinates.items():
geo.append([pt["x"], pt["y"], pt["z"]])
return np.asarray(geo)
@classmethod
def from_json(cls, _json):
if "coordinates" in _json:
key = "coordinates"
elif "data" in _json:
key = "data"
else:
raise ValueError('can not find "coordinates" or "data" in annotation. id: {}'.format(_json["id"]))
return cls(
position=list(_json[key]['position'].values()),
scale=list(_json[key]['scale'].values()),
rotation=list(_json[key]['rotation'].values()),
label=_json["label"],
attributes=_json.get("attributes", None)
)
| 38.496644 | 135 | 0.544107 | 5,550 | 0.966226 | 0 | 0 | 944 | 0.164345 | 0 | 0 | 2,950 | 0.513579 |
8d9e1079bef17b6514de9131ede3ab7099ea53a4 | 3,702 | py | Python | my_module/tools.py | roki18d/sphinx_autogen-apidoc | 67ad9c716c909d89bcd813a5fa871df8850e4fd5 | [
"Apache-2.0"
]
| null | null | null | my_module/tools.py | roki18d/sphinx_autogen-apidoc | 67ad9c716c909d89bcd813a5fa871df8850e4fd5 | [
"Apache-2.0"
]
| null | null | null | my_module/tools.py | roki18d/sphinx_autogen-apidoc | 67ad9c716c909d89bcd813a5fa871df8850e4fd5 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# coding: utf-8
from my_module.exceptions import InvalidArgumentsError
class SimpleCalculator(object):
"""SimpleCalculator
SimpleCalculator is a simple calculator.
Attributes:
operator (str):
String that represents operation type.
Acceptable values are: {"add": addition, "sub": subtraction
"mul": multiplication, "div": divide}
response (dict):
Response for API execution.
This contains conditions (such as operands) and execution results.
"""
def __init__(self, operator: str) -> None:
"""Initialize instance
Args:
operator (str):
"""
valid_operators = ["add", "sub", "mul", "div"]
if operator not in valid_operators:
msg = f"Invalid operator '{operator}' was given, choose from {valid_operators}."
raise InvalidArgumentsError(msg)
else:
self.operator = operator
self.response = dict()
def __add(self, num1: int, num2: int) -> None:
self.response['results'] = {"sum": num1 + num2}
return None
def __sub(self, num1: int, num2: int) -> None:
self.response['results'] = {"difference": num1 - num2}
return None
def __mul(self, num1: int, num2: int) -> None:
self.response['results'] = {"product": num1 * num2}
return None
def __div(self, num1: int, num2: int) -> None:
self.response['results'] = {"quotient": num1//num2, "remainder": num1%num2}
return None
def __handle_exceptions(self, e) -> None:
self.response['results'] = {"error_message": e}
return None
def execute(self, num1: int, num2: int):
"""
Interface to execute caluculation.
Args:
num1 (int): 1st operand.
num2 (int): 2nd operand.
Returns:
dict: self.response
Raises:
InvalidArgumentsError:
Examples:
>>> my_adder = SimpleCalculator(operator="add")
>>> my_adder.execute(4, 2)
{'operands': {'num1': 4, 'num2': 2}, 'results': {'sum': 6}}
"""
try:
operands = {"num1": num1, "num2": num2}
self.response['operands'] = operands
if (not isinstance(num1, int)) or (not isinstance(num2, int)):
msg = f"All operands should be integer, given: {operands}."
raise InvalidArgumentsError(msg)
except Exception as e:
_ = self.__handle_exceptions(e)
try:
if self.operator == "add":
_ = self.__add(num1, num2)
elif self.operator == "sub":
_ = self.__sub(num1, num2)
elif self.operator == "mul":
_ = self.__mul(num1, num2)
elif self.operator == "div":
_ = self.__div(num1, num2)
except Exception as e:
_ = self.__handle_exceptions(e)
return self.response
if __name__ == "__main__":
my_adder = SimpleCalculator(operator="add")
print('Case01:', my_adder.execute(4, 2))
print('Case02:', my_adder.execute(5, "a"))
my_subtractor = SimpleCalculator(operator="sub")
print('Case03:', my_subtractor.execute(3, 5))
my_multiplier = SimpleCalculator(operator="mul")
print('Case04:', my_multiplier.execute(2, 7))
my_divider = SimpleCalculator(operator="div")
print('Case05:', my_divider.execute(17, 5))
print('Case06:', my_divider.execute(6, 0))
print('Case07:')
my_unknown = SimpleCalculator(operator="unknown")
import sys; sys.exit(0)
| 30.85 | 92 | 0.562939 | 2,973 | 0.803079 | 0 | 0 | 0 | 0 | 0 | 0 | 1,400 | 0.378174 |
8da38969800ff2540723920b2ba94670badb3561 | 12,114 | py | Python | PCA_ResNet50.py | liuyingbin19222/HSI_svm_pca_resNet50 | cd95d21c81e93f8b873183f10f52416f71a93d07 | [
"Apache-2.0"
]
| 12 | 2020-03-13T02:39:53.000Z | 2022-02-21T03:28:33.000Z | PCA_ResNet50.py | liuyingbin19222/HSI_svm_pca_resNet50 | cd95d21c81e93f8b873183f10f52416f71a93d07 | [
"Apache-2.0"
]
| 14 | 2020-02-17T12:31:08.000Z | 2022-02-10T01:07:05.000Z | PCA_ResNet50.py | liuyingbin19222/HSI_svm_pca_resNet50 | cd95d21c81e93f8b873183f10f52416f71a93d07 | [
"Apache-2.0"
]
| 3 | 2020-09-06T08:19:15.000Z | 2021-03-08T10:15:40.000Z | import keras
from keras.layers import Conv2D, Conv3D, Flatten, Dense, Reshape, BatchNormalization
from keras.layers import Dropout, Input
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
from operator import truediv
from plotly.offline import init_notebook_mode
import numpy as np
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.initializers import glorot_uniform
import pydot
from IPython.display import SVG
import scipy.misc
from matplotlib.pyplot import imshow
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import os
import spectral
## GLOBAL VARIABLES
dataset = 'IP'
test_ratio = 0.8
windowSize = 25
def loadData(name):
data_path = os.path.join(os.getcwd(),'data')
if name == 'IP':
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
elif name == 'SA':
data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
elif name == 'PU':
data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
return data, labels
def splitTrainTestSet(X, y, testRatio, randomState=345):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=randomState,
stratify=y)
return X_train, X_test, y_train, y_test
def applyPCA(X, numComponents=75):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
# remove zero-labeled (background) pixels
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
X, y = loadData(dataset)
K = 30 if dataset == 'IP' else 15
X,pca = applyPCA(X,numComponents=K)
X, y = createImageCubes(X, y, windowSize=windowSize)
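# X now holds one windowSize x windowSize x K cube per labeled pixel and y the
# matching 0-based class index (16 classes for the Indian Pines scene).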
##
Xtrain, Xtest, ytrain, ytest = splitTrainTestSet(X, y, test_ratio)
# print("Xtrain.shape:",Xtrain.shape)
# print("ytrain.shape:",ytrain.shape)
# print("ytrain:",ytrain)
def convert_one_hot(labels,classes=16):
return to_categorical(labels,num_classes=classes)
ytrain = convert_one_hot(ytrain,16)
ytest = convert_one_hot(ytest,16)
# print("ytrain.shape:",ytrain.shape)
# ResNet50 network
def identity_block(X, f, filters, stage, block):
"""
    Implements the identity block shown in Figure 3.
    Arguments:
        X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        f -- integer, size of the middle CONV window in the main path
        filters -- list of integers, number of filters in each CONV layer of the main path
        stage -- integer, used together with block to name the layers by their position in the network
        block -- string, used together with stage to name the layers by their position in the network
    Returns:
        X -- output of the identity block, a tensor of shape (n_H, n_W, n_C)
"""
    # define the naming convention
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
    # retrieve the filters
F1, F2, F3 = filters
    # save the input value; it will be added back as the shortcut to the main path
X_shortcut = X
    # first component of the main path
    ## convolution layer
X = Conv2D(filters=F1, kernel_size=(1,1), strides=(1,1) ,padding="valid",
name=conv_name_base+"2a", kernel_initializer=glorot_uniform(seed=0))(X)
    ## batch normalization
X = BatchNormalization(axis=3,name=bn_name_base+"2a")(X)
    ## ReLU activation
X = Activation("relu")(X)
    # second component of the main path
    ## convolution layer
X = Conv2D(filters=F2, kernel_size=(f,f),strides=(1,1), padding="same",
name=conv_name_base+"2b", kernel_initializer=glorot_uniform(seed=0))(X)
    ## batch normalization
X = BatchNormalization(axis=3,name=bn_name_base+"2b")(X)
    ## ReLU activation
X = Activation("relu")(X)
    # third component of the main path
    ## convolution layer
X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding="valid",
name=conv_name_base+"2c", kernel_initializer=glorot_uniform(seed=0))(X)
    ## batch normalization
X = BatchNormalization(axis=3,name=bn_name_base+"2c")(X)
    ## no ReLU activation here
    # final step:
    ## add the shortcut to the main-path output
X = Add()([X,X_shortcut])
    ## ReLU activation
X = Activation("relu")(X)
return X
def convolutional_block(X, f, filters, stage, block, s=2):
"""
    Implements the convolutional block shown in Figure 5.
    Arguments:
        X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        f -- integer, size of the middle CONV window in the main path
        filters -- list of integers, number of filters in each CONV layer of the main path
        stage -- integer, used together with block to name the layers by their position in the network
        block -- string, used together with stage to name the layers by their position in the network
        s -- integer, stride used by the first CONV layer and the shortcut
    Returns:
        X -- output of the convolutional block, a tensor of shape (n_H, n_W, n_C)
"""
    # define the naming convention
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
    # retrieve the number of filters
F1, F2, F3 = filters
    # save the input value
X_shortcut = X
    # main path
    ## first component of the main path
X = Conv2D(filters=F1, kernel_size=(1,1), strides=(s,s), padding="valid",
name=conv_name_base+"2a", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3,name=bn_name_base+"2a")(X)
X = Activation("relu")(X)
    ## second component of the main path
X = Conv2D(filters=F2, kernel_size=(f,f), strides=(1,1), padding="same",
name=conv_name_base+"2b", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3,name=bn_name_base+"2b")(X)
X = Activation("relu")(X)
    ## third component of the main path
X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding="valid",
name=conv_name_base+"2c", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3,name=bn_name_base+"2c")(X)
    # shortcut path
X_shortcut = Conv2D(filters=F3, kernel_size=(1,1), strides=(s,s), padding="valid",
name=conv_name_base+"1", kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis=3,name=bn_name_base+"1")(X_shortcut)
    # final step
X = Add()([X,X_shortcut])
X = Activation("relu")(X)
return X
def ResNet50(input_shape=(25,25,30),classes=16):
"""
    Implements the ResNet50 architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
    Arguments:
        input_shape -- shape of the input image cubes
        classes -- integer, number of output classes
    Returns:
        model -- a Keras Model instance
"""
    # define the input tensor
X_input = Input(input_shape)
    # zero-padding
X = ZeroPadding2D((3,3))(X_input)
#stage1
X = Conv2D(filters=64, kernel_size=(7,7), strides=(2,2), name="conv1",
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name="bn_conv1")(X)
X = Activation("relu")(X)
X = MaxPooling2D(pool_size=(3,3), strides=(2,2))(X)
#stage2
X = convolutional_block(X, f=3, filters=[64,64,256], stage=2, block="a", s=1)
X = identity_block(X, f=3, filters=[64,64,256], stage=2, block="b")
X = identity_block(X, f=3, filters=[64,64,256], stage=2, block="c")
#stage3
X = convolutional_block(X, f=3, filters=[128,128,512], stage=3, block="a", s=2)
X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="b")
X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="c")
X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="d")
#stage4
X = convolutional_block(X, f=3, filters=[256,256,1024], stage=4, block="a", s=2)
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="b")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="c")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="d")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="e")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="f")
#stage5
X = convolutional_block(X, f=3, filters=[512,512,2048], stage=5, block="a", s=2)
X = identity_block(X, f=3, filters=[512,512,2048], stage=5, block="b")
X = identity_block(X, f=3, filters=[512,512,2048], stage=5, block="c")
    # average pooling layer
X = AveragePooling2D(pool_size=(2,2),padding="same")(X)
    # output layer
X = Flatten()(X)
X = Dense(classes, activation="softmax", name="fc"+str(classes),
kernel_initializer=glorot_uniform(seed=0))(X)
    # create the model
model = Model(inputs=X_input, outputs=X, name="ResNet50")
return model
# # x_train : (3074,25,25,30) y_train: (3074)
# model = ResNet50(input_shape=(25,25,30),classes=16)
# model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
#
#
# model.fit(Xtrain,ytrain,epochs=2,batch_size=25)
# preds = model.evaluate(Xtest,ytest)
#
# print("误差率:",str(preds[0]))
# print("准确率:",str(preds[1]))
def main():
# x_train : (3074,25,25,30) y_train: (3074)
model = ResNet50(input_shape=(25, 25, 30), classes=16)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(Xtrain, ytrain, epochs=100, batch_size=25)
preds = model.evaluate(Xtest, ytest)
plt.figure(figsize=(5,5))
plt.ylim(0,1.1)
plt.grid()
plt.plot(history.history['accuracy'])
#plt.plot(history.history['val_acc'])
plt.ylabel( dataset+' _Accuracy')
plt.xlabel('Epochs')
plt.legend(['Training','Validation'])
plt.savefig("acc_curve.jpg")
plt.show()
plt.figure(figsize=(7,7))
plt.grid()
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.ylabel(dataset+' _Loss')
plt.xlabel('Epochs')
plt.legend(['Training','Validation'], loc='upper right')
plt.savefig("loss_curve.jpg")
plt.show()
print("误差率:", str(preds[0]))
print("准确率:", str(preds[1]))
if __name__ == "__main__":
main()
| 34.123944 | 159 | 0.630097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,773 | 0.288103 |
8da4e24daba79cfc5a237fbfd0bd61228b6bdc1d | 754 | py | Python | tests/test_data/utest/setup.py | gordonmessmer/pyp2rpm | 60145ba6fa49ad5bb29eeffa5765e10ba8417f03 | [
"MIT"
]
| 114 | 2015-07-13T12:38:27.000Z | 2022-03-23T15:05:11.000Z | tests/test_data/utest/setup.py | gordonmessmer/pyp2rpm | 60145ba6fa49ad5bb29eeffa5765e10ba8417f03 | [
"MIT"
]
| 426 | 2015-07-13T12:09:38.000Z | 2022-01-07T16:41:32.000Z | tests/test_data/utest/setup.py | Mattlk13/pyp2rpm | f9ced95877d88c96b77b2b8c510dc4ceaa10504a | [
"MIT"
]
| 51 | 2015-07-14T13:11:29.000Z | 2022-03-31T07:27:32.000Z | #!/usr/bin/env python3
from setuptools import setup, find_packages
requirements = ["pyp2rpm~=3.3.1"]
setup(
name="utest",
version="0.1.0",
description="Micro test module",
license="GPLv2+",
author="pyp2rpm Developers",
author_email='[email protected], [email protected], [email protected], [email protected]',
url='https://github.com/fedora-python/pyp2rpm',
install_requires=requirements,
include_package_data=True,
packages=find_packages(exclude=["test"]),
classifiers=(
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
),
)
| 30.16 | 99 | 0.66313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.54244 |
8da621c7d046b3bbba97fe0075833d24a4276a49 | 4,235 | py | Python | abstract_nas/train/preprocess.py | dumpmemory/google-research | bc87d010ab9086b6e92c3f075410fa6e1f27251b | [
"Apache-2.0"
]
| null | null | null | abstract_nas/train/preprocess.py | dumpmemory/google-research | bc87d010ab9086b6e92c3f075410fa6e1f27251b | [
"Apache-2.0"
]
| null | null | null | abstract_nas/train/preprocess.py | dumpmemory/google-research | bc87d010ab9086b6e92c3f075410fa6e1f27251b | [
"Apache-2.0"
]
| null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data preprocessing for ImageNet2012 and CIFAR-10."""
from typing import Any, Callable
# pylint: disable=unused-import
from big_vision.pp import ops_general
from big_vision.pp import ops_image
# pylint: enable=unused-import
from big_vision.pp import utils
from big_vision.pp.builder import get_preprocess_fn as _get_preprocess_fn
from big_vision.pp.registry import Registry
import tensorflow as tf
CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
CIFAR_STD = [0.247, 0.243, 0.261]
@Registry.register("preprocess_ops.random_crop_with_pad")
@utils.InKeyOutKey()
def get_random_crop_with_pad(crop_size,
padding):
"""Makes a random crop of a given size.
Args:
crop_size: either an integer H, where H is both the height and width of the
random crop, or a list or tuple [H, W] of integers, where H and W are
height and width of the random crop respectively.
padding: how much to pad before cropping.
Returns:
A function, that applies random crop.
"""
crop_size = utils.maybe_repeat(crop_size, 2)
padding = utils.maybe_repeat(padding, 2)
def _crop(image):
image = tf.image.resize_with_crop_or_pad(image,
crop_size[0] + padding[0],
crop_size[1] + padding[1])
return tf.image.random_crop(image,
[crop_size[0], crop_size[1], image.shape[-1]])
return _crop
def preprocess_cifar(split, **_):
"""Preprocessing functions for CIFAR-10 training."""
mean_str = ",".join([str(m) for m in CIFAR_MEAN])
std_str = ",".join([str(m) for m in CIFAR_STD])
if split == "train":
pp = ("decode|"
"value_range(0,1)|"
"random_crop_with_pad(32,4)|"
"flip_lr|"
f"vgg_value_range(({mean_str}),({std_str}))|"
"onehot(10, key='label', key_result='labels')|"
"keep('image', 'labels')")
else:
pp = ("decode|"
"value_range(0,1)|"
"central_crop(32)|"
f"vgg_value_range(({mean_str}),({std_str}))|"
"onehot(10, key='label', key_result='labels')|"
"keep('image', 'labels')")
return _get_preprocess_fn(pp)
def preprocess_imagenet(split,
autoaugment = False,
label_smoothing = 0.0,
**_):
"""Preprocessing functions for ImageNet training."""
if split == "train":
pp = ("decode_jpeg_and_inception_crop(224)|"
"flip_lr|")
if autoaugment:
pp += "randaug(2,10)|"
pp += "value_range(-1,1)|"
if label_smoothing:
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (1000 - 1)
pp += ("onehot(1000, key='label', key_result='labels', "
f"on_value={confidence}, off_value={low_confidence})|")
else:
pp += "onehot(1000, key='label', key_result='labels')|"
pp += "keep('image', 'labels')"
else:
pp = ("decode|"
"resize_small(256)|"
"central_crop(224)|"
"value_range(-1,1)|"
"onehot(1000, key='label', key_result='labels')|"
"keep('image', 'labels')")
return _get_preprocess_fn(pp)
PREPROCESS = {
"cifar10": preprocess_cifar,
"imagenet2012": preprocess_imagenet,
}
def get_preprocess_fn(dataset, split,
**preprocess_kwargs):
"""Makes a preprocessing function."""
preprocess_fn_by_split = PREPROCESS.get(dataset, lambda _: (lambda x: x))
split = "train" if "train" in split else "val"
preprocess_fn = preprocess_fn_by_split(split, **preprocess_kwargs)
return preprocess_fn
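# Illustrative usage sketch (names follow this module's PREPROCESS keys; the feature
# dict shape is an assumption based on the pipelines above, not verified elsewhere):
#   pp_fn = get_preprocess_fn("cifar10", "train")
#   features = {"image": image_tensor, "label": label_tensor}
#   out = pp_fn(features)  # -> {"image": ..., "labels": one-hot}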
| 32.576923 | 79 | 0.633058 | 0 | 0 | 0 | 0 | 967 | 0.228335 | 0 | 0 | 2,067 | 0.488076 |
8da6f40241c238cd5d1aecce8bbe81273d1e484a | 5,570 | py | Python | Decission_Tree/mytree.py | luoshao23/ML_algorithm | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | [
"MIT"
]
| 4 | 2017-06-19T06:33:38.000Z | 2019-01-31T12:07:12.000Z | Decission_Tree/mytree.py | luoshao23/ML_algorithm | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | [
"MIT"
]
| null | null | null | Decission_Tree/mytree.py | luoshao23/ML_algorithm | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | [
"MIT"
]
| 1 | 2017-12-06T08:41:06.000Z | 2017-12-06T08:41:06.000Z | from math import log
from PIL import Image, ImageDraw
from collections import Counter
import numpy as np
from pandas import DataFrame
# my_data = [['slashdot', 'USA', 'yes', 18, 213.2, 'None'],
# ['google', 'France', 'yes', 23, 121.2, 'Premium'],
# ['digg', 'USA', 'yes', 24, 21.32, 'Basic'],
# ['kiwitobes', 'France', 'yes', 23, 1.2, 'Basic'],
# ['google', 'UK', 'no', 21, .2, 'Premium'],
# ['(direct)', 'New Zealand', 'no', 12, 71.2, 'None'],
# ['(direct)', 'UK', 'no', 21, -21.2, 'Basic'],
# ['google', 'USA', 'no', 24, 241.2, 'Premium'],
# ['slashdot', 'France', 'yes', 19, 20, 'None'],
# ['digg', 'USA', 'no', 18, 1.0, 'None'],
# ['google', 'UK', 'no', 18, 2, 'None'],
# ['kiwitobes', 'UK', 'no', 19, 44, 'None'],
# ['digg', 'New Zealand', 'yes', 12, 27, 'Basic'],
# ['slashdot', 'UK', 'no', 21, 86, 'None'],
# ['google', 'UK', 'yes', 18, 2, 'Basic'],
# ['kiwitobes', 'France', 'yes', 19, 0.0, 'Basic']]
my_data = [[213.2, 'None'],
[121.2, 'Premium'],
[21.32, 'Basic'],
[1.2, 'Basic'],
[.2, 'Premium'],
[71.2, 'None'],
[-21.2, 'Basic'],
[241.2, 'Premium'],
[20, 'None'],
[1.0, 'None'],
[2, 'None'],
[44, 'None'],
[27, 'Basic'],
[86, 'None'],
[2, 'Basic'],
[0.0, 'Basic']]
data = np.array(DataFrame(my_data))
# my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
# ['google', 'France', 'yes', 23, 'None'],
# ['digg', 'USA', 'yes', 24, 'None'],
# ['kiwitobes', 'France', 'yes', 23, 'None'],
# ['google', 'UK', 'no', 21, 'None'],
# ['(direct)', 'New Zealand', 'no', 12, 'None'],
# ['(direct)', 'UK', 'no', 21, 'None'],
# ['google', 'USA', 'no', 24, 'None'],
# ['slashdot', 'France', 'yes', 19, 'None'],
# ['digg', 'USA', 'no', 18, 'None'],
# ['google', 'UK', 'no', 18, 'None'],
# ['kiwitobes', 'UK', 'no', 19, 'None'],
# ['digg', 'New Zealand', 'yes', 12, 'None'],
# ['slashdot', 'UK', 'no', 21, 'None'],
# ['google', 'UK', 'yes', 18, 'None'],
# ['kiwitobes', 'France', 'yes', 19, 'None']]
class decisionnode(object):
"""docstring for decisionnode"""
def __init__(self, col=-1, value=None, tb=None, fb=None, results=None):
self.col = col
self.value = value
self.tb = tb
self.fb = fb
self.results = results
def divideset(rows, column, value):
split_func = None
if isinstance(value, int) or isinstance(value, float):
split_func = lambda x: x[column] >= value
else:
split_func = lambda x: x[column] == value
set1 = [row for row in rows if split_func(row)]
set2 = [row for row in rows if not split_func(row)]
return (set1, set2)
def uniquecounts(rows, col=-1):
results = {}
for row in rows:
r = row[col]
results.setdefault(r, 0)
results[r] += 1
return results
def entropy(rows):
ent = 0.0
log2 = lambda x: log(x) / log(2)
res = uniquecounts(rows)
for k, v in res.items():
p = float(v) / len(rows)
ent -= p * log2(p)
return ent
def variance(rows, col=-1):
if len(rows) == 0:
return 0
data = [float(row[col]) for row in rows]
mean = sum(data) / len(data)
variance = sum([(d - mean)**2 for d in data]) / len(data)
return variance
def buildtree(rows, scoref=entropy, min_len=2):
if len(rows) <= min_len:
return decisionnode(results=uniquecounts(rows))
current_score = scoref(rows)
if current_score <= 0.0:
return decisionnode(results=uniquecounts(rows))
best_gain = 0.0
best_criteria = None
best_set = None
    for col in range(len(rows[0]) - 1):
if isinstance(rows[0][col], float):
uniq_value = split_float(rows, col)
else:
uniq_value = uniquecounts(rows, col)
for val in uniq_value.keys():
set1, set2 = divideset(rows, col, val)
p = float(len(set1)) / len(rows)
gain = current_score - (p * scoref(set1) + (1 - p) * scoref(set2))
if gain > best_gain and len(set1) > 0 and len(set2) > 0:
best_gain = gain
best_criteria = (col, val)
best_set = (set1, set2)
if best_gain > 0:
tbr = buildtree(best_set[0])
fbr = buildtree(best_set[1])
return decisionnode(col=best_criteria[0], value=best_criteria[1], tb=tbr, fb=fbr)
else:
return decisionnode(results=uniquecounts(rows))
def printtree(tree, indent='|', depth=0):
    if tree.results is not None:
        print(str(tree.results) + '##')
    else:
        if isinstance(tree.value, int) or isinstance(tree.value, float):
            print('depth %s ~ %s>=%s?' % (str(depth), str(tree.col), str(tree.value)))
        else:
            print('depth %s ~ %s is %s?' % (str(depth), str(tree.col), str(tree.value)))
        print(indent + 'T->')
        printtree(tree.tb, indent + ' |', depth + 1)
        print(indent + 'F->')
        printtree(tree.fb, indent + ' |', depth + 1)
def split_float(rows, column):
col = [row[column] for row in rows]
val = np.median(col)
return {val: 1}
def classify(obs, tree):
pass
def prune(tree, mingain):
pass
def mdclassify(obs, tree):
pass
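# Illustrative usage sketch (assumes the sample `data` array defined above):
# tree = buildtree(data)
# printtree(tree)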
| 31.828571 | 89 | 0.498025 | 264 | 0.047397 | 0 | 0 | 0 | 0 | 0 | 0 | 1,961 | 0.352065 |
8da70610f3402c8b44d3fbdf21a05f4f563b016b | 488 | py | Python | hidb/wrapper.py | sk-ip/hidb | 1394000992c016607e7af15095f058cd9cce007b | [
"MIT"
]
| null | null | null | hidb/wrapper.py | sk-ip/hidb | 1394000992c016607e7af15095f058cd9cce007b | [
"MIT"
]
| null | null | null | hidb/wrapper.py | sk-ip/hidb | 1394000992c016607e7af15095f058cd9cce007b | [
"MIT"
]
| null | null | null | from datetime import datetime
class fileWrapper(object):
def __init__(self):
self.data = {}
self.keys = set()
# JSON data size 16KB in Bytes
self.max_data_size = 16384
# Max database size 1GB in Bytes
self.max_database_size = 1073741824
self.current_database_size = 0
class dataWrapper:
def __init__(self, data, ttl):
self.data = data
self.timestamp = datetime.today().timestamp()
self.ttl = ttl
| 24.4 | 53 | 0.622951 | 452 | 0.92623 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.127049 |
8da8f86888f2ee041a3f2312c9709ef180e420d0 | 4,504 | py | Python | ion-channel-models/compare.py | sanmitraghosh/fickleheart-method-tutorials | d5ee910258a2656951201d4ada2a412804013bd5 | [
"BSD-3-Clause"
]
| null | null | null | ion-channel-models/compare.py | sanmitraghosh/fickleheart-method-tutorials | d5ee910258a2656951201d4ada2a412804013bd5 | [
"BSD-3-Clause"
]
| null | null | null | ion-channel-models/compare.py | sanmitraghosh/fickleheart-method-tutorials | d5ee910258a2656951201d4ada2a412804013bd5 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import model as m
"""
Run predictions with the calibrated models A and B and compare them against the data.
"""
predict_list = ['sinewave', 'staircase', 'activation', 'ap']
try:
which_predict = sys.argv[1]
except:
print('Usage: python %s [str:which_predict]' % os.path.basename(__file__))
sys.exit()
if which_predict not in predict_list:
raise ValueError('Input data %s is not available in the predict list' \
% which_predict)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
info_id_a = 'model_A'
info_a = importlib.import_module(info_id_a)
info_id_b = 'model_B'
info_b = importlib.import_module(info_id_b)
data_dir = './data'
savedir = './fig/compare'
if not os.path.isdir(savedir):
os.makedirs(savedir)
data_file_name = 'data-%s.csv' % which_predict
print('Predicting ', data_file_name)
saveas = 'compare-sinewave-' + which_predict
# Protocol
protocol = np.loadtxt('./protocol-time-series/%s.csv' % which_predict,
skiprows=1, delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]
# Load data
data = np.loadtxt(data_dir + '/' + data_file_name,
delimiter=',', skiprows=1) # headers
times = data[:, 0]
data = data[:, 1]
# Model
model_a = m.Model(info_a.model_file,
variables=info_a.parameters,
current_readout=info_a.current_list,
set_ion=info_a.ions_conc,
transform=None,
temperature=273.15 + info_a.temperature, # K
)
model_b = m.Model(info_b.model_file,
variables=info_b.parameters,
current_readout=info_b.current_list,
set_ion=info_b.ions_conc,
transform=None,
temperature=273.15 + info_b.temperature, # K
)
# Update protocol
model_a.set_fixed_form_voltage_protocol(protocol, protocol_times)
model_b.set_fixed_form_voltage_protocol(protocol, protocol_times)
# Load calibrated parameters
load_seed = 542811797
fix_idx = [1]
calloaddir_a = './out/' + info_id_a
calloaddir_b = './out/' + info_id_b
cal_params_a = []
cal_params_b = []
for i in fix_idx:
cal_params_a.append(np.loadtxt('%s/%s-solution-%s-%s.txt' % \
(calloaddir_a, 'sinewave', load_seed, i)))
cal_params_b.append(np.loadtxt('%s/%s-solution-%s-%s.txt' % \
(calloaddir_b, 'sinewave', load_seed, i)))
# Predict
predictions_a = []
for p in cal_params_a:
predictions_a.append(model_a.simulate(p, times))
predictions_b = []
for p in cal_params_b:
predictions_b.append(model_b.simulate(p, times))
# Plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10, 4),
gridspec_kw={'height_ratios': [1, 3]})
is_predict = ' prediction' if which_predict != 'sinewave' else ''
sim_protocol = model_a.voltage(times) # model_b should give the same thing
axes[0].plot(times, sim_protocol, c='#7f7f7f')
axes[0].set_ylabel('Voltage\n(mV)', fontsize=16)
axes[1].plot(times, data, alpha=0.5, label='Data')
for i, p in zip(fix_idx, predictions_a):
axes[1].plot(times, p, label='Model A' + is_predict)
for i, p in zip(fix_idx, predictions_b):
axes[1].plot(times, p, label='Model B' + is_predict)
# Zooms
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
sys.path.append('./protocol-time-series')
zoom = importlib.import_module(which_predict + '_to_zoom')
axes[1].set_ylim(zoom.set_ylim)
for i_zoom, (w, h, loc) in enumerate(zoom.inset_setup):
axins = inset_axes(axes[1], width=w, height=h, loc=loc,
axes_kwargs={"facecolor" : "#f0f0f0"})
axins.plot(times, data, alpha=0.5)
for i, p in zip(fix_idx, predictions_a):
axins.plot(times, p)
for i, p in zip(fix_idx, predictions_b):
axins.plot(times, p)
axins.set_xlim(zoom.set_xlim_ins[i_zoom])
axins.set_ylim(zoom.set_ylim_ins[i_zoom])
#axins.yaxis.get_major_locator().set_params(nbins=3)
#axins.xaxis.get_major_locator().set_params(nbins=3)
axins.set_xticklabels([])
axins.set_yticklabels([])
pp, p1, p2 = mark_inset(axes[1], axins, loc1=zoom.mark_setup[i_zoom][0],
loc2=zoom.mark_setup[i_zoom][1], fc="none", lw=0.75, ec='k')
pp.set_fill(True); pp.set_facecolor("#f0f0f0")
axes[1].legend()
axes[1].set_ylabel('Current (pA)', fontsize=16)
axes[1].set_xlabel('Time (ms)', fontsize=16)
plt.subplots_adjust(hspace=0)
plt.savefig('%s/%s' % (savedir, saveas), bbox_inches='tight', dpi=200)
plt.close()
| 31.277778 | 78 | 0.690941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 887 | 0.196936 |
8da906c8ad76ecde7a1bd94e5017709b02a7ce8e | 7,752 | py | Python | examples/services/classifier_service.py | bbbdragon/python-pype | f0618150cb4d2fae1f959127453fb6eca8db84e5 | [
"MIT"
]
| 8 | 2019-07-12T03:28:10.000Z | 2019-07-19T20:34:45.000Z | examples/services/classifier_service.py | bbbdragon/python-pype | f0618150cb4d2fae1f959127453fb6eca8db84e5 | [
"MIT"
]
| null | null | null | examples/services/classifier_service.py | bbbdragon/python-pype | f0618150cb4d2fae1f959127453fb6eca8db84e5 | [
"MIT"
]
| null | null | null | '''
python3 classifier_service.py data.csv
This service runs a scikit-learn classifier on data provided by the csv file data.csv.
The idea of this is a simple spam detector. In the file, you will see a number, 1 or
-1, followed by a pipe, followed by a piece of text. The text is designed to be an
email subject, and the number its label: 1 for spam and -1 for not spam.
The service loads the csv file, trains the classifier, and then waits for you to
send it a list of texts via the 'classify' route. This service can be tested using:
./test_classifier_service.sh
'''
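# Example interaction (illustrative only; the payload shape is defined by the
# 'classify' route below and the port by app.run at the bottom of this file):
#   POST http://localhost:10004/classify
#   {"texts": ["free herbal viagra buy now", "meeting moved to tuesday"]}
# The response is a JSON list of {"label": ..., "text": ..., "description": ...} objects.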
from flask import Flask,request,jsonify
from pype import pype as p
from pype import _,_0,_1,_p
from pype import _assoc as _a
from pype import _dissoc as _d
from pype import _do
from statistics import mean,stdev
from pype.vals import lenf
from sklearn.ensemble import RandomForestClassifier as Classifier
from sklearn.feature_extraction.text import TfidfVectorizer as Vectorizer
import sys
import csv
'''
We have to use lambda to define the read function because pype functions can't yet
deal with keyword args.
'''
read=lambda f: csv.reader(f,delimiter='|')
def train_classifier(texts,y):
'''
Here is a perfect example of the "feel it ... func it" philosophy:
The pype call uses the function arguments and function body to specify
three variables, texts, a list of strings, y, a list of floats, and vectorizer,
    a scikit-learn object that vectorizes text. This reiterates the advice that you
should use the function body and function arguments to declare your scope,
whenever you can.
Line-by-line, here we go:
{'vectorizer':vectorizer.fit,
'X':vectorizer.transform},
We build a dict, the first element of which is the fit vectorizer. Luckily, the
'fit' function returns an instance of the trained vectorizer, so we do not need to
use _do. This vectorizer is then assigned to 'vectorizer'. Because iterating
through dictionaries in Python3.6 preserves the order of the keys in which they
were declared, we can apply the fit function to the vectorizer on the texts,
assign that to the 'vectorizer' key. We need this instance of the vectorizer to
run the classifier for unknown texts.
After this, we apply the 'transform' to convert the texts into a training matrix
keyed by 'X', whose rows are texts and whose columns are words.
_a('classifier',(Classifier().fit,_['X'],y)),
Finally, we can build a classifier. _a, or _assoc, means we are adding a
key-value pair to the previous dictionary. This will be a new instance of our
Classifier, which is trained through the fit function on the text-word matrix 'X'
and the labels vector y.
_d('X'),
Since we don't need the X matrix anymore, we delete it from the returned JSON,
which now only contains 'vectorizer' and 'classifier', the two things we will
need to classify unknown texts.
'''
vectorizer=Vectorizer()
return p( texts,
{'vectorizer':vectorizer.fit,
'X':vectorizer.transform},
_a('classifier',(Classifier().fit,_['X'],y)),
_d('X'),
)
'''
We train the model in a global variable containing our vectorizer and classifier.
This use of global variables is only used for microservices, by the way.
Here is a line-by-line description:
sys.argv[1],
open,
Open the file.
read,
We build a csv reader with the above-defined 'read' function, which builds a csv reader
with a '|' delimiter. I chose this delimiter because the texts often have commas.
list,
Because csv.reader is a generator, it cannot be accessed twice, so I cast it to a list. This list is a list of 2-element lists, of the form [label,text], where label is a
string for the label ('1' or '-1'), and text is a string for the training text. So an
example of this would be ['1','free herbal viagra buy now'].
(train,[_1],[(float,[_0])])
This is a lambda which calls the 'train' function on two arguments, the first being
a list of texts, the second being a list of numerical labels.
We know that the incoming argument is a list of 2-element lists, so [_1] is a map,
which goes through this list - [] - and builds a new list containing only the second
element of each 2-element list, referenced by _1.
With the first elements of the 2-element lists, we must extract the first element and
cast it to a float. In [(float,[_0])], the [] specifies a map over the list of
2-element lists. (float,_0) specifies we are accessing the first element of the
2-element list ('1' or '-1'), and calls the float function on it, to cast it to a
float. If we do not cast it to a float, sklearn will not be able to process it as
a label.
'''
MODEL=p( sys.argv[1],
open,
read,
list,
(train_classifier,[_1],[(float,_0)]),
)
app = Flask(__name__)
@app.route('/classify',methods=['POST'])
def classify():
'''
This is the function that is run on a JSON containing one field, 'texts', which
    is a list of strings. This function will return a list of JSONs containing the
label for that text given by the classifier (1 or -1), and the original text.
Notice that, in this routing, we need access to 'texts' in (zip,_,texts).
Line-by-line:
global MODEL
We need this to refer to the model we trained at the initialization of the
microservice.
texts=request.get_json(force=True)['texts']
This extracts the 'texts' list from the json embedded in the request.
MODEL['vectorizer'].transform,
This uses the vectorizer to convert the list of strings in texts to a text-word
matrix that can be fed into the classifier.
MODEL['classifier'].predict,
This runs the prediction on the text-word matrix, producing an array of 1's and
-1's, where 1 indicates that the classification is positive (it is spam), and -1
indicates that the classification is negative (it is not spam).
(zip,_,texts),
We know that the n-th label produced by the classifier is for the n-th string in
texts, so we zip them together to produce an iterable of tuples (label,text).
[{'label':_0,
'text':_1,
'description':{_0 == 1: 'not spam',
'else':'spam'}}],
Here, we are performing a mapping over the (label,text) tuples produced by the
zip. For each tuple, we build a dictionary with three items. The first is the
    label, which is numeric, either 1.0 or -1.0. The second is the actual text
string.
However, to help the user, we also include a description of what the label means:
'description':{_0 == 1: 'not spam',
'else':'spam'}
The value is a switch dict. Since _0 is a Getter object, it overrides the ==
operator to produce a LamTup, which Python will accept, but which the pype
interpreter will run as an expression. _0 == 1 simply means, "the first element
of the (label,text) tuple, label, is 1. If this is true, 'description is set to
'not spam'. Otherwise, it is set to 'spam'.
jsonify
    This just turns the resulting JSON, a list of dictionaries, into something that can
be returned to the client over HTTP.
'''
global MODEL
texts=request.get_json(force=True)['texts']
return p( texts,
MODEL['vectorizer'].transform,
MODEL['classifier'].predict,
(zip,_,texts),
[{'label':_0,
'text':_1,
'description':{_0 == 1: 'not spam',
'else':'spam'}}],
jsonify)
if __name__=='__main__':
app.run(host='0.0.0.0',port=10004,debug=True)
| 36.739336 | 172 | 0.68434 | 0 | 0 | 0 | 0 | 2,754 | 0.355263 | 0 | 0 | 6,512 | 0.840041 |
8da9192128d87d058ba7b763d377c653bfe2eb10 | 2,657 | py | Python | ida_plugin/uefi_analyser.py | fengjixuchui/UEFI_RETool | 72c5d54c1dab9f58a48294196bca5ce957f6fb24 | [
"MIT"
]
| 240 | 2019-03-12T21:28:06.000Z | 2021-02-09T16:20:09.000Z | ida_plugin/uefi_analyser.py | fengjixuchui/UEFI_RETool | 72c5d54c1dab9f58a48294196bca5ce957f6fb24 | [
"MIT"
]
| 10 | 2019-09-09T08:38:35.000Z | 2020-11-30T15:19:30.000Z | ida_plugin/uefi_analyser.py | fengjixuchui/UEFI_RETool | 72c5d54c1dab9f58a48294196bca5ce957f6fb24 | [
"MIT"
]
| 53 | 2019-03-16T06:54:18.000Z | 2020-12-23T06:16:38.000Z | # SPDX-License-Identifier: MIT
import os
import idaapi
import idautils
from PyQt5 import QtWidgets
from uefi_analyser import dep_browser, dep_graph, prot_explorer, ui
AUTHOR = "yeggor"
VERSION = "1.2.0"
NAME = "UEFI_RETool"
WANTED_HOTKEY = "Ctrl+Alt+U"
HELP = "This plugin performs automatic analysis of the input UEFI module"
class UefiAnalyserPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_MOD | idaapi.PLUGIN_PROC | idaapi.PLUGIN_FIX
comment = HELP
help = HELP
wanted_name = NAME
wanted_hotkey = WANTED_HOTKEY
def init(self):
self._last_directory = idautils.GetIdbDir()
ui.init_menu(MenuHandler(self))
self._welcome()
return idaapi.PLUGIN_KEEP
def run(self, arg):
try:
self._analyse_all()
except Exception as err:
import traceback
print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")
def term(self):
pass
def load_json_log(self):
print(f"[{NAME}] try to parse JSON log file")
log_name = self._select_log()
print(f"[{NAME}] log name: {log_name}")
dep_browser.run(log_name)
dep_graph.run(log_name)
def _select_log(self):
file_dialog = QtWidgets.QFileDialog()
file_dialog.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
filename = None
try:
filename, _ = file_dialog.getOpenFileName(
file_dialog,
f"Select the {NAME} log file",
self._last_directory,
"Results files (*.json)",
)
except Exception as e:
print(f"[{NAME} error] {str(e)}")
if filename:
self._last_directory = os.path.dirname(filename)
return filename
@staticmethod
def _welcome():
print(f"\n{NAME} plugin by {AUTHOR} ({VERSION})")
print(f"{NAME} shortcut key is {WANTED_HOTKEY}\n")
@staticmethod
def _analyse_all():
prot_explorer.run()
class MenuHandler(idaapi.action_handler_t):
def __init__(self, plugin):
idaapi.action_handler_t.__init__(self)
self.plugin = plugin
def activate(self, ctx):
try:
self.plugin.load_json_log()
except Exception as err:
import traceback
print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")
return True
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
def PLUGIN_ENTRY():
try:
return UefiAnalyserPlugin()
except Exception as err:
import traceback
print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")
| 26.04902 | 73 | 0.616861 | 2,128 | 0.800903 | 0 | 0 | 215 | 0.080918 | 0 | 0 | 532 | 0.200226 |
8da9e0178f00d72e18dd60857fa82ec6eecb27f0 | 10,063 | py | Python | examples/exchange_demag/test_exchange_demag.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
]
| 10 | 2018-03-24T07:43:17.000Z | 2022-03-26T10:42:27.000Z | examples/exchange_demag/test_exchange_demag.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
]
| 21 | 2018-03-26T15:08:53.000Z | 2021-07-10T16:11:14.000Z | examples/exchange_demag/test_exchange_demag.py | davidcortesortuno/finmag | 9ac0268d2c0e45faf1284cee52a73525aa589e2b | [
"BSL-1.0"
]
| 7 | 2018-04-09T11:50:48.000Z | 2021-06-10T09:23:25.000Z | import os
import logging
import matplotlib
matplotlib.use('Agg')
import pylab as p
import numpy as np
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Exchange, Demag
from finmag.util.meshes import from_geofile, mesh_volume
import pytest
logger = logging.getLogger(name='finmag')
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
REL_TOLERANCE = 5e-4
Ms = 0.86e6
unit_length = 1e-9
mesh = from_geofile(os.path.join(MODULE_DIR, "bar30_30_100.geo"))
def run_finmag():
"""Run the finmag simulation and store data in averages.txt."""
sim = Sim(mesh, Ms, unit_length=unit_length)
sim.alpha = 0.5
sim.set_m((1, 0, 1))
exchange = Exchange(13.0e-12)
sim.add(exchange)
demag = Demag(solver="FK")
sim.add(demag)
fh = open(os.path.join(MODULE_DIR, "averages.txt"), "w")
fe = open(os.path.join(MODULE_DIR, "energies.txt"), "w")
logger.info("Time integration")
times = np.linspace(0, 3.0e-10, 61)
for counter, t in enumerate(times):
# Integrate
sim.run_until(t)
# Save averages to file
mx, my, mz = sim.m_average
fh.write(str(t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
# Energies
E_e = exchange.compute_energy()
E_d = demag.compute_energy()
fe.write(str(E_e) + " " + str(E_d) + "\n")
# Energy densities
if counter == 10:
exch_energy = exchange.energy_density_function()
demag_energy = demag.energy_density_function()
finmag_exch, finmag_demag = [], []
R = range(100)
for i in R:
finmag_exch.append(exch_energy([15, 15, i]))
finmag_demag.append(demag_energy([15, 15, i]))
# Store data
np.save(os.path.join(MODULE_DIR, "finmag_exch_density.npy"), np.array(finmag_exch))
np.save(os.path.join(MODULE_DIR, "finmag_demag_density.npy"), np.array(finmag_demag))
fh.close()
fe.close()
@pytest.mark.slow
def test_compare_averages():
ref = np.loadtxt(os.path.join(MODULE_DIR, "averages_ref.txt"))
if not os.path.isfile(os.path.join(MODULE_DIR, "averages.txt")) \
or (os.path.getctime(os.path.join(MODULE_DIR, "averages.txt")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
computed = np.loadtxt(os.path.join(MODULE_DIR, "averages.txt"))
dt = ref[:,0] - computed[:,0]
assert np.max(dt) < 1e-15, "Compare timesteps."
ref1, computed1 = np.delete(ref, [0], 1), np.delete(computed, [0], 1)
diff = ref1 - computed1
print "max difference: %g" % np.max(diff)
rel_diff = np.abs(diff / np.sqrt(ref1[0]**2 + ref1[1]**2 + ref1[2]**2))
print "test_averages, max. relative difference per axis:"
print np.nanmax(rel_diff, axis=0)
err = np.nanmax(rel_diff)
if err > 1e-2:
print "nmag:\n", ref1
print "finmag:\n", computed1
assert err < REL_TOLERANCE, "Relative error = {} is larger " \
"than tolerance (= {})".format(err, REL_TOLERANCE)
# Plot nmag data
nmagt = list(ref[:,0])*3
nmagy = list(ref[:,1]) + list(ref[:,2]) + list(ref[:,3])
p.plot(nmagt, nmagy, 'o', mfc='w', label='nmag')
# Plot finmag data
t = computed[:, 0]
x = computed[:, 1]
y = computed[:, 2]
z = computed[:, 3]
p.plot(t, x, 'k', label='$m_\mathrm{x}$ finmag')
p.plot(t, y, 'b-.', label='$m_\mathrm{y}$')
p.plot(t, z, 'r', label='$m_\mathrm{z}$')
p.axis([0, max(t), -0.2, 1.1])
p.xlabel("time (s)")
p.ylabel("$m$")
p.legend(loc='center right')
p.savefig(os.path.join(MODULE_DIR, "exchange_demag.pdf"))
p.savefig(os.path.join(MODULE_DIR, "exchange_demag.png"))
#p.show
p.close()
print "Comparison of development written to exchange_demag.pdf"
@pytest.mark.slow
def test_compare_energies():
ref = np.loadtxt(os.path.join(MODULE_DIR, "energies_ref.txt"))
if not os.path.isfile(os.path.join(MODULE_DIR, "energies.txt")) \
or (os.path.getctime(os.path.join(MODULE_DIR, "energies.txt")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
computed = np.loadtxt(os.path.join(MODULE_DIR, "energies.txt"))
assert np.size(ref) == np.size(computed), "Compare number of energies."
vol = mesh_volume(mesh)*unit_length**mesh.topology().dim()
#30x30x100nm^3 = 30x30x100=9000
# Compare exchange energy...
exch = computed[:, 0]/vol # <-- ... density!
exch_nmag = ref[:, 0]
diff = abs(exch - exch_nmag)
rel_diff = np.abs(diff / max(exch))
print "Exchange energy, max relative error:", max(rel_diff)
assert max(rel_diff) < 0.002, \
"Max relative error in exchange energy = {} is larger than " \
"tolerance (= {})".format(max(rel_diff), REL_TOLERANCE)
# Compare demag energy
demag = computed[:, 1]/vol
demag_nmag = ref[:, 1]
diff = abs(demag - demag_nmag)
rel_diff = np.abs(diff / max(demag))
print "Demag energy, max relative error:", max(rel_diff)
# Don't really know why this is ten times higher than everyting else.
assert max(rel_diff) < REL_TOLERANCE*10, \
"Max relative error in demag energy = {} is larger than " \
"tolerance (= {})".format(max(rel_diff), REL_TOLERANCE)
# Plot
p.plot(exch_nmag, 'o', mfc='w', label='nmag')
p.plot(exch, label='finmag')
p.xlabel("time step")
p.ylabel("$e_\mathrm{exch}\, (\mathrm{Jm^{-3}})$")
p.legend()
p.savefig(os.path.join(MODULE_DIR, "exchange_energy.pdf"))
p.savefig(os.path.join(MODULE_DIR, "exchange_energy.png"))
p.close()
p.plot(demag_nmag, 'o', mfc='w', label='nmag')
p.plot(demag, label='finmag')
p.xlabel("time step")
p.ylabel("$e_\mathrm{demag}\, (\mathrm{Jm^{-3}})$")
p.legend()
p.savefig(os.path.join(MODULE_DIR, "demag_energy.pdf"))
p.savefig(os.path.join(MODULE_DIR, "demag_energy.png"))
#p.show()
p.close()
print "Energy plots written to exchange_energy.pdf and demag_energy.pdf"
@pytest.mark.slow
def test_compare_energy_density():
"""
After ten time steps, compute the energy density through
the center of the bar (seen from x and y) from z=0 to z=100,
and compare the results with nmag and oomf.
"""
R = range(100)
# Run simulation only if not run before or changed since last time.
if not (os.path.isfile(os.path.join(MODULE_DIR, "finmag_exch_density.npy"))):
run_finmag()
elif (os.path.getctime(os.path.join(MODULE_DIR, "finmag_exch_density.npy")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
if not (os.path.isfile(os.path.join(MODULE_DIR, "finmag_demag_density.npy"))):
run_finmag()
elif (os.path.getctime(os.path.join(MODULE_DIR, "finmag_demag_density.npy")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
# Read finmag data
finmag_exch = np.load(os.path.join(MODULE_DIR, "finmag_exch_density.npy"))
finmag_demag = np.load(os.path.join(MODULE_DIR, "finmag_demag_density.npy"))
# Read nmag data
nmag_exch = [float(i) for i in open(os.path.join(MODULE_DIR, "nmag_exch_Edensity.txt"), "r").read().split()]
nmag_demag = [float(i) for i in open(os.path.join(MODULE_DIR, "nmag_demag_Edensity.txt"), "r").read().split()]
# Compare with nmag
nmag_exch = np.array(nmag_exch)
nmag_demag = np.array(nmag_demag)
rel_error_exch_nmag = np.abs(finmag_exch - nmag_exch)/np.linalg.norm(nmag_exch)
rel_error_demag_nmag = np.abs(finmag_demag - nmag_demag)/np.linalg.norm(nmag_demag)
print "Exchange energy density, max relative error from nmag:", max(rel_error_exch_nmag)
print "Demag energy density, max relative error from nmag:", max(rel_error_demag_nmag)
TOL_EXCH = 3e-2
TOL_DEMAG = 1e-2
assert max(rel_error_exch_nmag) < TOL_EXCH, \
"Exchange energy density, max relative error from nmag = {} is " \
"larger than tolerance (= {})".format(max(rel_error_exch_nmag), TOL_EXCH)
assert max(rel_error_demag_nmag) < TOL_DEMAG, \
"Demag energy density, max relative error from nmag = {} is larger " \
"than tolarance (= {})".format(max(rel_error_demag_nmag), TOL_DEMAG)
# Read oommf data
oommf_exch = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_exch_Edensity.txt"))
oommf_demag = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_demag_Edensity.txt"))
oommf_coords = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_coords_z_axis.txt")) * 1e9
    # Compare with oommf - FIXME: doesn't work at the moment
#rel_error_exch_oomf = np.abs(finmag_exch - oommf_exch)/np.linalg.norm(oommf_exch)
#rel_error_demag_oomf = np.abs(finmag_demag - oommf_demag)/np.linalg.norm(oommf_demag)
#print "Rel error exch, oommf:", max(rel_error_exch_oommf)
#print "Rel error demag, oommf:", max(rel_error_demag_oommf)
# Plot exchange energy density
p.plot(R, finmag_exch, 'k-')
p.plot(R, nmag_exch, 'r^:', alpha=0.5)
p.plot(oommf_coords, oommf_exch, "bv:", alpha=0.5)
p.xlabel("$x\, (\mathrm{nm})$")
p.ylabel("$e_\mathrm{exch}\, (\mathrm{Jm^{-3}})$")
p.legend(["finmag", "nmag", "oommf"], loc="upper center")
p.savefig(os.path.join(MODULE_DIR, "exchange_density.pdf"))
p.savefig(os.path.join(MODULE_DIR, "exchange_density.png"))
p.close()
# Plot demag energy density
p.plot(R, finmag_demag, 'k-')
p.plot(R, nmag_demag, 'r^:', alpha=0.5)
p.plot(oommf_coords, oommf_demag, "bv:", alpha=0.5)
p.xlabel("$x\, (\mathrm{nm})$")
p.ylabel("$e_\mathrm{demag}\, (\mathrm{Jm^{-3}})$")
p.legend(["finmag", "nmag", "oommf"], loc="upper center")
p.savefig(os.path.join(MODULE_DIR, "demag_density.pdf"))
p.savefig(os.path.join(MODULE_DIR, "demag_density.png"))
#p.show()
p.close()
print "Energy density plots written to exchange_density.pdf and demag_density.pdf"
if __name__ == '__main__':
run_finmag()
test_compare_averages()
test_compare_energies()
test_compare_energy_density()
| 37.408922 | 114 | 0.644241 | 0 | 0 | 0 | 0 | 7,910 | 0.786048 | 0 | 0 | 3,248 | 0.322767 |
8daa3414a09b9f3c7c95225a1a7fdf929b8d3dfe | 440 | py | Python | BPt/default/options/samplers.py | sahahn/ABCD_ML | a8b1c48c33f3fdc046c8922964f1c456273238da | [
"MIT"
]
| 1 | 2019-09-25T23:23:49.000Z | 2019-09-25T23:23:49.000Z | BPt/default/options/samplers.py | sahahn/ABCD_ML | a8b1c48c33f3fdc046c8922964f1c456273238da | [
"MIT"
]
| 1 | 2020-04-20T20:53:27.000Z | 2020-04-20T20:53:27.000Z | BPt/default/options/samplers.py | sahahn/ABCD_ML | a8b1c48c33f3fdc046c8922964f1c456273238da | [
"MIT"
]
| 1 | 2019-06-21T14:44:40.000Z | 2019-06-21T14:44:40.000Z | from ..helpers import get_obj_and_params, all_from_objects
from ...extensions.samplers import OverSampler
SAMPLERS = {
'oversample': (OverSampler, ['default']),
}
def get_sampler_and_params(obj_str, extra_params, params, **kwargs):
obj, extra_obj_params, obj_params =\
get_obj_and_params(obj_str, SAMPLERS, extra_params, params)
return obj(**extra_obj_params), obj_params
all_obj_keys = all_from_objects(SAMPLERS) | 25.882353 | 68 | 0.756818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.047727 |
a5c09a3f2e4a0708c742cfe5829c7e01efbe2a70 | 300 | py | Python | ABC151-200/ABC192/abc192_c.py | billyio/atcoder | 9d16765f91f28deeb7328fcc6c19541ee790941f | [
"MIT"
]
| 1 | 2021-02-01T08:48:07.000Z | 2021-02-01T08:48:07.000Z | ABC151-200/ABC192/abc192_c.py | billyio/atcoder | 9d16765f91f28deeb7328fcc6c19541ee790941f | [
"MIT"
]
| null | null | null | ABC151-200/ABC192/abc192_c.py | billyio/atcoder | 9d16765f91f28deeb7328fcc6c19541ee790941f | [
"MIT"
]
| null | null | null | N, K = map(int, input().split())
ans = N
for i in range(K):
num_list = [int(n) for n in str(ans)]
g1 = sorted(num_list, reverse=True)
g1 = ''.join((str(g) for g in g1))
g2 = sorted(num_list, reverse=False)
g2 = ''.join((str(g) for g in g2))
ans = int(g1) - int(g2)
print(ans) | 27.272727 | 41 | 0.566667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.013333 |
a5c0fa60cac177d2865547e53143112bdfdc7111 | 1,008 | py | Python | testing.py | madjabal/morphine | 2c76b10a7276936042913d609ad773fbc08b0887 | [
"MIT"
]
| 15 | 2017-03-11T18:25:04.000Z | 2022-03-31T19:54:31.000Z | testing.py | madjabal/morphine | 2c76b10a7276936042913d609ad773fbc08b0887 | [
"MIT"
]
| 2 | 2018-10-17T15:08:36.000Z | 2021-06-08T13:34:56.000Z | testing.py | madjabal/morphine | 2c76b10a7276936042913d609ad773fbc08b0887 | [
"MIT"
]
| 2 | 2018-07-25T15:15:54.000Z | 2019-06-14T11:16:41.000Z | # Python modules
import time
from datetime import timedelta
def consistency(func, args, expected, n=10**4):
"""Analyze and report on the consistency of a function."""
print('\n[CONSISTENCY TEST] {0}'.format(func.__doc__.format(*args)))
def show(num, den, t, p, end='\r'):
print('{3}|{4:.3f}: {0}/{1} = {2}'.format(num, den, num/den, str(timedelta(seconds=t)), p), end=end)
start = time.time()
interval = start
tally = 0
for i in range(n):
isCorrect = func(*args) == expected
tally += (1 if isCorrect else 0)
diff = time.time() - interval
if diff > 0.01:
interval = time.time()
show(tally, (i+1), time.time() - start, (i+1)/n)
show(tally, n, time.time() - start, (i+1)/n, '\n')
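# Illustrative usage (hypothetical function under test; not part of this module):
# def add(a, b):
#     """add({0}, {1})"""
#     return a + b
# consistency(add, (2, 3), expected=5, n=10**3)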
def max_over(n, func, args=None):
"""Compute the maximum value returned by func(args) in n runs."""
m = 0
for i in range(n):
v = func(*args) if args else func()
if v > m:
m = v
return m | 30.545455 | 108 | 0.558532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.199405 |
a5c112fb1800922ae32e15c8c2c3119937a66895 | 520 | py | Python | misc/python/fibonacci.py | saranshbht/codes-and-more-codes | 0bd2e46ca613b3b81e1196d393902e86a43aa353 | [
"MIT"
]
| null | null | null | misc/python/fibonacci.py | saranshbht/codes-and-more-codes | 0bd2e46ca613b3b81e1196d393902e86a43aa353 | [
"MIT"
]
| null | null | null | misc/python/fibonacci.py | saranshbht/codes-and-more-codes | 0bd2e46ca613b3b81e1196d393902e86a43aa353 | [
"MIT"
]
| null | null | null | from itertools import permutations
from collections import Counter
import time
print(time.time())
s=["dgajkhdjkjfkl","ahfjkh","jfskoj","hfakljfio","fjfjir","jiosj","jiojf","jriosj","jiorjf","jhhhhaskgasjdfljjriof"]
t=10
while t>0:
S=s[10-t]
c=dict(Counter(S))
Cperm=list(permutations(c.values()))
flag= False
for i in Cperm:
for j in range(2,len(i)):
if i[j]==i[j-1]+i[j-2]:
print("Dynamic")
flag= True
break
if flag==True:
break
else:
print("Not")
t=t-1
print(time.time())
| 18.571429 | 117 | 0.646154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.225 |
a5c55462952f35e96e4d815b3891933e684d12b8 | 784 | py | Python | rhasspy/speech.py | Wil-Peters/HomeAutomation | ab4f78d9fad42093435732233e99003f12dca5e7 | [
"MIT"
]
| 2 | 2020-04-09T20:29:15.000Z | 2021-01-20T09:21:02.000Z | rhasspy/speech.py | Wil-Peters/HomeAutomation | ab4f78d9fad42093435732233e99003f12dca5e7 | [
"MIT"
]
| null | null | null | rhasspy/speech.py | Wil-Peters/HomeAutomation | ab4f78d9fad42093435732233e99003f12dca5e7 | [
"MIT"
]
| null | null | null | import configparser
import os
from typing import ByteString
import requests
from core.speaker import Speaker
from core.texttospeech import TextToSpeechGenerator
class RhasspySpeech(Speaker, TextToSpeechGenerator):
def __init__(self):
Speaker.__init__(self)
TextToSpeechGenerator.__init__(self)
config = configparser.ConfigParser()
config_file = os.path.dirname(os.path.abspath(__file__)) + "/config.ini"
config.read(config_file)
self._api_url = config["Rhasspy"]["Speaker"]
def speak_text(self, text: str):
requests.post(self._api_url + "/text-to-speech", text)
def generate_speech_file(self, text: str) -> ByteString:
return requests.post(self._api_url + "/text-to-speech?play=false", text).content
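# Illustrative usage sketch (assumes a config.ini providing the [Rhasspy] "Speaker" base URL):
# speaker = RhasspySpeech()
# speaker.speak_text("Hello from the home automation hub")
# wav_bytes = speaker.generate_speech_file("Door unlocked")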
| 29.037037 | 88 | 0.715561 | 617 | 0.78699 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.096939 |
a5c6922a61844f38e222e52aacc04701fb1c3022 | 4,953 | py | Python | main.py | rodrigobercinimartins/export-import-por-mesorregiao-brasil | 73b8126e593eec63ae29eb81a2967f566ec93bc9 | [
"MIT"
]
| 1 | 2020-04-06T17:55:04.000Z | 2020-04-06T17:55:04.000Z | main.py | rodrigobercini/export-import-por-mesorregiao-brasil | 73b8126e593eec63ae29eb81a2967f566ec93bc9 | [
"MIT"
]
| null | null | null | main.py | rodrigobercini/export-import-por-mesorregiao-brasil | 73b8126e593eec63ae29eb81a2967f566ec93bc9 | [
"MIT"
]
| null | null | null | import pandas as pd
import os
import ssl
# I'm getting SSL certificate issues when downloading files from MDIC.
# The code below is a hack to get around this issue.
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
class ExportsByMesoregion:
def __init__(self
, start_year:int
, end_year:int = None
, transaction_type:str='exports'):
self.start_year = start_year
if end_year is not None:
self.end_year = end_year
else:
self.end_year = start_year
self.TRANSACTION_TYPES = {
'exports':'EXP'
, 'imports':'IMP'
}
if transaction_type in self.TRANSACTION_TYPES:
self.transaction_type = transaction_type
else:
raise ValueError(f"Invalid transaction type. Valid values are: {''.join(self.TRANSACTION_TYPES)}")
self.BASE_URL = 'https://balanca.economia.gov.br/balanca/bd/comexstat-bd/mun/'
self.REPO_FOLDER_PATH = os.path.dirname(os.path.abspath(__file__))
self.MUN_FOLDER_PATH = os.path.join(self.REPO_FOLDER_PATH, 'data', 'municipalities',"")
self.MESO_FOLDER_PATH = os.path.join(self.REPO_FOLDER_PATH, 'data', 'mesoregions',"")
self.MUN_LOOKUP_FILENAME = os.path.join(self.REPO_FOLDER_PATH, 'municipalities_lookup.xlsx')
def create_folder_if_not_exists(self, folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def get_file_name(self, transaction_type, year, division_type):
return f'{self.TRANSACTION_TYPES[transaction_type]}_{year}_{division_type}.csv'
def download_mun_data(self):
self.create_folder_if_not_exists(self.MUN_FOLDER_PATH)
for year in range(self.start_year, self.end_year+1):
file_name = self.get_file_name(self.transaction_type, year, 'MUN')
file_path = f'{self.MUN_FOLDER_PATH}{file_name}'
if os.path.isfile(file_path):
print(f'{year} - Mun {self.transaction_type} already exists. Skipping download...')
continue
url = f'{self.BASE_URL}{file_name}'
pd.read_csv(url, sep=';', encoding='UTF-8').to_csv(file_path, sep=';', encoding='UTF-8')
print(f'{year} - Municipalities {self.transaction_type} finished downloading')
def add_meso_to_mun_data(self, year):
mun_exp_filename = self.get_file_name(self.transaction_type, year, 'MUN')
mun_exports = pd.read_csv(f'{self.MUN_FOLDER_PATH}{mun_exp_filename}', sep=';')
municip_codes = pd.read_excel(self.MUN_LOOKUP_FILENAME)
mun_with_meso = mun_exports.merge(municip_codes, left_on= 'CO_MUN',
right_on='Código Município Completo (MDIC)')
mun_with_meso.drop(['Município', 'CO_MUN', 'Nome_Microrregião',
'Microrregião Geográfica',
'Código Município Completo (MDIC)'], axis=1, inplace=True)
print(f'{year} - Mesoregions info added to municipalities data')
return mun_with_meso
def aggregate_by_mesoregion(self, year, mun_with_meso):
        meso_aggregated = mun_with_meso.groupby(['CO_ANO','Nome_Mesorregião','CD_GEOCME', 'CO_MES', 'CO_PAIS', 'SH4'],as_index=False).sum()  # Aggregate data by mesoregion
meso_aggregated.drop(['UF', 'Mesorregião Geográfica', 'Código Município Completo (IBGE)'], axis=1, inplace=True)
print(f'{year} - Mesoregions data aggregated')
return meso_aggregated
def download_data_and_aggregate_by_meso(self):
self.create_folder_if_not_exists(self.MESO_FOLDER_PATH)
self.download_mun_data()
for year in (range(self.start_year, self.end_year+1)):
mun_with_meso = self.add_meso_to_mun_data(year)
meso_aggregated = self.aggregate_by_mesoregion(year, mun_with_meso)
meso_exp_filename = self.get_file_name(self.transaction_type, year, 'MESO')
meso_aggregated.to_csv(f'{self.MESO_FOLDER_PATH}{meso_exp_filename}', encoding='UTF-8')
print(f'{year} - Mesoregions data saved')
def download_data_and_add_meso_info(self):
self.create_folder_if_not_exists(self.MUN_FOLDER_PATH)
self.download_mun_data()
final_df = pd.DataFrame()
for year in (range(self.start_year, self.end_year+1)):
mun_with_meso = self.add_meso_to_mun_data(year)
final_df = final_df.append(mun_with_meso)
return final_df
if __name__ == '__main__':
ExportsObject = ExportsByMesoregion(start_year=2020, end_year=2020, transaction_type='imports')
ExportsObject.download_data_and_aggregate_by_meso() | 43.447368 | 173 | 0.657985 | 4,419 | 0.889672 | 0 | 0 | 0 | 0 | 0 | 0 | 1,249 | 0.25146 |
a5c927734733b551301c1522c13b6095afdcc07d | 903 | py | Python | backend/customers/migrations/0001_initial.py | cbreezy623/modabella | b68bcc8aca903887d31489baae609ed70fe3dba7 | [
"Apache-2.0"
]
| null | null | null | backend/customers/migrations/0001_initial.py | cbreezy623/modabella | b68bcc8aca903887d31489baae609ed70fe3dba7 | [
"Apache-2.0"
]
| null | null | null | backend/customers/migrations/0001_initial.py | cbreezy623/modabella | b68bcc8aca903887d31489baae609ed70fe3dba7 | [
"Apache-2.0"
]
| null | null | null | # Generated by Django 3.1.1 on 2020-10-02 01:11
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(default='', max_length=120)),
('last_name', models.CharField(default='', max_length=120)),
('phone', phonenumber_field.modelfields.PhoneNumberField(blank=True, default='', max_length=128, region=None)),
('email', models.EmailField(blank=True, default='', max_length=254)),
('notes', models.TextField(blank=True, default='')),
],
),
]
| 33.444444 | 127 | 0.604651 | 773 | 0.856035 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.131783 |
a5ca6ea7872c55e908f6afc4233961e95a90159a | 1,366 | py | Python | sendUAV/recevier.py | RobEn-AAST/AI-UAVC | 732683fd5821d492b772cc5f966e86aed164a68c | [
"MIT"
]
| 16 | 2022-02-05T15:51:13.000Z | 2022-02-05T17:38:54.000Z | sendUAV/recevier.py | RobEn-AAST/AI-UAVC | 732683fd5821d492b772cc5f966e86aed164a68c | [
"MIT"
]
| null | null | null | sendUAV/recevier.py | RobEn-AAST/AI-UAVC | 732683fd5821d492b772cc5f966e86aed164a68c | [
"MIT"
]
| null | null | null | from socket import socket, AF_INET, SOCK_STREAM, IPPROTO_TCP
import struct
import pickle
class ServerSock(socket):
def __init__(self, PORT):
super().__init__(AF_INET, SOCK_STREAM, IPPROTO_TCP)
self.bind(("", PORT))
self.listen()
def getMessage(self):
payload_size = struct.calcsize(">L")
conn, _ = self.accept()
conn.settimeout(5)
while True:
try:
string = b""
while len(string) < payload_size:
bits = conn.recv(4096)
string += bits
packed_msg_size = string[:payload_size]
data = string[payload_size:]
msg_size = struct.unpack(">L", packed_msg_size)[0]
while len(data) < msg_size:
bits = conn.recv(4096)
data += bits
frame_data = data[:msg_size]
data = data[msg_size:]
msg = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
                # the full message has been read (its length came from the header), so return it
return msg
except Exception:
conn.close()
return self.getMessage()
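# Illustrative client-side sketch (assumed counterpart to this server; host/port are examples):
# import pickle, socket, struct
# payload = pickle.dumps({"telemetry": "ok"})
# with socket.create_connection(("127.0.0.1", 5500)) as conn:
#     conn.sendall(struct.pack(">L", len(payload)) + payload)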
if __name__ == "__main__":
server = ServerSock(5500)
while True:
print(server.getMessage())
| 29.695652 | 82 | 0.51757 | 1,148 | 0.84041 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.057101 |
a5cabad6e15a3e94d18ccf5c8c5a2de2396af9ef | 3,867 | py | Python | graphdot/minipandas/dataframe.py | yhtang/GraphDot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
]
| 9 | 2020-02-14T18:07:39.000Z | 2021-12-15T12:07:31.000Z | graphdot/minipandas/dataframe.py | yhtang/graphdot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
]
| 3 | 2020-03-19T19:07:26.000Z | 2021-02-24T06:08:51.000Z | graphdot/minipandas/dataframe.py | yhtang/graphdot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
]
| 3 | 2019-10-17T06:11:18.000Z | 2021-05-07T11:56:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import numpy as np
import pandas as pd
from .series import Series
class DataFrame:
def __init__(self, data=None):
self._data = {}
if isinstance(data, dict):
for key, value in data.items():
self[key] = value
def __getitem__(self, key):
if isinstance(key, str):
return self._data[key]
elif hasattr(key, '__iter__'):
i = np.array(key)
if np.issubsctype(i.dtype, np.bool_):
return self.__class__({k: v[i] for k, v in self._data.items()})
else:
return self.__class__({k: self._data[k] for k in key})
else:
raise TypeError(f'Invalid column index {key}')
def __setitem__(self, key, value):
self._data[key] = Series(value)
def __getattr__(self, name):
if '_data' in self.__dict__ and name in self._data.keys():
return self._data[name]
else:
raise AttributeError(f'Dataframe has no column {name}.')
def __repr__(self):
return repr(self._data)
def __len__(self):
return max([0] + [len(array) for array in self._data.values()])
def __contains__(self, item):
return item in self._data
def __iter__(self):
for key in self._data:
yield key
@property
def columns(self):
return list(self._data.keys())
def rowtype(self, pack=True):
cols = np.array(list(self.columns))
ctypes = {key: np.dtype(self[key].concrete_type) for key in cols}
if pack is True:
perm = np.argsort([-ctypes[key].itemsize for key in self.columns])
cols = cols[perm]
packed_dtype = np.dtype([(key, ctypes[key].newbyteorder('='))
for key in cols], align=True)
return packed_dtype
def rows(self, rowname='row'):
'''Iterate over rows in the form of named tuples while skipping columns
that do not have valid field names.'''
visible = [key for key in self._data if key.isidentifier()]
class RowTuple(namedtuple(rowname, visible)):
def __getitem__(self, key):
'''To support both member access and index access.'''
if isinstance(key, str):
return getattr(self, key)
else:
return super().__getitem__(key)
RowTuple.__name__ = rowname
# for row in zip(*[self[key] for key in visible]):
# yield RowTuple(row)
for i in range(len(self)):
yield RowTuple(*[self[key][i] for key in visible])
def itertuples(self, tuplename='tuple'):
'''Alias of `rows()` for API compatibility with pandas.'''
yield from self.rows(rowname=tuplename)
def iterrows(self, rowname='row'):
'''Iterate in (row_id, row_content) tuples.'''
yield from enumerate(self.rows(rowname=rowname))
def iterstates(self, pack=True):
'''Iterate over rows, use the .state attribute if element is not
scalar.'''
cols = np.array(list(self.rowtype(pack=pack).fields.keys()))
for row in zip(*[self[key] for key in cols]):
yield tuple(i if np.isscalar(i) else i.state for i in row)
def to_pandas(self):
return pd.DataFrame(self._data)
def copy(self, deep=False):
if deep:
return self.__class__({
key: np.copy(value) for key, value in self._data.items()
})
else:
return self.__class__(self._data)
def drop(self, keys, inplace=False):
if inplace is True:
for key in keys:
del self._data[key]
else:
return self[[k for k in self.columns if k not in keys]]
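# Illustrative usage sketch (standalone; only this module is assumed):
# df = DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
# for row in df.rows():
#     print(row.a, row['b'])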
| 32.495798 | 79 | 0.573054 | 3,717 | 0.96121 | 1,473 | 0.380915 | 71 | 0.01836 | 0 | 0 | 577 | 0.149211 |
a5cb7a30978758aaea2edade994cdb342894093c | 21,620 | py | Python | pedal/questions/loader.py | acbart/python-analysis | 3cd2cc22d50a414ae6b62c74d2643be4742238d4 | [
"MIT"
]
| 14 | 2019-08-22T03:40:23.000Z | 2022-03-13T00:30:53.000Z | pedal/questions/loader.py | pedal-edu/pedal | 3cd2cc22d50a414ae6b62c74d2643be4742238d4 | [
"MIT"
]
| 74 | 2019-09-12T04:35:56.000Z | 2022-01-26T19:21:32.000Z | pedal/questions/loader.py | acbart/python-analysis | 3cd2cc22d50a414ae6b62c74d2643be4742238d4 | [
"MIT"
]
| 2 | 2021-01-11T06:34:00.000Z | 2021-07-21T12:48:07.000Z | """
instructions: blah blah blah
settings:
tifa:
enabled: True
unit test by function (bool): Whether to test each function entirely before moving onto the
next one, or to first check that all functions have been defined, and then
checking their parameters, etc. Defaults to True.
show case details (bool): Whether to show the specific args/inputs that caused a test case
to fail.
rubric:
functions:
total: 100
definition: 10
signature: 10
cases: 80
global:
variables:
name:
type:
value:
inputs:
prints:
# Sandbox, type checking
functions:
documentation: "any" or "google"
coverage: 100%
tests: int
name: do_complicated_stuff
arity: int
signature: int, int -> float
signature: int, int, list[int], (int->str), dict[str:list[int]] -> list[int]
parameters:
name: banana
exactly:
regex:
includes:
within:
type: int
cases:
- arguments (list): 5, 4
inputs (list):
returns (Any):
equals: 27.3
is:
is not: _1
name (str): Meaningful name for tracking purposes? Or possibly separate into label/id/code
hint (str): Message to display to user
prints:
exactly:
regex:
startswith:
endswith:
plots:
# Cait
syntax:
prevent:
___ + ___
# Override any of our default feedback messages
messages:
FUNCTION_NOT_DEFINED: "Oops you missed a function"
"""
from pedal.core.commands import set_success, give_partial
from pedal.core.feedback_category import FeedbackCategory
from pedal.questions.constants import TOOL_NAME
from pedal.sandbox.commands import get_sandbox
from pedal.utilities.comparisons import equality_test
from pedal.core.report import MAIN_REPORT
SETTING_SHOW_CASE_DETAILS = "show case details"
DEFAULT_SETTINGS = {
SETTING_SHOW_CASE_DETAILS: True
}
EXAMPLE_DATA = {
'functions': [{
'name': 'do_complicated_stuff',
'signature': 'int, int, [int] -> list[int]',
'cases': [
{'arguments': "5, 4, 3", 'returns': "12"},
]
}]
}
class FeedbackException(Exception):
"""
"""
def __init__(self, category, label, **fields):
self.category = category
self.label = label
self.fields = fields
def as_message(self):
"""
Returns:
"""
return FEEDBACK_MESSAGES[self.category][self.label].format(**self.fields)
def check_function_defined(function, function_definitions, settings=None):
"""
Args:
function:
function_definitions:
settings:
Returns:
"""
# 1. Is the function defined syntactically?
# 1.1. With the right name?
function_name = function['name']
if function_name not in function_definitions:
raise FeedbackException(FeedbackCategory.SPECIFICATION, 'missing_function', function_name=function_name)
definition = function_definitions[function_name]
return definition
def check_function_signature(function, definition, settings=None):
"""
Args:
function:
definition:
settings:
Returns:
"""
function_name = function['name']
# 1.2. With the right parameters and return type?
# 1.2.1 'arity' style - simply checks number of parameters
if 'arity' in function or 'parameters' in function:
expected_arity = function['arity'] if 'arity' in function else len(function['parameters'])
actual_arity = len(definition.args.args)
if actual_arity < expected_arity:
raise FeedbackException(FeedbackCategory.SPECIFICATION, 'insufficient_args',
function_name=function_name, expected_arity=expected_arity,
actual_arity=actual_arity)
elif actual_arity > expected_arity:
raise FeedbackException(FeedbackCategory.SPECIFICATION, 'excessive_args',
function_name=function_name, expected_arity=expected_arity,
actual_arity=actual_arity)
# 1.2.2 'parameters' style - checks each parameter's name and type
if 'parameters' in function:
expected_parameters = function['parameters']
actual_parameters = definition.args.args
for expected_parameter, actual_parameter in zip(expected_parameters, actual_parameters):
actual_parameter_name = get_arg_name(actual_parameter)
if 'name' in expected_parameter:
if actual_parameter_name != expected_parameter['name']:
raise FeedbackException(FeedbackCategory.SPECIFICATION, 'wrong_parameter_name',
function_name=function_name,
expected_parameter_name=expected_parameter['name'],
actual_parameter_name=actual_parameter_name
)
if 'type' in expected_parameter:
actual_parameter_type = parse_type(actual_parameter)
# TODO: Handle non-string expected_parameter types (dict)
expected_parameter_type = parse_type_value(expected_parameter['type'], True)
if not type_check(expected_parameter_type, actual_parameter_type):
raise FeedbackException(FeedbackCategory.SPECIFICATION, 'wrong_parameter_type',
function_name=function_name,
parameter_name=actual_parameter_name,
expected_parameter_type=expected_parameter_type,
actual_parameter_type=actual_parameter_type)
# 1.2.3. 'returns' style - checks the return type explicitly
if 'returns' in function:
expected_returns = parse_type_value(function['returns'], True)
actual_returns = parse_type(definition.returns)
if actual_returns != "None":
if not type_check(expected_returns, actual_returns):
raise FeedbackException(FeedbackCategory.SPECIFICATION, "wrong_returns",
function_name=function_name, expected_returns=expected_returns,
actual_returns=actual_returns)
elif expected_returns != "None":
raise FeedbackException(FeedbackCategory.SPECIFICATION, "missing_returns",
function_name=function_name, expected_returns=expected_returns)
# 1.2.4. 'signature' style - shortcut for specifying the types
if 'signature' in function:
expected_signature = function['signature']
actual_returns = parse_type(definition.returns)
actual_parameters = ", ".join(parse_type(actual_parameter.annotation)
for actual_parameter in definition.args.args)
actual_signature = "{} -> {}".format(actual_parameters, actual_returns)
if not type_check(expected_signature, actual_signature):
raise FeedbackException(FeedbackCategory.SPECIFICATION, "wrong_signature",
function_name=function_name, expected_signature=expected_signature,
actual_signature=actual_signature)
# All good here!
return True
def check_function_value(function, values, settings):
"""
2. Does the function exist in the data?
:param function:
:param values:
:param settings:
:return:
"""
function_name = function['name']
# 2.1. Does the name exist in the values?
if function_name not in values:
raise FeedbackException(FeedbackCategory.SPECIFICATION, "function_not_available", function_name=function_name)
function_value = values[function_name]
# 2.2. Is the name bound to a callable?
if not callable(function_value):
raise FeedbackException(FeedbackCategory.SPECIFICATION, "name_is_not_function", function_name=function_name)
# All good here
return function_value
class TestCase:
"""
"""
CASE_COUNT = 0
def __init__(self, function_name, case_name):
self.function_name = function_name
if case_name is None:
self.case_name = str(TestCase.CASE_COUNT)
TestCase.CASE_COUNT += 1
else:
self.case_name = case_name
self.arguments, self.has_arguments = [], False
self.inputs, self.has_inputs = [], False
self.error, self.has_error = None, False
self.message, self.has_message = None, False
self.expected_prints, self.has_expected_prints = None, False
self.expected_returns, self.has_expected_returns = None, False
self.prints = []
self.returns = None
self.success = True
def add_message(self, message):
"""
Args:
message:
"""
self.message = message
self.has_message = True
def add_inputs(self, inputs):
"""
Args:
inputs:
"""
if not isinstance(inputs, list):
inputs = [inputs]
self.inputs = inputs
self.has_inputs = True
def add_arguments(self, arguments):
"""
Args:
arguments:
"""
if not isinstance(arguments, list):
arguments = [arguments]
self.arguments = arguments
self.has_arguments = True
def add_error(self, error):
"""
Args:
error:
"""
self.error = error
self.has_error = True
self.success = False
def add_expected_prints(self, prints):
"""
Args:
prints:
"""
self.expected_prints = prints
self.has_expected_prints = True
def add_expected_returns(self, returns):
"""
Args:
returns:
"""
self.expected_returns = returns
self.has_expected_returns = True
def add_prints_returns(self, prints, returns):
"""
Args:
prints:
returns:
"""
self.prints = prints
self.returns = returns
def fail(self):
"""
"""
self.success = False
def check_case(function, case, student_function):
"""
:param function:
:param case:
:param student_function:
:return: status, arg, input, error, output, return, message
"""
function_name = function['name']
test_case = TestCase(function_name, case.get('name'))
# Get callable
sandbox = get_sandbox(MAIN_REPORT)
sandbox.clear_output()
# Potential bonus message
if 'message' in case:
test_case.add_message(case['message'])
# Queue up the inputs
if 'inputs' in case:
test_case.add_inputs(case['inputs'])
sandbox.set_input(test_case.inputs)
else:
sandbox.clear_input()
# Pass in the arguments and call the function
if 'arguments' in case:
test_case.add_arguments(case['arguments'])
result = sandbox.call(function_name, *test_case.arguments)
# Store actual values
test_case.add_prints_returns(sandbox.output, result)
# Check for errors
if sandbox.exception:
test_case.add_error(sandbox.exception)
# 4. Check out the output
if 'prints' in case:
test_case.add_expected_prints(case['prints'])
if not output_test(sandbox.output, case['prints'], False, .0001):
test_case.fail()
# 5. Check the return value
if 'returns' in case:
test_case.add_expected_returns(case['returns'])
if not equality_test(result, case['returns'], True, .0001):
test_case.fail()
# TODO: Check the plots
# Return results
return test_case
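# A hedged sketch of one entry in a function's 'cases' list as consumed above; the field
# names are the ones read by check_case, the concrete values are invented for illustration:
#     {"name": "basic", "arguments": [2, 3], "inputs": ["yes"],
#      "prints": "5", "returns": 5, "message": "Check your arithmetic."}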
# TODO: blockpy-feedback-unit => pedal-test-cases in BlockPy Client
TEST_TABLE_TEMPLATE = """<table class='pedal-test-cases table table-sm table-bordered table-hover'>
<tr class='table-active'>
<th></th>
<th>Arguments</th>
<th>Expected</th>
<th>Returned</th>
</tr>
{body}
</table>"""
TEST_TABLE_FOOTER = "</table>"
TEST_TABLE_ROW_HEADER = "<tr class='table-active'>"
TEST_TABLE_ROW_NORMAL = "<tr>"
TEST_TABLE_ROW_FOOTER = "</tr>"
TEST_TABLE_ROW_INFO = "<tr class='table-info'>"
GREEN_CHECK = " <td class='green-check-mark'>✔</td>"
RED_X = " <td>❌</td>"
CODE_CELL = " <td><code>{}</code></td>"
COLUMN_TITLES = ["", "Arguments", "Inputs", "Errors", "Expected", "Expected", "Returned", "Printed"]
def make_table(cases):
"""
Args:
cases:
Returns:
"""
body = []
for case in cases:
body.append(" <tr>")
body.append(GREEN_CHECK if case.success else RED_X)
body.append(CODE_CELL.format(", ".join(repr(arg) for arg in case.arguments)))
if case.has_error:
body.append(" <td colspan='2'>Error: <code>{}</code></td>".format(str(case.error)))
else:
body.append(CODE_CELL.format(repr(case.expected_returns)))
body.append(CODE_CELL.format(repr(case.returns)))
if not case.success and case.has_message:
body.append(" </tr><tr><td colspan='4'>{}</td>".format(case.message))
body.append(" </tr>")
body = "\n".join(body)
return TEST_TABLE_TEMPLATE.format(body=body)
#if ((any(args) and any(inputs)) or
# (any(expected_outputs) and any(expected_returns)) or
# (any(actual_outputs) and any(actual_returns))):
# # Complex cells
# pass
#else:
# Simple table
# Make header
# row_mask = [True, any(args), any(inputs), False,
# any("returns" in reason for reason in reasons),
# any("prints" in reason for reason in reasons),
# any("returns" in reason for reason in reasons),
# any("prints" in reason for reason in reasons)]
# header_cells = "".join("<th>{}</th>".format(title) for use, title in zip(row_mask, COLUMN_TITLES) if use)
# body = [TEST_TABLE_ROW_HEADER.format(header_cells)]
# for case in zip(
# statuses, args, inputs, errors, actual_outputs, actual_returns,
# expected_outputs, expected_returns):
# status, case = case[0], case[1:]
# print(row_mask[1:], case)
# def make_code(values):
# if values == None:
# return "<code>None</code>"
# elif isinstance(values, int):
# return "<code>{!r}</code>".format(values)
# else:
# return ", ".join("<code>{}</code>".format(repr(value)) for value in values)
# body.append(
# TEST_TABLE_ROW_NORMAL+
# (GREEN_CHECK if case[0] else RED_X)+
# "\n".join(" <td>{}</td>".format(make_code(values))
# for use, values in zip(row_mask[1:], case) if use)+
# "</tr>\n"
# )
# # Make each row
# table = "{}\n{}\n{}".format(TEST_TABLE_HEADER, "\n ".join(body), TEST_TABLE_FOOTER)
# return table
def check_cases(function, student_function, settings):
"""
Args:
function:
student_function:
settings:
"""
function_name = function['name']
if 'cases' in function:
cases = function['cases']
test_cases = [check_case(function, case, student_function) for case in cases]
success_cases = sum(test.success for test in test_cases)
if success_cases < len(cases):
if settings[SETTING_SHOW_CASE_DETAILS]:
table = make_table(test_cases)
raise FeedbackException(FeedbackCategory.SPECIFICATION, "failed_test_cases",
function_name=function_name,
cases_count=len(cases), failure_count=len(cases)-success_cases,
table=table)
else:
raise FeedbackException(FeedbackCategory.SPECIFICATION, "failed_test_cases_count",
function_name=function_name,
cases_count=len(cases), failure_count=len(cases) - success_cases)
def get_arg_name(node):
"""
Args:
node:
Returns:
"""
name = node.id
if name is None:
return node.arg
else:
return name
def load_question(data):
"""
:param data:
:return:
"""
ast = parse_program()
student_data = commands.get_student_data()
# Check that there aren't any invalid syntactical structures
# Get all of the function ASTs in a dictionary
function_definitions = {definition._name: definition
for definition in ast.find_all("FunctionDef")}
settings = DEFAULT_SETTINGS.copy()
settings.update(data.get('settings', {}))
rubric = settings.get('rubric', {})
function_points = 0
if 'functions' in data:
function_rubric = rubric.get('functions', {})
successes = []
for function in data['functions']:
success = False
try:
definition = check_function_defined(function, function_definitions, settings)
function_points += function_rubric.get('definition', 10)
check_function_signature(function, definition, settings)
function_points += function_rubric.get('signature', 10)
student_function = check_function_value(function, student_data, settings)
function_points += function_rubric.get('value', 0)
except FeedbackException as fe:
yield fe.as_message(), fe.label
else:
try:
check_cases(function, student_function, settings)
except FeedbackException as fe:
success_ratio = (1.0 - fe.fields['failure_count'] / fe.fields['cases_count'])
function_points += function_rubric.get('cases', 80*success_ratio)
yield fe.as_message(), fe.label
else:
function_points += function_rubric.get('cases', 80)
success = True
successes.append(success)
function_points /= len(data['functions'])
if all(successes):
set_success()
else:
give_partial(function_points, tool=TOOL_NAME,
justification="Passed some but not all unit tests")
def check_question(data):
"""
Args:
data:
"""
results = list(load_question(data))
if results:
message, label = results[0]
gently(message, label=label)
def check_pool(questions):
"""
Args:
questions:
"""
pass
def load_file(filename):
"""
Args:
filename:
"""
pass
FEEDBACK_MESSAGES = {
FeedbackCategory.SPECIFICATION: {
"missing_function": "No function named `{function_name}` was found.",
"insufficient_args": ("The function named `{function_name}` "
"has fewer parameters ({actual_arity}) "
"than expected ({expected_arity})."),
"excessive_args": ("The function named `{function_name}` "
"has more parameters ({actual_arity}) "
"than expected ({expected_arity})."),
# TODO: missing_parameter that checks if parameter name exists, but is in the wrong place
"wrong_parameter_name": ("Error in definition of `{function_name}`. "
"Expected a parameter named `{expected_parameter_name}`, "
"instead found `{actual_parameter_name}`."),
"wrong_parameter_type": ("Error in definition of function `{function_name}` "
"parameter `{parameter_name}`. Expected `{expected_parameter_type}`, "
"instead found `{actual_parameter_type}`."),
"missing_returns": ("Error in definition of function `{function_name}` return type. "
"Expected `{expected_returns}`, but there was no return type specified."),
"wrong_returns": ("Error in definition of function `{function_name}` return type. "
"Expected `{expected_returns}`, instead found `{actual_returns}`."),
"wrong_signature": ("Error in definition of function `{function_name}` signature. "
"Expected `{expected_signature}`, instead found `{actual_signature}`."),
"name_is_not_function": "You defined `{function_name}`, but did not define it as a function.",
"function_not_available": ("You defined `{function_name}` somewhere in your code, "
"but it was not available in the top-level scope to be called. "
"Perhaps you defined it inside another function or scope?"),
"failed_test_cases": ("I ran your function <code>{function_name}</code> on my own test cases. "
"It failed {failure_count}/{cases_count} of my tests.\n{table}"),
"failed_test_cases_count": ("I ran your function <code>{function_name}</code> on my own test cases. "
"It failed {failure_count}/{cases_count} of my tests."),
}
}
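# The templates above are ordinary str.format templates; for example,
# FEEDBACK_MESSAGES[FeedbackCategory.SPECIFICATION]["missing_function"].format(function_name="add")
# produces "No function named `add` was found."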
| 35.913621 | 118 | 0.592091 | 2,538 | 0.117391 | 2,107 | 0.097456 | 0 | 0 | 0 | 0 | 8,675 | 0.401249 |
a5cc7ebfb0f671bb1d1aeac6021cc68675439a1a | 8,732 | py | Python | VM/fetchLoop.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
]
| 75 | 2017-09-22T22:36:13.000Z | 2022-03-20T16:18:27.000Z | VM/fetchLoop.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
]
| 7 | 2019-05-10T19:15:08.000Z | 2021-08-24T16:03:34.000Z | VM/fetchLoop.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
]
| 14 | 2018-07-02T02:49:46.000Z | 2022-02-22T15:24:47.000Z | import enum
from .ELF import ELF32, enums
from .util import SegmentRegs, MissingOpcodeError
from .CPU import CPU32
import logging
logger = logging.getLogger(__name__)
class FetchLoopMixin:
_attrs_ = 'eip', 'mem', 'reg.ebx', 'fmt', 'instr', 'sizes', 'default_mode'
def execute_opcode(self: CPU32) -> None:
self.eip += 1
off = 1
if self.opcode == 0x0F:
op = self.mem.get_eip(self.eip, 1)
self.eip += 1
self.opcode = (self.opcode << 8) | op
off += 1
if __debug__:
logger.debug(self.fmt, self.eip - off, self.opcode)
try:
impls = self.instr[self.opcode]
except KeyError:
... # could not find opcode
else:
for impl in impls:
if impl():
return # opcode executed
# could not find suitable implementation
# read one more byte
op = self.mem.get_eip(self.eip, 1)
self.eip += 1
self.opcode = (self.opcode << 8) | op
try:
impls = self.instr[self.opcode]
except KeyError:
raise MissingOpcodeError(f'Opcode {self.opcode:x} is not recognized yet (at 0x{self.eip - off - 1:08x})')
else:
for impl in impls:
if impl():
return # opcode executed
# could not find suitable implementation
raise NotImplementedError(f'No suitable implementation found for opcode {self.opcode:x} (@0x{self.eip - off - 1:02x})')
def run(self: CPU32) -> int:
"""
Implements the basic CPU instruction cycle (https://en.wikipedia.org/wiki/Instruction_cycle)
:param self: passed implicitly
:return: the value of EAX when execution stops
"""
# opcode prefixes
pref_segments = {
0x2E: SegmentRegs.CS,
0x36: SegmentRegs.SS,
0x3E: SegmentRegs.DS,
0x26: SegmentRegs.ES,
0x64: SegmentRegs.FS,
0x65: SegmentRegs.GS
}
pref_op_size_override = {0x66, 0x67}
pref_lock = {0xf0}
rep = {0xf3}
prefixes = set(pref_segments) | pref_op_size_override | pref_lock | rep
self.running = True
while self.running and self.eip + 1 < self.mem.size:
overrides = []
self.opcode = self.mem.get(self.eip, 1)
while self.opcode in prefixes:
overrides.append(self.opcode)
self.eip += 1
self.opcode = self.mem.get(self.eip, 1)
# apply overrides
size_override_active = False
for ov in overrides:
if ov == 0x66:
if not size_override_active:
self.current_mode = not self.current_mode
size_override_active = True
old_operand_size = self.operand_size
self.operand_size = self.sizes[self.current_mode]
logger.debug(
'Operand size override: %d -> %d',
old_operand_size, self.operand_size
)
elif ov == 0x67:
if not size_override_active:
self.current_mode = not self.current_mode
size_override_active = True
old_address_size = self.address_size
self.address_size = self.sizes[self.current_mode]
logger.debug(
'Address size override: %d -> %d',
old_address_size, self.address_size
)
elif ov in pref_segments:
is_special = ov >> 6
if is_special:
sreg_number = 4 + (ov & 1) # FS or GS
else:
sreg_number = (ov >> 3) & 0b11
self.mem.segment_override = sreg_number
logger.debug('Segment override: %s', self.mem.segment_override)
elif ov == 0xf0: # LOCK prefix
logger.debug('LOCK prefix') # do nothing; all operations are atomic anyway. Right?
elif ov == 0xf3: # REP prefix
self.opcode = ov
self.eip -= 1 # repeat the previous opcode
self.execute_opcode()
# undo all overrides
for ov in overrides:
if ov == 0x66:
self.current_mode = self.default_mode
self.operand_size = self.sizes[self.current_mode]
elif ov == 0x67:
self.current_mode = self.default_mode
self.address_size = self.sizes[self.current_mode]
elif ov in pref_segments:
self.mem.segment_override = SegmentRegs.DS
return self.reg.eax
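# Worked example of the prefix decoding above, assuming SegmentRegs follows the standard
# x86 numbering (ES=0, CS=1, SS=2, DS=3, FS=4, GS=5):
# 0x2E -> (0x2E >> 3) & 0b11 == 1 (CS), 0x36 -> 2 (SS), 0x3E -> 3 (DS), 0x26 -> 0 (ES);
# 0x64 and 0x65 have bit 6 set, so 4 + (ov & 1) selects 4 (FS) and 5 (GS) respectively.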
class ExecutionStrategy(enum.Enum):
BYTES = 1
FLAT = 2
ELF = 3
class ExecutionMixin(FetchLoopMixin):
def execute(self, *args, **kwargs):
return NotImplemented
class ExecuteBytes(ExecutionMixin):
_attrs_ = 'eip', 'mem', 'code_segment_end'
_funcs_ = 'run',
def execute(self: CPU32, data: bytes, offset=0):
l = len(data)
self.mem.set_bytes(offset, l, data)
self.eip = offset
self.code_segment_end = self.eip + l - 1
self.mem.program_break = self.code_segment_end
return self.run()
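# Hedged usage sketch (the concrete CPU class and its constructor are assumptions, not part
# of this module): a class mixing CPU32 with ExecuteBytes could run raw machine code, e.g.
#     vm = MyCPU(...)                                 # hypothetical concrete class
#     vm.execute(b"\xb8\x2a\x00\x00\x00", offset=0)   # mov eax, 42; returns EAX via run()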
class ExecuteFlat(ExecutionMixin):
_attrs_ = 'eip', 'mem', 'code_segment_end'
_funcs_ = 'run',
def execute(self: CPU32, fname: str, offset=0):
with open(fname, 'rb') as f:
data = f.read()
l = len(data)
self.mem.set_bytes(offset, l, data)
self.eip = offset
self.code_segment_end = self.eip + l - 1
self.mem.program_break = self.code_segment_end
return self.run()
class ExecuteELF(ExecutionMixin):
_attrs_ = 'eip', 'mem', 'reg', 'code_segment_end'
_funcs_ = 'run', 'stack_init', 'stack_push'
def execute(self: CPU32, fname: str, args=()):
with ELF32(fname) as elf:
if elf.hdr.e_type != enums.e_type.ET_EXEC:
raise ValueError(f'ELF file {elf.fname!r} is not executable (type: {elf.hdr.e_type})')
max_memsz = max(
phdr.p_vaddr + phdr.p_memsz
for phdr in elf.phdrs
if phdr.p_type == enums.p_type.PT_LOAD
)
if self.mem.size < max_memsz * 2:
self.mem.size = max_memsz * 2
self.stack_init()
for phdr in elf.phdrs:
if phdr.p_type not in (enums.p_type.PT_LOAD, enums.p_type.PT_GNU_EH_FRAME):
continue
logger.info(f'LOAD {phdr.p_memsz:10,d} bytes at address 0x{phdr.p_vaddr:09_x}')
elf.file.seek(phdr.p_offset)
data = elf.file.read(phdr.p_filesz)
self.mem.set_bytes(phdr.p_vaddr, len(data), data)
self.mem.set_bytes(phdr.p_vaddr + phdr.p_filesz, phdr.p_memsz - phdr.p_filesz, bytearray(phdr.p_memsz - phdr.p_filesz))
self.eip = elf.hdr.e_entry
self.code_segment_end = self.eip + max_memsz - 1
self.mem.program_break = self.code_segment_end
# INITIALIZE STACK LAYOUT:
# http://asm.sourceforge.net/articles/startup.html
# https://lwn.net/Articles/631631/
environment = ["USER=ForceBru"]
args = [fname] + list(args)
arg_addresses, env_addresses = [], []
for arg in args:
arg = arg.encode() + b'\0'
l = len(arg)
self.mem.set_bytes(self.reg.esp - l, l, arg)
self.reg.esp -= l
arg_addresses.append(self.reg.esp)
for env in environment:
env = env.encode() + b'\0'
l = len(env)
self.mem.set_bytes(self.reg.esp - l, l, env)
self.reg.esp -= l
env_addresses.append(self.reg.esp)
# auxiliary vector (just NULL)
self.stack_push(0)
# environment (array of pointers + NULL)
self.stack_push(0)
for addr in env_addresses[::-1]:
self.stack_push(addr)
# argv
self.stack_push(0) # end of argv
for addr in arg_addresses[::-1]:
self.stack_push(addr)
# argc
self.stack_push(len(args))
logger.info(f'EXEC at 0x{self.eip:09_x}')
# logger.debug(f'Stack start at 0x{self.reg.esp:08x}')
# logger.debug(f'Stack end at 0x{self.reg.ebp:08x}')
return self.run()
| 33.328244 | 135 | 0.535502 | 8,541 | 0.978126 | 0 | 0 | 0 | 0 | 0 | 0 | 1,515 | 0.1735 |
a5cdc723644cccdf87dcd59c16c72ac9871189a8 | 2,753 | py | Python | polecat/deploy/aws/deployment.py | furious-luke/polecat | 7be5110f76dc42b15c922c1bb7d49220e916246d | [
"MIT"
]
| 4 | 2019-08-10T12:56:12.000Z | 2020-01-21T09:51:20.000Z | polecat/deploy/aws/deployment.py | furious-luke/polecat | 7be5110f76dc42b15c922c1bb7d49220e916246d | [
"MIT"
]
| 71 | 2019-04-09T05:39:21.000Z | 2020-05-16T23:09:24.000Z | polecat/deploy/aws/deployment.py | furious-luke/polecat | 7be5110f76dc42b15c922c1bb7d49220e916246d | [
"MIT"
]
| null | null | null | from termcolor import colored
from ...utils.feedback import feedback
from .constants import DEPLOYMENT_PREFIX, DEPLOYMENT_REGISTRY
from .exceptions import EntityDoesNotExist, EntityExists
from .operations import delete_parameter, get_parameter, set_parameter
from .project import assert_project_exists
from .utils import aws_client
@feedback
def create_deployment(project, deployment, feedback):
ssm = aws_client('ssm')
with feedback(f'Create deployment {colored(project, "cyan")}/{colored(deployment, "green")}'):
assert_project_exists(project, ssm=ssm)
name = DEPLOYMENT_REGISTRY.format(project)
deployments = get_parameter(name, default=[], ssm=ssm)
if deployment in deployments:
raise EntityExists
try:
deployments.remove('_')
except ValueError:
pass
deployments.append(deployment)
set_parameter(name, deployments, ssm=ssm)
# The active flag is here really only to ensure when
# deployments are carried out for deployments without secrets
# etc, they are still used.
name = DEPLOYMENT_PREFIX.format(project, deployment) + 'active'
set_parameter(name, '1', ssm=ssm)
@feedback
def list_deployments(project, feedback):
ssm = aws_client('ssm')
with feedback(f'List deployments for {colored(project, "cyan")}'):
assert_project_exists(project, ssm=ssm)
name = DEPLOYMENT_REGISTRY.format(project)
deployments = get_parameter(name, default=[], ssm=ssm)
return [d for d in deployments if d != '_']
@feedback
def delete_deployment(project, deployment, feedback):
ssm = aws_client('ssm')
with feedback(f'Delete deployment {colored(project, "cyan")}/{colored(deployment, "green")}'):
assert_project_exists(project, ssm=ssm)
assert_deployment_exists(project, deployment, ssm=ssm)
# TODO: Check for related entities?
name = DEPLOYMENT_REGISTRY.format(project)
deployments = get_parameter(name, default=[], ssm=ssm)
deployments.remove(deployment)
if not deployments:
deployments.append('_')
set_parameter(name, deployments, ssm=ssm)
# TODO: Need to fetch the path, then delete all of them.
name = DEPLOYMENT_PREFIX.format(project, deployment)
delete_parameter(name, ssm=ssm)
def deployment_exists(project, deployment, ssm=None):
registry = get_parameter(DEPLOYMENT_REGISTRY.format(project), ssm=ssm)
return deployment in registry
def assert_deployment_exists(project, deployment, ssm=None):
if not deployment_exists(project, deployment, ssm=ssm):
raise EntityDoesNotExist(
f'deployment {deployment} does not exist'
)
| 38.774648 | 98 | 0.698147 | 0 | 0 | 0 | 0 | 2,025 | 0.735561 | 0 | 0 | 513 | 0.186342 |
a5cef8d918f7406a1dd78059cb13a600f918323a | 5,897 | py | Python | mlpy/regression/logistic_regression.py | SNUDerek/MLPy | 0d47a8ef8522a663716cda6a831855e6482069ba | [
"MIT"
]
| 1 | 2019-05-10T10:39:12.000Z | 2019-05-10T10:39:12.000Z | mlpy/regression/logistic_regression.py | SNUDerek/MLPy | 0d47a8ef8522a663716cda6a831855e6482069ba | [
"MIT"
]
| null | null | null | mlpy/regression/logistic_regression.py | SNUDerek/MLPy | 0d47a8ef8522a663716cda6a831855e6482069ba | [
"MIT"
]
| null | null | null | import numpy as np
from ..tools import batchGenerator
# LOGISTIC REGRESSION
# for (binary) categorical data
class LogisticRegression():
'''
Logistic regression with Gradient Descent
binary Logistic regression
Parameters
----------
epochs : int
maximum epochs of gradient descent
lr : float
learning rate
lmb : float
(L2) regularization parameter lambda
sgd : int
batch size for stochastic gradient descent (0 = gradient descent)
tol : float
tolerance for convergence
Attributes
----------
weights : array
weights (coefficients) of linear model
costs : list
cost value recorded at each iteration of fit()
'''
def __init__(self, epochs=1000, intercept=False, lmb=0.0, lr=0.01, sgd=0, tol=1e-5):
self.epochs = epochs
self.intercept = intercept
self.lmb=lmb
self.lr = lr
self.sgd = sgd
self.tol = tol
self.weights = np.array([])
self.costs = []
# internal function for sigmoid
def _sigmoid(self, estimates):
sigmoid = 1 / (1 + np.exp(-estimates))
return sigmoid
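# Quick sanity values for the logistic function above:
# sigmoid(0) = 0.5, sigmoid(4) ~ 0.982, sigmoid(-4) ~ 0.018, so scores are squashed into (0, 1)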
# internal function for making hypothesis and getting cost
def _getestimate(self, x_data, y_data, weights):
# get hypothesis 'scores' (features by weights)
scores = x_data.dot(weights).flatten()
# sigmoid these scores for predictions (0~1)
y_hat = self._sigmoid(scores)
# get the difference between the trues and the hypothesis
difference = y_data.flatten() - y_hat
# calculate cost function J (negative log-likelihood)
# J = -sum_i [ y_i * (theta.T x_i) - log(1 + e^(theta.T x_i)) ]
nloglik = -np.sum(y_data*scores - np.log(1 + np.exp(scores)))
return y_hat, difference, nloglik
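# For reference, the gradient of this negative log-likelihood w.r.t. theta is
# dJ/dtheta = -X.T (y - sigmoid(X theta)), which is exactly the `gradient` computed in fit()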
# fit ("train") the function to the training data
# inputs : x and y data as np.arrays (x is array of x-dim arrays where x = features)
# params : verbose : Boolean - whether to print out detailed information
# outputs : none
def fit(self, x_data, y_data, verbose=False, print_iters=100):
# STEP 1: ADD X_0 TERM FOR BIAS (IF INTERCEPT==TRUE)
# add an 'x0' = 1.0 to our x data so we can treat intercept as a weight
# use numpy.hstack (horizontal stack) to add a column of ones:
if self.intercept:
x_data = np.hstack((np.ones((x_data.shape[0], 1)), x_data))
# STEP 2: INIT WEIGHT COEFFICIENTS
# one weight per feature (+ intercept)
# you can init the weights randomly:
# weights = np.random.randn(x_data.shape[1])
# or you can use zeroes with np.zeros():
weights = np.zeros(x_data.shape[1])
# STEP 3: INIT REGULARIZATION TERM LAMBDA
# make as array with bias = 0 so don't regularize bias
# then we can element-wise multiply with weights
# this is the second term in the ( 1 - lambda/m )
lmbda = np.array([self.lmb/x_data.shape[0] for i in range(x_data.shape[1])])
if self.intercept:
lmbda[0] = 0.0
iters = 0
# choose between iterations of sgd and epochs
if self.sgd==0:
maxiters = self.epochs
else:
maxiters = self.epochs * int(len(y_data)/self.sgd)
minibatch = batchGenerator(x_data, y_data, self.sgd)
for epoch in range(maxiters):
# make an estimate, calculate the difference and the cost
# gradient_ll = X.T(y - y_hat)
# GRADIENT DESCENT:
# get gradient over ~all~ training instances each iteration
if self.sgd==0:
y_hat, difference, cost = self._getestimate(x_data, y_data, weights)
gradient = -np.dot(x_data.T, difference)
# STOCHASTIC (minibatch) GRADIENT DESCENT
# get gradient over random minibatch each iteration
# for "true" sgd, this should be sgd=1
# though minibatches of power of 2 are more efficient (2, 4, 8, 16, 32, etc)
else:
x_batch, y_batch = next(minibatch)
y_hat, difference, cost = self._getestimate(x_batch, y_batch, weights)
gradient = -np.dot(x_batch.T, difference)  # gradient over the minibatch only
# get new predicted weights by stepping "backwards' along gradient
# use lambda parameter for regularization (calculated above)
new_weights = (weights - lmbda) - gradient * self.lr
# check stopping condition
if np.sum(abs(new_weights - weights)) < self.tol:
if verbose:
print("converged after {0} iterations".format(iters))
break
# update weight values, save cost
weights = new_weights
self.costs.append(cost)
iters += 1
# print diagnostics
if verbose and iters % print_iters == 0:
print("iteration {0}: cost: {1}".format(iters, cost))
# update final weights
self.weights = weights
return self.costs
# predict probas on the test data
# inputs : x data as np.array
# outputs : y probabilities as list
def predict_proba(self, x_data):
# STEP 1: ADD X_0 TERM FOR BIAS (IF INTERCEPT==TRUE)
if self.intercept:
x_data = np.hstack((np.ones((x_data.shape[0], 1)), x_data))
# STEP 2: PREDICT USING THE y_hat EQN
scores = x_data.dot(self.weights).flatten()
y_hat = self._sigmoid(scores)
return y_hat
# predict on the test data
# inputs : x data as np.array
# outputs : y preds as list
def predict(self, x_data):
y_hat = self.predict_proba(x_data)
# ROUND TO 0, 1
preds = []
for p in y_hat:
if p > 0.5:
preds.append(1.0)
else:
preds.append(0.0)
return preds
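# Hedged usage sketch (synthetic data; kept as a comment because this module relies on a
# relative import of batchGenerator and is not meant to be run directly):
#     rng = np.random.RandomState(0)
#     X = rng.randn(200, 2)
#     y = (X[:, 0] + X[:, 1] > 0).astype(float)
#     clf = LogisticRegression(epochs=500, intercept=True, lr=0.1)
#     clf.fit(X, y)
#     train_acc = np.mean(np.array(clf.predict(X)) == y)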
| 32.944134 | 89 | 0.587248 | 5,787 | 0.981346 | 0 | 0 | 0 | 0 | 0 | 0 | 2,672 | 0.453112 |
a5d00dc3b88e76c00327d591e70ffe150f4013d2 | 1,946 | py | Python | esercizio_1/untitled1.py | navyzigz420/python_lab | a3496d8b170e334abfb5099bf6ee03df5e226b78 | [
"Apache-2.0"
]
| null | null | null | esercizio_1/untitled1.py | navyzigz420/python_lab | a3496d8b170e334abfb5099bf6ee03df5e226b78 | [
"Apache-2.0"
]
| null | null | null | esercizio_1/untitled1.py | navyzigz420/python_lab | a3496d8b170e334abfb5099bf6ee03df5e226b78 | [
"Apache-2.0"
]
| null | null | null | bits = '110'
def turnBitsIntoInteger(listOfBits):
valore = 0
lunghezza = len(listOfBits)
for x in range(lunghezza):
if listOfBits[x] != '0' and listOfBits[x] != '1':
raise Exception('Not a combination of bits!')
valore = valore + 2**(lunghezza -1 - x) * int(listOfBits[x])
if valore != int(listOfBits,2):
raise Exception('Darn it! {} differs from {}!'.format(valore, int(listOfBits,2)))
return valore
#print(str(valore))
print(turnBitsIntoInteger(bits))
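# Worked example: '110' -> 1*2**2 + 1*2**1 + 0*2**0 = 6, so the call above prints 6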
| 1.3637 | 92 | 0.167009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.047276 |
a5d187baa7ec34c04b159476ef8dc6d77a915eac | 4,175 | py | Python | generated/azure-cli/aro/custom.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | [
"MIT"
]
| null | null | null | generated/azure-cli/aro/custom.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | [
"MIT"
]
| null | null | null | generated/azure-cli/aro/custom.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | [
"MIT"
]
| null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
import json
# module equivalent: azure_rm_openshiftmanagedcluster
# URL: /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ContainerService/openShiftManagedClusters/{{ open_shift_managed_cluster_name }}
def create_aro(cmd, client,
resource_group,
name,
location,
open_shift_version,
tags=None,
plan=None,
network_profile=None,
router_profiles=None,
master_pool_profile=None,
agent_pool_profiles=None,
auth_profile=None):
body = {}
body['location'] = location # str
body['tags'] = tags # dictionary
body['plan'] = json.loads(plan) if isinstance(plan, str) else plan
body['open_shift_version'] = open_shift_version # str
body['network_profile'] = json.loads(network_profile) if isinstance(network_profile, str) else network_profile
body['router_profiles'] = json.loads(router_profiles) if isinstance(router_profiles, str) else router_profiles
body['master_pool_profile'] = json.loads(master_pool_profile) if isinstance(master_pool_profile, str) else master_pool_profile
body['agent_pool_profiles'] = json.loads(agent_pool_profiles) if isinstance(agent_pool_profiles, str) else agent_pool_profiles
body['auth_profile'] = json.loads(auth_profile) if isinstance(auth_profile, str) else auth_profile
return client.create_or_update(resource_group_name=resource_group, resource_name=name, parameters=body)
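# Note on the isinstance checks above: each complex argument may arrive either as an
# already-parsed object or as a JSON string. Illustrative (assumed) example: tags as the
# dict {"env": "dev"}, network_profile as the string '{"vnetCidr": "10.0.0.0/8"}'.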
# module equivalent: azure_rm_openshiftmanagedcluster
# URL: /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ContainerService/openShiftManagedClusters/{{ open_shift_managed_cluster_name }}
def update_aro(cmd, client, body,
resource_group,
name,
location,
open_shift_version,
tags=None,
plan=None,
network_profile=None,
router_profiles=None,
master_pool_profile=None,
agent_pool_profiles=None,
auth_profile=None):
body = client.get(resource_group_name=resource_group, resource_name=name).as_dict()
body.location = location # str
body.tags = tags # dictionary
body.plan = json.loads(plan) if isinstance(plan, str) else plan
body.open_shift_version = open_shift_version # str
body.network_profile = json.loads(network_profile) if isinstance(network_profile, str) else network_profile
body.router_profiles = json.loads(router_profiles) if isinstance(router_profiles, str) else router_profiles
body.master_pool_profile = json.loads(master_pool_profile) if isinstance(master_pool_profile, str) else master_pool_profile
body.agent_pool_profiles = json.loads(agent_pool_profiles) if isinstance(agent_pool_profiles, str) else agent_pool_profiles
body.auth_profile = json.loads(auth_profile) if isinstance(auth_profile, str) else auth_profile
return client.create_or_update(resource_group_name=resource_group, resource_name=name, parameters=body)
# module equivalent: azure_rm_openshiftmanagedcluster
# URL: /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.ContainerService/openShiftManagedClusters/{{ open_shift_managed_cluster_name }}
def list_aro(cmd, client,
resource_group):
if resource_group is not None:
return client.list_by_resource_group(resource_group_name=resource_group)
return client.list()
| 55.666667 | 180 | 0.683353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,400 | 0.335329 |
a5d2df25221764ec5395b74a6c3cb30a216ee3ff | 12,269 | py | Python | server.py | satriabw/Tugas_Sisdis | b1e152f35834e52806071b9b1424b114dce65148 | [
"MIT"
]
| null | null | null | server.py | satriabw/Tugas_Sisdis | b1e152f35834e52806071b9b1424b114dce65148 | [
"MIT"
]
| null | null | null | server.py | satriabw/Tugas_Sisdis | b1e152f35834e52806071b9b1424b114dce65148 | [
"MIT"
]
| null | null | null | # coding: utf-8
from random import randint
from urllib.parse import parse_qs
import socket
import sys
import json
import traceback
import os
import base64
import yaml
import datetime
import requests
import re
class Route:
def __init__(self):
self._route = []
def route(self, method, path, handler):
self._route.append({"method": method, "path": path, "handler": handler})
def dispatch(self, path, method):
pattern = re.compile(r'/api/plusone/[0-9]*[0-9]$')
match = re.match(pattern, path)
if match != None:
path = "/api/plusone/<:digit>"
for item in self._route:
if item["path"] == path and item["method"] == method:
return item["handler"]
return None
def findPath(self, path):
for item in self._route:
if item["path"] == path:
return True
return False
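# e.g. route.dispatch('/api/plusone/41', 'GET') resolves to the handler registered for
# '/api/plusone/<:digit>', while dispatch() returns None for any unregistered path/method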
route = Route()
class HTTPRequest:
def __init__(self, request):
self._raw_request = request
self._build_header()
self._build_body()
def _build_header(self):
raw_head = self._split_request()[0]
head = raw_head.split("\n")
# Get method, path, and http version
temp = head[0].split(" ")
self.header = {
"method" : temp[0],
"path" : temp[1],
"http_version" : temp[2],
}
# Get Content-type and Content-length
for info in head:
if "Content-Type" in info:
self.header["content_type"] = info.split(" ")[1]
continue
if "Content-Length" in info:
self.header["content_length"] = info.split(" ")[1]
def _build_body(self):
self._raw_body = self._split_request()[1]
def _split_request(self):
return self._raw_request.decode(
"utf-8").replace("\r", "").split("\n\n")
def body_json(self):
return json.loads('[{}]'.format(self._raw_body))
def body_query(self, query):
return parse_qs(self._raw_body)[query]
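# e.g. for a body of "name=Ada&age=36", parse_qs gives {'name': ['Ada'], 'age': ['36']},
# so body_query('name')[0] is the string 'Ada'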
def validation(func):
def func_wrapper(conn, request):
if (request.header["http_version"] not in "HTTP/1.0") and (request.header["http_version"] not in "HTTP/1.1"):
badRequest(conn, request)
else:
func(conn, request)
return func_wrapper
@validation
def getRoot(conn, request):
debugger = "Hooray getRoot end point is hitted\n"
print(debugger)
status = "302 Found"
loc = "/hello-world"
c_type = "text/plain; charset=UTF-8"
data = '302 Found: Location: /hello-world'
msgSuccess = renderMessage(status, str(21+len(loc)), loc, None, c_type, data)
writeResponse(conn, msgSuccess)
@validation
def getHelloWorld(conn, request):
with open("./hello-world.html", "r") as f:
html = f.read()
data = html.replace("__HELLO__", "World")
status = "200 OK"
c_type = "text/html"
msgSuccess = renderMessage(status, str(len(data)), None, None, c_type, data)
writeResponse(conn, msgSuccess)
@validation
def getStyle(conn, request):
with open("./style.css", "r") as f:
css = f.read()
status = "200 OK"
c_type = "text/css"
msgSuccess = renderMessage(status, str(len(css)), None, None, c_type, css)
writeResponse(conn, msgSuccess)
@validation
def getBackground(conn, request):
with open("./background.jpg", "rb") as f:
img = f.read()
status = "200 OK"
c_type = "image/jpeg"
enc = "base64"
msgSuccess = renderMessage(status, str(len(img)), None, enc, c_type, "")
msgSuccess = msgSuccess + img
writeResponse(conn, msgSuccess)
@validation
def getSpesifikasi(conn, request):
with open("./spesifikasi.yaml", "r") as f:
yaml = f.read()
status = "200 OK"
c_type = "text/plain; charset=UTF-8"
msgSuccess = renderMessage(status, str(len(yaml)), None, None, c_type, yaml)
writeResponse(conn, msgSuccess)
@validation
def getInfo(conn, request):
query = request.header["path"].split("?")
data = "No Data"
try:
tipe = exctractUrl(query[1], "type")
if tipe == "time":
data = "{}".format(datetime.datetime.now())
elif tipe == "random":
data = "{}".format(randint(111111,999999))
except (IndexError, ValueError) as e:
pass
status = "200 OK"
c_type = "text/plain; charset=UTF-8"
msgSuccess = renderMessage(status, str(len(data)), None, None, c_type, data)
writeResponse(conn, msgSuccess)
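# e.g. GET /info?type=time answers with the current timestamp and GET /info?type=random
# with a 6-digit number; any other (or missing) type falls through to "No Data"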
def notFound(conn, request):
if "/api" in request.header["path"]:
notFoundJson(conn)
status = "404 Not Found"
c_type = "text/plain; charset=UTF-8"
msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
writeResponse(conn, msgErr)
def notImplemented(conn, request):
status = "501 Not Implemented"
c_type = "text/plain; charset=UTF-8"
msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
writeResponse(conn, msgErr)
def badRequest(conn, request):
if "/api" in request.header["path"]:
badRequestJson(conn, "Please use proper http version")
status = "400 Bad Request"
c_type = "text/plain; charset=UTF-8"
msgErr = renderMessage(status, str(len(status)), None, None, c_type, status)
writeResponse(conn, msgErr)
@validation
def postHelloWorld(conn, request):
debugger = "Hooray postHelloWorld end point is hitted\n"
print(debugger)
try:
if request.header["content_type"] == "application/x-www-form-urlencoded":
name = request.body_query("name")[0]
with open("./hello-world.html", "r") as f:
html = f.read()
data = html.replace("__HELLO__", str(name))
status = "200 OK"
c_type = "text/html; charset=UTF-8"
msgSuccess = renderMessage(status, str(len(data)), None, None, c_type, data)
writeResponse(conn, msgSuccess)
else:
raise ValueError("Cannot parse the request")
except (IndexError, KeyError, ValueError) as e:
badRequest(conn, request)
def validateHelloAPI(func):
def func_wrapper(conn, request):
if (request.header["http_version"] not in "HTTP/1.0") and (request.header["http_version"] not in "HTTP/1.1"):
badRequestJson(conn, "Please use proper http version")
elif request.header["method"] != "POST":
methodNotAllowedJson(conn, "Method is not allowed, please use POST method")
elif request.header["content_type"] != "application/json":
methodNotAllowedJson(conn, "please use application/json")
else:
func(conn, request)
return func_wrapper
@validateHelloAPI
def helloAPI(conn, request):
req = requests.get(url='http://172.22.0.222:5000')
data = req.json()
current_visit = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
try:
name = request.body_json()[0]["request"]
count = getCounter() + 1
writeCounter(count)
res = "Good {}, {}".format(data["state"], name)
json_http_ok(conn, count=count, currentvisit=current_visit, response=res)
except KeyError:
badRequestJson(conn, "'request' is a required property")
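# Illustrative exchange (the state value depends on the sidecar service queried above):
# POST /api/hello with body {"request": "Alice"} returns JSON with the keys
# apiversion, count, currentvisit and response, e.g. response = "Good morning, Alice"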
@validation
def plusOneAPI(conn, request):
val = int(request.header["path"].split("/")[-1])
json_http_ok(conn, plusoneret=val+1)
def getTime(t_raw):
t = datetime.datetime.strptime(t_raw, "%Y-%m-%d %H:%M:%S")
return t.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def getCounter():
with open('counter.json', 'r') as json_file:
data = json.load(json_file)
return data["count"]
def writeCounter(c):
count = {"count": c}
with open('counter.json', 'w') as json_file:
data = json.dump(count, json_file)
def getApiVersion():
with open('./spesifikasi.yaml', 'r') as f:
doc = yaml.load(f)
return doc["info"]["version"]
def notFoundJson(conn):
detail = "The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again."
status = "404"
title = "Not Found"
json_http_error(conn, detail, status, title)
def methodNotAllowedJson(conn, d):
detail = d
status = "405"
title = "Method Not Allowed"
json_http_error(conn, detail, status, title)
def badRequestJson(conn, d):
detail = d
status = "400"
title = "Bad Request"
json_http_error(conn, detail, status, title)
def json_http_ok(conn, **kwargs):
res_dict = {'apiversion': getApiVersion()}
for key, value in kwargs.items():
res_dict[key] = value
data = json.dumps(res_dict)
# Build Response
status = "200 OK"
c_type = "application/json; charset=UTF-8"
msgErr = renderMessage(status, str(len(data)), None, None, c_type, data)
writeResponse(conn, msgErr)
def json_http_error(conn, detail, status, title):
res_dict = {'detail': detail, 'status': status, 'title': title}
data = json.dumps(res_dict)
status = "{} {}".format(status, title)
c_type = "application/json; charset=UTF-8"
msgErr = renderMessage(status, str(len(data)), None, None, c_type, data)
writeResponse(conn, msgErr)
def main():
# HOST = socket.gethostbyname(socket.gethostname())
HOST = "0.0.0.0"
PORT = int(sys.argv[1])
#Get method
route.route("GET", "/", getRoot)
route.route("GET", "/hello-world", getHelloWorld)
route.route("GET", "/style", getStyle)
route.route("GET", "/background", getBackground)
route.route("GET", "/info", getInfo)
route.route("GET", "/api/hello", helloAPI)
route.route("GET", "/api/plusone/<:digit>", plusOneAPI)
route.route("GET", "/api/spesifikasi.yaml", getSpesifikasi)
#Post Method
route.route("POST", "/api/hello", helloAPI)
route.route("POST", "/hello-world", postHelloWorld)
# PUT
route.route("PUT", "/api/hello", helloAPI)
#PATCH
route.route("PATCH", "/api/hello", helloAPI)
#DELETE
route.route("DELETE", "/api/hello", helloAPI)
#HEAD
route.route("HEAD", "/api/hello", helloAPI)
# Serve the connection
connect(HOST, PORT)
def handler(conn, req):
try:
debugger = "=== Got Request ===\n{}\n===Got Header====\n{}\n".format(req._raw_request, req.header)
print(debugger)
route.dispatch(cleanURL(req.header["path"]), req.header["method"])(conn, req)
except TypeError as e:
print(traceback.format_exc())
if route.findPath(cleanURL(req.header["path"])):
notImplemented(conn, req)
return
notFound(conn, req)
return
def cleanURL(url):
return url.split("?")[0]
def writeResponse(conn, message):
debugger = "=== Got Message ===\n{}\n".format(message)
print(debugger)
conn.sendall(message)
def renderMessage(stat, c_length, location, encoding, c_type, data):
msg = ""
if stat != None:
status = "HTTP/1.1 {}\r\n".format(stat)
msg = msg + status
msg = msg + "Connection: close\r\n"
if c_length != None:
content_length = "Content-Length: {}\r\n".format(c_length)
msg = msg + content_length
if location != None:
loc = "Location: {}\r\n".format(location)
msg = msg + loc
if encoding != None:
enc = "Content-Transfer-Encoding: {}\r\n".format(encoding)
msg = msg + enc
if c_type != None:
content_type = "Content-Type: {}\r\n".format(c_type)
msg = msg + content_type
if data != None:
msg = msg + "\r\n" + data
return bytes(msg, "utf-8")
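# Worked example: renderMessage("200 OK", "2", None, None, "text/plain", "OK") returns
# b'HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 2\r\nContent-Type: text/plain\r\n\r\nOK'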
def exctractUrl(url, query):
return parse_qs(url)[query][0]
def connect(host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen()
while True:
try:
conn, addr = s.accept()
data = conn.recv(1024)
req = HTTPRequest(data)
handler(conn, req)
conn.shutdown(socket.SHUT_WR)
conn.close()
except Exception:
print(traceback.format_exc())
continue
main() | 31.060759 | 136 | 0.605102 | 1,895 | 0.154454 | 0 | 0 | 3,666 | 0.298802 | 0 | 0 | 2,561 | 0.208737 |
a5d51824f01f43c1a1f7165def53773a506fe72b | 656 | py | Python | glia/widgets/editor/tabs.py | gliahq/Glia | 4951569f2759ea886bad165b6d74a569c14bbd2a | [
"Apache-2.0"
]
| 1 | 2020-08-20T08:22:33.000Z | 2020-08-20T08:22:33.000Z | glia/widgets/editor/tabs.py | gliahq/Glia | 4951569f2759ea886bad165b6d74a569c14bbd2a | [
"Apache-2.0"
]
| 3 | 2021-04-20T18:20:45.000Z | 2021-06-01T23:56:13.000Z | glia/widgets/editor/tabs.py | gliahq/Glia | 4951569f2759ea886bad165b6d74a569c14bbd2a | [
"Apache-2.0"
]
| null | null | null | from PyQt5.QtWidgets import QTabWidget
from glia.widgets.editor import Editor
class EditorTabs(QTabWidget):
def __init__(self, parent=None):
"""
Generates a tab with an editor, depending on the selected file and its
type.
"""
super(EditorTabs, self).__init__(parent)
# Tab Properties
self.setTabsClosable(True)
self.setMovable(True)
# Default Editor
self.editor = Editor(self)
self.addTab(self.editor, "main.py")
# Slots
self.tabCloseRequested.connect(self.handle_tab_closed)
def handle_tab_closed(self, index):
self.removeTab(index)
| 24.296296 | 69 | 0.643293 | 574 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.230183 |
a5d8c45707967ee83846553d1837407bb63fcb57 | 2,296 | py | Python | tests/test_aws_tdd.py | Fauxsys/offprem | 9fe4764b24578b1ada43cfab600379b55fed038d | [
"MIT"
]
| null | null | null | tests/test_aws_tdd.py | Fauxsys/offprem | 9fe4764b24578b1ada43cfab600379b55fed038d | [
"MIT"
]
| null | null | null | tests/test_aws_tdd.py | Fauxsys/offprem | 9fe4764b24578b1ada43cfab600379b55fed038d | [
"MIT"
]
| null | null | null | """ Test Driven Development. """
import pytest
@pytest.fixture
def profiles() -> dict:
production = 'production'
staging = 'staging'
qa = 'qa'
return {'production': production, 'staging': staging, 'qa': qa}
@pytest.mark.skip(reason='Method not yet implemented.')
def test_add_profile(environment, profiles):
production = profiles['production']
environment.add_profile(production)
assert environment.profile == [production]
@pytest.mark.skip(reason='Method not yet implemented.')
def test_add_profiles(environment, profiles):
add_profiles = list(profiles.values())
environment.add_profiles(add_profiles)
assert environment.profile == add_profiles
@pytest.mark.skip(reason='Method not yet implemented.')
def test_primary_profile(environment, profiles):
add_profiles = list(profiles.values())
environment.add_profiles(add_profiles)
assert environment.primary_profile == add_profiles[0]
@pytest.mark.skip(reason='Method not yet implemented.')
def test_no_primary_profile(environment):
assert environment.primary_profile is None
@pytest.mark.skip(reason='Method not yet implemented.')
def test_no_primary_profile_raises(environment):
with pytest.raises(IndexError) as error:
environment.primary_profile
assert 'list index out of range' == f'{error.value!s}'
@pytest.mark.skip(reason='Method not yet implemented.')
def test_compare_environments_with_configuration_file(api, environments_file):
api = api(configuration_file=configure_vpc)
api.compare('vpc name 1', 'vpc name 2', 'vpc name 3')
@pytest.mark.skip(reason='Method not yet implemented.')
def test_compare_environments_with_Environment_objects(api, environment):
# WHEN I compare multiple environments
api.compare(environment, environment, environment)
# THEN
@pytest.mark.skip(reason='Method not yet implemented.')
def test_firewall_rules(api, environment):
api.security_groups(environment=environment)
@pytest.mark.skip(reason='Method not yet implemented.')
def test_inventory_output(api, environment):
api.inventory(environment=environment, detailed=False)
@pytest.mark.skip(reason='Method not yet implemented.')
def test_inventory_detailed_output(api, environment):
api.inventory(environment=environment, detailed=True)
| 31.888889 | 78 | 0.761324 | 0 | 0 | 0 | 0 | 2,216 | 0.965157 | 0 | 0 | 507 | 0.220819 |
a5d993daf62319705e260124c70d45da91cc0c68 | 1,743 | py | Python | CursoPython/dia 2/copia de clase.py | hamfree/PYTHON | 2df83c1da393f05cadf0fe3f8d3173d4420eda60 | [
"Apache-2.0"
]
| null | null | null | CursoPython/dia 2/copia de clase.py | hamfree/PYTHON | 2df83c1da393f05cadf0fe3f8d3173d4420eda60 | [
"Apache-2.0"
]
| null | null | null | CursoPython/dia 2/copia de clase.py | hamfree/PYTHON | 2df83c1da393f05cadf0fe3f8d3173d4420eda60 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# pylint: disable=E1601
"""
17/02/2018
Instructors: Álvaro Sánchez de Cruz / ???
- A review is given of what was taught on the previous day.
- Subprograms or functions:
- They come from structured programming.
- Syntax:
def nombreFuncion([parameter1,][parameter2,]...)
(function body)
[return value]
Values are always passed to functions by value.
Now let's look at an example:
def saludo()
print "Hola Mundo"
- The exercises proposed on day 1 are explained.
- Files
var = open('path to the file','a'|'r'|'w')
- JSON
It is a data format that originated in JavaScript; in Python terminology it is a
list of dictionaries, of the following form:
[
{},{},...
]
In Python, a JSON document can be turned into a dictionary very easily.
"""
# 'r' (read) mode: reads the file if it exists, otherwise it raises an error
def leerFichero():
f = open('clase.py','r')
print "Contenido de 'clase.py'"
print "-----------------------"
for l in f:
print l
# 'a' (append) mode: appends to the file if it exists, otherwise it creates it
def sobreescribirFichero():
f = open('prueba.txt','a')
for i in [1,3,5,7,9,21,12,32,23]:
f.write('Hola' + str(i) + '\n')
f.close()
# 'w' (write) mode: writes to the file if it exists, overwriting whatever it had, otherwise it creates it.
def leerEscribirFichero():
fr = open('clase.py','r')
fw = open('copia de clase.py','w')
for l in fr:
fw.write(l)
fr.close()
fw.close()
leerFichero()
sobreescribirFichero()
leerEscribirFichero()
| 24.549296 | 104 | 0.596672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,332 | 0.761143 |
a5d9baaf2337daeafdfe9b9a22db73d38a684f6f | 576 | py | Python | functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py | emctl/samples | 569f81035a6c214d4cda3687173e24003f17f95e | [
"MIT"
]
| 3 | 2021-11-16T11:24:27.000Z | 2021-11-21T17:11:24.000Z | functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py | emctl/samples | 569f81035a6c214d4cda3687173e24003f17f95e | [
"MIT"
]
| 7 | 2021-09-01T06:50:41.000Z | 2021-09-03T23:12:07.000Z | functions-and-keda/src/python-function-publisher/QueueTrigger/__init__.py | emctl/samples | 569f81035a6c214d4cda3687173e24003f17f95e | [
"MIT"
]
| 4 | 2021-02-05T17:30:28.000Z | 2021-08-16T21:26:55.000Z | import logging
import requests
import json
import azure.functions as func
dapr_url = "http://localhost:3500/v1.0"
def main(msg: func.QueueMessage):
logging.info(f"Python queue-triggered function received a message!")
message = msg.get_body().decode('utf-8')
logging.info(f"Message: {message}")
# Publish an event
url = f'{dapr_url}/publish/myTopic'
content = { "message": message }
logging.info(f'POST to {url} with content {json.dumps(content)}')
p = requests.post(url, json=content)
logging.info(f'Got response code {p.status_code}')
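# e.g. for a queue message "hi" this POSTs {"message": "hi"} to
# http://localhost:3500/v1.0/publish/myTopic, i.e. the Dapr publish endpoint for topic myTopic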
| 28.8 | 72 | 0.697917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.439236 |
a5da4714ac6a7f9235bc1e8123d0bcfaf76ea57c | 260 | py | Python | CH_10_testing_and_logging/T_26_logging_json_config.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
]
| null | null | null | CH_10_testing_and_logging/T_26_logging_json_config.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
]
| null | null | null | CH_10_testing_and_logging/T_26_logging_json_config.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
]
| null | null | null | import os
import json
from logging import config
name = os.path.splitext(os.path.basename(__file__))[0]
json_filename = os.path.join(os.path.dirname(__file__),
f'{name}.json')
with open(json_filename) as fh:
config.dictConfig(json.load(fh))
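# A minimal illustration (assumed content, not the shipped side-car file) of what the JSON
# handed to dictConfig could look like:
#     {"version": 1, "root": {"level": "DEBUG", "handlers": []}}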
| 20 | 55 | 0.661538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.053846 |
a5dac3d6ca2b3d760f8736d068bcd1c838b5581c | 2,618 | py | Python | tests/unit/test_upstream_dataset.py | ianbakst/tamr-client | ae7a6190a2251d31f973f9d5a7170ac5dc097f97 | [
"Apache-2.0"
]
| 9 | 2019-08-13T11:07:06.000Z | 2022-01-14T18:15:13.000Z | tests/unit/test_upstream_dataset.py | ianbakst/tamr-client | ae7a6190a2251d31f973f9d5a7170ac5dc097f97 | [
"Apache-2.0"
]
| 166 | 2019-08-09T18:51:05.000Z | 2021-12-02T15:24:15.000Z | tests/unit/test_upstream_dataset.py | ianbakst/tamr-client | ae7a6190a2251d31f973f9d5a7170ac5dc097f97 | [
"Apache-2.0"
]
| 21 | 2019-08-12T15:37:31.000Z | 2021-06-15T14:06:23.000Z | import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
@responses.activate
def test_upstream_dataset():
dataset_json = {
"id": "unify://unified-data/v1/datasets/12",
"name": "Project_1_unified_dataset_dedup_features",
"description": "Features for all the rows and values in the source dataset. Used in Dedup Workflow.",
"version": "543",
"keyAttributeNames": ["entityId"],
"tags": [],
"created": {
"username": "admin",
"time": "2019-06-05T18:31:59.327Z",
"version": "212",
},
"lastModified": {
"username": "admin",
"time": "2019-07-18T14:19:28.133Z",
"version": "22225",
},
"relativeId": "datasets/12",
"upstreamDatasetIds": ["unify://unified-data/v1/datasets/8"],
"externalId": "Project_1_unified_dataset_dedup_features",
}
upstream_json = ["unify://unified-data/v1/datasets/8"]
upstream_ds_json = {
"id": "unify://unified-data/v1/datasets/8",
"name": "Project_1_unified_dataset",
"description": "",
"version": "529",
"keyAttributeNames": ["tamr_id"],
"tags": [],
"created": {
"username": "admin",
"time": "2019-06-05T16:28:11.639Z",
"version": "83",
},
"lastModified": {
"username": "admin",
"time": "2019-07-22T20:31:23.968Z",
"version": "23146",
},
"relativeId": "datasets/8",
"upstreamDatasetIds": ["unify://unified-data/v1/datasets/6"],
"externalId": "Project_1_unified_dataset",
"resourceId": "8",
}
tamr = Client(UsernamePasswordAuth("username", "password"))
url_prefix = "http://localhost:9100/api/versioned/v1/"
dataset_url = url_prefix + "datasets/12"
upstream_url = url_prefix + "datasets/12/upstreamDatasets"
upstream_ds_url = url_prefix + "datasets/8"
responses.add(responses.GET, dataset_url, json=dataset_json)
responses.add(responses.GET, upstream_url, json=upstream_json)
responses.add(responses.GET, upstream_ds_url, json=upstream_ds_json)
project_ds = tamr.datasets.by_relative_id("datasets/12")
actual_upstream_ds = project_ds.upstream_datasets()
uri_dataset = actual_upstream_ds[0].dataset()
assert actual_upstream_ds[0].relative_id == upstream_ds_json["relativeId"]
assert actual_upstream_ds[0].resource_id == upstream_ds_json["resourceId"]
assert uri_dataset.name == upstream_ds_json["name"]
| 34.906667 | 109 | 0.615737 | 0 | 0 | 0 | 0 | 2,504 | 0.956455 | 0 | 0 | 1,137 | 0.434301 |
a5daeaca530d32aa4078eb1a40a959857dd7e442 | 14,531 | py | Python | pmaf/sequence/_multiple/_multiple.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
]
| 1 | 2021-07-02T06:24:17.000Z | 2021-07-02T06:24:17.000Z | pmaf/sequence/_multiple/_multiple.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
]
| 1 | 2021-06-28T12:02:46.000Z | 2021-06-28T12:02:46.000Z | pmaf/sequence/_multiple/_multiple.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
]
| null | null | null | import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from skbio import TabularMSA
from skbio.sequence import GrammaredSequence
from io import StringIO, IOBase
from shutil import copyfileobj
import copy
import numpy as np
from pmaf.internal.io._seq import SequenceIO
from pmaf.sequence._sequence._nucleotide import Nucleotide
from pmaf.sequence._metakit import MultiSequenceMetabase, NucleotideMetabase
from pmaf.sequence._shared import validate_seq_mode
from typing import Union, Optional, Any, Sequence, Generator
from pmaf.internal._typing import AnyGenericIdentifier
class MultiSequence(MultiSequenceMetabase):
"""Class responsible for handling multiple sequences."""
def __init__(
self,
sequences: Any,
name: Optional[str] = None,
mode: Optional[str] = None,
metadata: Optional[dict] = None,
aligned: bool = False,
**kwargs: Any
):
"""Constructor for :class:`.MultiSequence`
Parameters
----------
sequences
Anything that can be parsed as multiple sequences.
name
Name of the multi-sequence instance
mode
Mode of the sequences. All sequences must have same mode/type.
Otherwise error will be raised
metadata
Metadata of the multi-sequence instance
aligned
True if sequences are aligned. Default is False
kwargs
Compatibility
"""
if name is None or np.isscalar(name):
tmp_name = name
else:
raise TypeError("`name` can be any scalar")
if isinstance(metadata, dict):
tmp_metadata = metadata
elif metadata is None:
tmp_metadata = {}
else:
raise TypeError("`metadata` can be dict or None")
if mode is not None:
if validate_seq_mode(mode):
tmp_mode = mode.lower()
else:
raise ValueError("`mode` is invalid.")
else:
tmp_mode = mode
tmp_sequences = []
if isinstance(sequences, list):
if all(
[isinstance(sequence, NucleotideMetabase) for sequence in sequences]
):
tmp_sequences = sequences
elif all(
[isinstance(sequence, GrammaredSequence) for sequence in sequences]
):
tmp_sequences = [
Nucleotide(skbio_seq, mode=None, **kwargs)
for skbio_seq in sequences
]
else:
raise ValueError(
"`sequences` must have same type when provided as list."
)
else:
if tmp_mode is not None:
seq_gen = SequenceIO(sequences, upper=True).pull_parser(
parser="simple", id=True, description=True, sequence=True
)
for sid, desc, seq_str in seq_gen:
tmp_sequences.append(
Nucleotide(
seq_str,
name=sid,
mode=tmp_mode,
metadata={"description": desc},
**kwargs
)
)
else:
raise ValueError("`mode` cannot be None if raw read is performed.")
if aligned:
if len(set([sequence.length for sequence in tmp_sequences])) != 1:
raise ValueError("`sequences` must be all of the length if aligned.")
tmp_indices = [sequence.name for sequence in tmp_sequences]
if len(tmp_indices) != len(set(tmp_indices)):
raise ValueError("`sequences` must have unique names.")
tmp_modes = set([sequence.mode for sequence in tmp_sequences])
if len(tmp_modes) > 1:
raise ValueError("`sequences` cannot have different modes.")
if tmp_mode is not None:
if tmp_mode not in tmp_modes:
raise ValueError("`mode` must match modes of sequences.")
else:
tmp_mode = tmp_modes.pop()
tmp_internal_id = kwargs.get("internal_id", None)
if tmp_internal_id is not None:
for sequence in tmp_sequences:
if tmp_internal_id not in sequence.metadata.keys():
raise ValueError(
"Metadata of all sequences must contain same internal_id."
)
self.__indices = np.asarray([seq.name for seq in tmp_sequences])
self.__sequences = tmp_sequences
self.__metadata = tmp_metadata
self.__aligned = bool(aligned)
self.__internal_id = tmp_internal_id
self.__skbio_mode = tmp_sequences[0].skbio_mode
self.__mode = tmp_mode
self.__name = tmp_name
self.__buckled = bool(kwargs.get("buckled", None))
def __repr__(self):
class_name = self.__class__.__name__
name = self.__name if self.__name is not None else "N/A"
count = len(self.__sequences)
metadata_state = "Present" if len(self.__metadata) > 0 else "N/A"
aligned = "Yes" if self.__aligned else "No"
mode = self.__mode.upper() if self.__mode is not None else "N/A"
repr_str = (
"<{}:[{}], Name:[{}], Mode:[{}], Aligned: [{}], Metadata:[{}]>".format(
class_name, count, name, mode, aligned, metadata_state
)
)
return repr_str
def to_skbio_msa(
self, indices: Optional[AnyGenericIdentifier] = None
) -> TabularMSA:
"""Convert to :mod:`skbio` :class:`~skbio.alignment.TabularMSA`
instance.
Parameters
----------
indices
List of target sequences to select. Default is None for all sequences.
Returns
-------
Instance of :class:`skbio.alignment.TabularMSA`
"""
if self.__aligned:
tmp_sequences = self.__get_seqs_by_index(indices)
return TabularMSA([sequence.skbio for sequence in tmp_sequences])
else:
raise RuntimeError("TabularMSA can only be retrieved for alignment.")
def __get_seqs_by_index(self, ids: Optional[AnyGenericIdentifier]):
"""Get sequences by indices/ids."""
if ids is not None:
target_ids = np.asarray(ids)
else:
target_ids = self.__indices
if np.isin(self.__indices, target_ids).sum() == len(target_ids):
return [seq for seq in self.__sequences if seq.name in target_ids]
else:
raise ValueError("Invalid indices are provided.")
def get_consensus(
self, indices: Optional[AnyGenericIdentifier] = None
) -> Nucleotide:
"""If sequence are aligned, estimate consensus sequence from the
:term:`MSA`
Parameters
----------
indices
List of target sequences to select. Default is None for all sequences.
Returns
-------
Consensus sequence.
"""
if self.__aligned:
tmp_msa = self.to_skbio_msa(indices)
return Nucleotide(
tmp_msa.consensus(),
name=self.__name,
metadata=self.__metadata,
mode=self.__mode,
)
else:
raise RuntimeError("Consensus can be retrieved only from alignment.")
def get_subset(
self, indices: Optional[AnyGenericIdentifier] = None
) -> "MultiSequence":
"""Get subset of the mutli-sequence instance.
Parameters
----------
indices
Indices to subset for.
Returns
-------
Subset instance of :class:`.MultiSequence`
"""
return type(self)(
self.__get_seqs_by_index(indices),
name=self.__name,
metadata=self.__metadata,
mode=self.__mode,
aligned=self.__aligned,
)
def buckle_for_alignment(self) -> dict:
"""Buckle individual sequences for alignment.
Returns
        -------
Packed metadata of all sequences.
"""
if not self.__buckled:
from collections import defaultdict
from random import random
if self.__internal_id is None:
self.__internal_id = round(random() * 100000, None)
packed_metadata = {
"master-metadata": self.__metadata,
"__name": self.__name,
"__internal_id": self.__internal_id,
}
children_metadata = defaultdict(dict)
for tmp_uid, sequence in enumerate(self.__sequences):
tmp_uid_str = "TMP_ID_{}".format(str(tmp_uid))
children_metadata[tmp_uid_str] = sequence.buckle_by_uid(tmp_uid_str)
packed_metadata.update({"children-metadata": dict(children_metadata)})
self.__buckled = True
return packed_metadata
else:
raise RuntimeError("MultiSequence instance is already buckled.")
def restore_buckle(self, buckled_pack: dict) -> None:
"""Restore the buckled :class:`MultiSequence` instance.
Parameters
----------
buckled_pack
Backed up packed metadata of all individual sequences
Returns
-------
        None on success, otherwise an error is raised.
"""
if self.__buckled:
self.__metadata = buckled_pack["master-metadata"]
self.__name = buckled_pack["__name"]
self.__internal_id = buckled_pack["__internal_id"]
for sequence in self.__sequences:
tmp_uid = sequence.unbuckle_uid()
child_packed_metadata = buckled_pack["children-metadata"][tmp_uid]
sequence.restore_buckle(child_packed_metadata)
self.__indices = np.asarray([seq.name for seq in self.__sequences])
else:
raise RuntimeError("MultiSequence instance is not buckled.")
def get_iter(self, method: str = "asis") -> Generator:
"""Get generator for the idividual sequences.
Parameters
----------
method
            Method indicating how the generator must yield the sequence data.
Returns
-------
Generator for the sequences.
        Depending on `method`, the result can yield one of the following:
- 'asis' - (name[str], sequence[Instance])
- 'string' - (name[str], sequence[str])
- 'skbio' - (name[str], sequence[skbio])
"""
def make_generator():
for sequence in self.__sequences:
if method == "asis":
yield sequence.name, sequence
elif method == "string":
yield sequence.name, sequence.text
elif method == "skbio":
yield sequence.name, sequence.skbio
else:
raise ValueError("`method` is invalid.")
return make_generator()
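    # Editor's note (hypothetical usage, not in the original source): `method`
    # selects the payload yielded for each sequence, e.g.
    #   for name, text in mseq.get_iter(method="string"):
    #       print(name, len(text))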
def copy(self):
"""Copy current instance."""
return copy.deepcopy(self)
def write(self, file: Union[str, IOBase], mode: str = "w", **kwargs: Any) -> None:
"""Write the sequence data into the file.
Parameters
----------
file
File path or IO stream to write into
mode
File write mode such as "w" or "a" or "w+"
kwargs
Compatibility.
"""
buffer_io = self.__make_fasta_io(**kwargs)
if isinstance(file, IOBase):
file_handle = file
elif isinstance(file, str):
file_handle = open(file, mode=mode)
else:
raise TypeError("`file` has invalid type.")
copyfileobj(buffer_io, file_handle)
buffer_io.close()
def get_string_as(self, **kwargs):
"""Get string of all sequences.
Parameters
----------
kwargs
Compatibility. Will be passed to :meth:`pmaf.sequence.Nucleotide.write` method.
Returns
-------
String with formatted sequence data
"""
buffer_io = self.__make_fasta_io(**kwargs)
ret = buffer_io.getvalue()
buffer_io.close()
return ret
def __make_fasta_io(self, **kwargs):
"""Make a FASTA file IO stream."""
buffer_io = StringIO()
for sequence in self.__sequences:
sequence.write(buffer_io, mode="a", **kwargs)
buffer_io.seek(0)
return buffer_io
@classmethod
def from_buckled(
cls, sequences: Any, buckled_pack: dict, **kwargs: Any
) -> "MultiSequence":
"""Factory method to create :class:`.MultiSequence` using packed
metadata from buckling.
Parameters
----------
sequences
Sequences that will be passed to constructor
buckled_pack
Packed metadata produced during buckling
kwargs
Compatibility
Returns
-------
New instance of :class:`.MultiSequence`
"""
if not isinstance(buckled_pack, dict):
raise TypeError("`buckled_pack` must have dict type.")
tmp_multiseq = cls(sequences, buckled=True, **kwargs)
tmp_multiseq.restore_buckle(buckled_pack)
return tmp_multiseq
@property
def count(self):
"""Total number of sequences."""
return len(self.__sequences)
@property
def metadata(self):
"""Instance metadata."""
return self.__metadata
@property
def mode(self):
"""Mode/type of the sequences."""
return self.__mode
@property
def skbio_mode(self):
"""The :mod:`skbio` mode of the sequence."""
return self.__skbio_mode
@property
def sequences(self):
"""List of individual sequence instances."""
return self.__sequences
@property
def name(self):
"""Name of the instance."""
return self.__name
@property
def is_alignment(self):
"""Is mutli-sequence is aligned or not."""
return self.__aligned
@property
def is_buckled(self):
"""Is mulit-sequence instance is buckled or not."""
return self.__buckled
@property
def index(self):
"""Indices of the internals sequences."""
return self.__indices
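# --- Editor's usage sketch (added for illustration; not part of the original
# module). It assumes Nucleotide accepts a raw string plus `name` and a mode
# label such as "DNA"; the exact accepted mode values are an assumption.
#   seqs = [Nucleotide("ACGT", name="s1", mode="DNA"),
#           Nucleotide("ACGA", name="s2", mode="DNA")]
#   mseq = MultiSequence(seqs, name="demo", aligned=True)
#   print(mseq.count)            # 2
#   print(mseq.get_consensus())  # consensus Nucleotide built from the MSA
#   print(mseq.get_string_as())  # FASTA-formatted string of both sequences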
| 33.871795 | 91 | 0.566719 | 13,938 | 0.959191 | 1,019 | 0.070126 | 1,797 | 0.123667 | 0 | 0 | 4,793 | 0.329847 |
a5db8882e50338e2cfe3830ff393ba99f5232ba1 | 1,498 | py | Python | arvore_derivacao.py | rjribeiro/trabalho-formais | 358de668cc256c696fdc4b426a69cf5a3d17b511 | ["MIT"] | 3 | 2018-04-28T15:55:50.000Z | 2018-05-11T22:57:20.000Z | arvore_derivacao.py | rjribeiro/trabalho-formais | 358de668cc256c696fdc4b426a69cf5a3d17b511 | ["MIT"] | null | null | null | arvore_derivacao.py | rjribeiro/trabalho-formais | 358de668cc256c696fdc4b426a69cf5a3d17b511 | ["MIT"] | null | null | null |
class ArvoreDerivacao:
def __init__(self, conteudo, esquerda=None, direita=None):
self._conteudo = conteudo
self._esquerda = esquerda
self._direita = direita
self.children = [self._esquerda, self._direita]
@property
def conteudo(self):
return self._conteudo
def print_arvore(self, nivel=1):
"""
        Objective: print the whole tree whose root is at the given level.
        :param nivel: level (depth) of this node in the tree
:type nivel: int
:rtype: None
"""
print("Nível {espacos}: {:>{espacos}}".format(self._conteudo, espacos=nivel))
if self._direita:
self._direita.print_arvore(nivel + 1)
if self._esquerda:
self._esquerda.print_arvore(nivel + 1)
def palavra_gerada(self):
"""
        Objective: get the word generated by the derivation tree.
        :return: The derived word.
:rtype: str
"""
if not self._esquerda and not self._direita:
return self._conteudo
if self._esquerda:
prefixo = self._esquerda.palavra_gerada()
else:
prefixo = ""
if self._direita:
sufixo = self._direita.palavra_gerada()
else:
sufixo = ""
return prefixo + sufixo
if __name__ == '__main__':
a = ArvoreDerivacao('a')
b = ArvoreDerivacao('b')
A = ArvoreDerivacao('A', a)
B = ArvoreDerivacao('B', b)
S = ArvoreDerivacao('S', A, B)
S.print_arvore()
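    # Editor's addition (illustrative): the tree S(A(a), B(b)) derives the word
    # "ab", so the following line prints "ab".
    print(S.palavra_gerada())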
| 27.740741 | 85 | 0.579439 | 1,296 | 0.861702 | 0 | 0 | 63 | 0.041888 | 0 | 0 | 362 | 0.240691 |
a5dca4db049c83c9e0aaf82c2743e38347886e01 | 1,404 | py | Python | src/test.py | biqar/hypergraph-study | 04b54117eb8f684a72259b27b03162efb4c18cd0 | ["MIT"] | 2 | 2021-12-24T12:02:48.000Z | 2021-12-25T00:00:22.000Z | src/test.py | biqar/hypergraph-study | 04b54117eb8f684a72259b27b03162efb4c18cd0 | ["MIT"] | null | null | null | src/test.py | biqar/hypergraph-study | 04b54117eb8f684a72259b27b03162efb4c18cd0 | ["MIT"] | 1 | 2021-07-19T02:05:13.000Z | 2021-07-19T02:05:13.000Z |
import re
import sys
from operator import add
from pyspark.sql import SparkSession
def computeContribs(urls, rank):
"""Calculates URL contributions to the rank of other URLs."""
num_urls = len(urls)
for url in urls:
yield (url, rank / num_urls)
def parseNeighbors(urls):
"""Parses a urls pair string into urls pair."""
parts = re.split(r'\s+', urls)
for i in range(len(parts)):
for j in range(i,len(parts)):
if i!=j:
yield parts[i],parts[j]
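# Editor's worked example (comment only, not in the original file): for the
# line "a b c", parseNeighbors yields ('a', 'b'), ('a', 'c') and ('b', 'c'),
# i.e. every pair of distinct nodes appearing on that line.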
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: pagerank <file> <iterations>", file=sys.stderr)
sys.exit(-1)
print("WARN: This is a naive implementation of PageRank and is given as an example!\n" +
"Please refer to PageRank implementation provided by graphx",
file=sys.stderr)
# Initialize the spark context.
spark = SparkSession\
.builder\
.appName("PythonPageRank")\
.getOrCreate()
# Loads in input file. It should be in format of:
# URL neighbor URL
# URL neighbor URL
# URL neighbor URL
# ...
lines = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
print("ALL LINKS",lines.collect())
links = lines.flatMap(lambda urls: parseNeighbors(urls)).distinct().groupByKey().cache()
print("ALL LINKS",links.collect())
| 29.25 | 92 | 0.608262 | 0 | 0 | 424 | 0.301994 | 0 | 0 | 0 | 0 | 518 | 0.368946 |
a5df0a5e25ad5c8a611b093330f6ecc81a28362f | 1,312 | py | Python | wagtail_lightadmin/wagtail_hooks.py | leukeleu/wagtail_lightadmin | 6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5 | ["MIT"] | 4 | 2019-02-22T14:07:26.000Z | 2020-04-20T05:33:39.000Z | wagtail_lightadmin/wagtail_hooks.py | leukeleu/wagtail_lightadmin | 6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5 | ["MIT"] | 1 | 2019-05-18T08:04:32.000Z | 2019-05-20T13:39:14.000Z | wagtail_lightadmin/wagtail_hooks.py | leukeleu/wagtail_lightadmin | 6aa465e2673f4eb8865f7b4dc6cd2c7c41ed71a5 | ["MIT"] | 2 | 2017-06-06T09:34:53.000Z | 2019-09-10T16:16:12.000Z |
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.templatetags.static import static
from django.utils.html import format_html
from django.utils.module_loading import import_string
from wagtail.core import hooks
@hooks.register('insert_editor_css')
def editor_css():
return format_html(
'<link rel="stylesheet" href="{}">',
static('css/admin_editor.css')
)
@hooks.register('insert_editor_js')
def editor_js():
return format_html(
"""
<script type="text/javascript" src="{0}"></script>
<script type="text/javascript" src="{1}"></script>
""",
static('js/wagtailadmin/admin_link_widget.js'),
static('wagtailadmin/js/page-chooser-modal.js'),
)
@hooks.register('insert_editor_js')
def editor_js_hallo():
"""
We need an extra JS file for Wagtail<1.12.x
"""
import wagtail
_, version, _, = wagtail.__version__.split('.')
if int(version) < 12:
# Use our custom hallo-bootstrap
js = static('js/wagtailadmin/lighter-hallo-bootstrap.js')
else:
js = static('wagtailadmin/js/hallo-bootstrap.js')
return format_html(
"""
<script type="text/javascript" src="{0}"></script>
""",
js
)
| 26.24 | 65 | 0.641768 | 0 | 0 | 0 | 0 | 1,038 | 0.791159 | 0 | 0 | 582 | 0.443598 |
a5df58684b3949214fa0f306fa78ff1bd3a232de | 3,333 | py | Python | examples/datamining/page_rank.py | pooya/disco | e03a337b3b20e191459c74a367b9e89e873f71ff | ["BSD-3-Clause"] | 786 | 2015-01-01T12:35:40.000Z | 2022-03-19T04:39:22.000Z | examples/datamining/page_rank.py | pooya/disco | e03a337b3b20e191459c74a367b9e89e873f71ff | ["BSD-3-Clause"] | 51 | 2015-01-19T20:07:01.000Z | 2019-10-19T21:03:06.000Z | examples/datamining/page_rank.py | pooya/disco | e03a337b3b20e191459c74a367b9e89e873f71ff | ["BSD-3-Clause"] | 122 | 2015-01-05T18:16:03.000Z | 2021-07-10T12:35:22.000Z |
# Copyright 2009-2010 Yelp
# Copyright 2013 David Marin
# Copyright 2014 Disco Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Iterative implementation of the PageRank algorithm:
This example has been ported from the mrjob project.
http://en.wikipedia.org/wiki/PageRank
The format of the input should be of the form:
node_id initial_score neighbor_1 weight_1 neighbor_2 weight_2 ...
For example, the following input is derived from wikipedia:
$ cat input
0 1
1 1 2 1
2 1 1 1
3 1 0 0.5 1 0.5
4 1 1 0.33 3 0.33 5 0.33
5 1 1 0.5 4 0.5
6 1 1 0.5 4 0.5
7 1 1 0.5 4 0.5
8 1 1 0.5 4 0.5
9 1 4 1
10 1 4 1
$ cat input | ddfs chunk pages -
$ python page_rank.py --iterations 10 pages
The results are:
0 : 0.303085470793
1 : 3.32372143585
2 : 3.39335760361
3 : 0.360345571947
4 : 0.749335470793
5 : 0.360345571947
6 : 0.15
7 : 0.15
8 : 0.15
9 : 0.15
10 : 0.15
"""
from optparse import OptionParser
from disco.core import Job, result_iterator
from disco.worker.classic.worker import Params
from disco.worker.task_io import chain_reader
def send_score(line, params):
"""Mapper: send score from a single node to other nodes.
Input: ``node_id, node``
Output:
``node_id, ('n', node)`` OR
``node_id, ('s', score)``
"""
if not isinstance(line, str):
line = line[1]
fields = line.split()
node_id = int(fields[0])
score = float(fields[1])
yield node_id, ("n", " ".join(fields[2:]))
for i in range(2, len(fields), 2):
dest_id = int(fields[i])
weight = float(fields[i+1])
yield dest_id, ("s", score * weight)
def receive_score(iter, params):
from disco.util import kvgroup
d = params.damping_factor
for node_id, vals in kvgroup(sorted(iter)):
sum_v = 0
neighbors = None
for t, v in vals:
if t == "s":
sum_v += v
else:
neighbors = v
score = 1 - d + d * sum_v
yield node_id, str(node_id) + " " + str(score) + " " + neighbors
if __name__ == '__main__':
parser = OptionParser(usage='%prog [options] inputs')
parser.add_option('--iterations',
default=10,
                      help='Number of iterations')
parser.add_option('--damping-factor',
default=0.85,
help='probability a web surfer will continue clicking on links')
(options, input) = parser.parse_args()
results = input
params = Params(damping_factor=float(options.damping_factor))
for j in range(int(options.iterations)):
job = Job().run(input=results, map=send_score, map_reader = chain_reader, reduce=receive_score, params = params)
results = job.wait()
for _, node in result_iterator(results):
fields = node.split()
print fields[0], ":", fields[1]
| 26.452381 | 120 | 0.645965 | 0 | 0 | 970 | 0.291029 | 0 | 0 | 0 | 0 | 1,690 | 0.507051 |