seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
27066073984
|
import datetime
def print_progress_bar(curr_time, start_time, stop_time, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
curr_time - Required : current time (datetime.datetime)
start_time - Required : process start time (datetime.datetime)
stop_time - Required : end time (datetime.datetime)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
Based on: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
"""
elapsed_time = curr_time - start_time
process_time = stop_time - start_time
percent = ("{0:." + str(decimals) + "f}").format(100 * (elapsed_time / process_time))
filledLength = int(length * elapsed_time // process_time)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
# Print New Line on Complete
if curr_time >= stop_time:
print()
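# Hypothetical usage sketch (not part of the original file): animate the bar
# over a ten-second window, polling twice per second.
if __name__ == '__main__':
    import time
    start = datetime.datetime.now()
    stop = start + datetime.timedelta(seconds=10)
    while True:
        now = datetime.datetime.now()
        print_progress_bar(now, start, stop, prefix='Elapsed:', length=40)
        if now >= stop:
            break
        time.sleep(0.5)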
|
ak5793/stopwatch
|
stopwatch.py
|
stopwatch.py
|
py
| 1,386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28969370099
|
from django.urls import path
from apps.cafes.urls import CAFE_URL_KEYWORD
from apps.products import views
CATEGORY_LIST_URL_NAME = "category-list"
CATEGORY_DETAIL_URL_NAME = "category-detail"
CATEGORY_URL_KEYWORD = "category_id"
OPTION_GROUP_LIST_URL_NAME = "optiongroup-list"
OPTION_GROUP_DETAIL_URL_NAME = "optiongroup-detail"
OPTION_GROUP_URL_KEYWORD = "optiongroup_id"
PRODUCT_LIST_URL_NAME = "product-list"
PRODUCT_DETAIL_URL_NAME = "product-detail"
PRODUCT_URL_KEYWORD = "product_id"
urlpatterns = [
path(
f"<uuid:{CAFE_URL_KEYWORD}>/categories/",
views.CategoryAPIViewSet.as_view({"get": "list", "post": "create"}),
name=CATEGORY_LIST_URL_NAME,
),
path(
f"<uuid:{CAFE_URL_KEYWORD}>/categories/<int:{CATEGORY_URL_KEYWORD}>/",
views.CategoryAPIViewSet.as_view(
{"get": "retrieve", "put": "update", "delete": "destroy"}
),
name=CATEGORY_DETAIL_URL_NAME,
),
path(
f"<uuid:{CAFE_URL_KEYWORD}>/optiongroups/",
views.OptionGroupAPIViewSet.as_view({"get": "list", "post": "create"}),
name=OPTION_GROUP_LIST_URL_NAME,
),
path(
f"<uuid:{CAFE_URL_KEYWORD}>/optiongroups/<int:{OPTION_GROUP_URL_KEYWORD}>/",
views.OptionGroupAPIViewSet.as_view(
{"get": "retrieve", "put": "update", "delete": "destroy"}
),
        name=OPTION_GROUP_DETAIL_URL_NAME,
    ),
path(
f"<uuid:{CAFE_URL_KEYWORD}>/products/",
views.ProductAPIViewSet.as_view({"get": "list", "post": "create"}),
name=PRODUCT_LIST_URL_NAME,
),
path(
f"<uuid:{CAFE_URL_KEYWORD}>/products/<int:{PRODUCT_URL_KEYWORD}>/",
views.ProductAPIViewSet.as_view(
{"get": "retrieve", "put": "update", "delete": "destroy"}
),
name=PRODUCT_DETAIL_URL_NAME,
),
]
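# Hypothetical usage sketch (assumes this URLconf is included under a
# "products" namespace; names and kwargs mirror the constants above):
# from django.urls import reverse
# reverse(f"products:{PRODUCT_DETAIL_URL_NAME}",
#         kwargs={CAFE_URL_KEYWORD: cafe_id, PRODUCT_URL_KEYWORD: product_id})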
|
TGoddessana/cafehere
|
apps/products/urls.py
|
urls.py
|
py
| 1,800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41489732999
|
import numpy as np
from gym import spaces
import gym
import json
import pickle
class StateNormWrapper(gym.Wrapper):
"""
Normalize state value for environments.
"""
def __init__(self, env, file_name):
super(StateNormWrapper, self).__init__(env)
with open(file_name, "r") as read_file:
rl_confs = json.load(read_file) # hyperparameters for rl training
print(env.spec.id)
data_path_prefix = rl_confs["data_collect_confs"]["data_path"]+env.spec.id.split("-")[0].lower()+'/'
with open(data_path_prefix+'state_info.pkl', 'rb') as f:
self.state_stats=pickle.load(f)
def norm(self, s):
mean = self.state_stats['mean']
std = self.state_stats['std']
s = (s-mean)/std
return s
def step(self, a):
observation, reward, done, info = self.env.step(a)
return self.norm(observation), reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
return self.norm(observation)
def render(self, **kwargs):
pass
if __name__ == '__main__':
import matplotlib.pyplot as plt
# test
# EnvName = 'CartPole-v1'
EnvName = 'LunarLander-v2'
env = StateNormWrapper(gym.make(EnvName), file_name="rl_train.json")
for _ in range(10):
env.reset()
for _ in range(1000):
# env.render()
a = env.action_space.sample()
s, r, d, _ = env.step(a) # take a random action
if d:
break
print(s)
# print(s.shape)
env.close()
|
quantumiracle/Cascading-Decision-Tree
|
src/rl/env_wrapper.py
|
env_wrapper.py
|
py
| 1,617 |
python
|
en
|
code
| 32 |
github-code
|
6
|
39005364665
|
import numpy as np
import datetime
import math
def anagram(s1,s2):
s1=list(s1)
s2=list(s2)
    if len(s1)==(len(s2)):
        # compare sorted character lists so repeated letters are counted correctly
        if sorted(s1)==sorted(s2):
            print("Anagram")
        else:
            print("not an anagram")
    else:
        print("Strings are ****NOT**** Anagram")
def primerange(num):
newarr=[]
for num in range(0,num+1):
if num>1:
for i in range(2,num):
if num%i==0:
break
else:
#print(num,' ',sep=',',end='')
newarr.append(num)
print(newarr)
def primeanagram(num):
cnt=0
newarr=[]
new=[]
for num in range(0,num+1):
if num>1:
for i in range(2,num):
if num%i==0:
break
else:
#print(num,' ',sep=',',end='')
newarr.append(num)
print(newarr)
    # pairwise digit-anagram check over the primes found above
    for i in range(0,len(newarr)):
        for j in range(i+1,len(newarr)):
            newarr[i]=str(newarr[i])
            newarr[j]=str(newarr[j])
            if len(newarr[i])==len(newarr[j]):
                # sorted digit lists catch repeated digits correctly
                if sorted(newarr[i])==sorted(newarr[j]):
                    print(" ******Anagram******")
                    cnt+=1
                    new.append(newarr[i])
                    new.append(newarr[j])
                    print("Anagram:-",new)
                    print("total count",cnt)
                else:
                    print("not an anagram")
            else:
                print("Strings are ****NOT**** Anagram")
    # palindrome check: a number is a palindrome if its digits read the same reversed
    for i in range(0,len(newarr)):
        newarr[i]=str(newarr[i])
        if newarr[i]==newarr[i][::-1]:
            print("palindrome")
            cnt+=1
            print(cnt)
def insertionsort(alist):
# alist=alist.split(" ")
for i in range(0,len(alist)):
print(len(alist))
current=alist[i]
while i>0 and alist[i-1]>current:
alist[i]=alist[i-1]
i=i-1
alist[i]=current
print (alist)
def bubblesort(alist):
# alist=alist.split(" ")
for i in range(1,len(alist)):
for j in range(i):
if alist[j]>alist[j+1]:
temp=alist[j]
alist[j]=alist[j+1]
alist[j+1]=temp
print(alist)
print(len(alist))
def convert(string):
li=list(string.split(" "))
return li
def binaryserach(alist,key,length):
start=0
end=length-1
mid=0
print(start,end)
while start<=end:
        mid=(start+end)//2
if key == (alist[mid]):
print("\nEntered number is present at position",key,mid)
return -1
elif key<alist[mid]:
end=mid-1
elif key > alist[mid]:
start=mid +1
print("\n Element not found")
def dayofweek(m,d,y):
# m=int(input("Enter the month :"))
# d=int(input("Enter the date :"))
# y=int(input("Enter the year :"))
today=datetime.datetime(y,m,d)
Day=today.weekday()
print(Day)
    # Sakamoto/Gauss-style day-of-week formula needs integer division throughout
    yo=y-(14-m)//12
    x=yo+(yo//4)-(yo//100)+(yo//400)
    print(yo,x)
    mo=m+12*((14-m)//12)-2
    do=(d+x+(31*mo)//12)%7
    print(x,mo,do)
d1=math.floor(do)
print(d1)
if Day==0:
print("Monday")
elif Day ==1:
print("Tuesday")
elif Day ==2:
print("Wednesday")
elif Day ==3:
print("Thursday")
elif Day ==4:
print("Friday")
elif Day ==5:
print("Saturday")
else:
print("Sunday")
if d1==1:
print("Monday")
elif d1 ==2:
print("Tuesday")
elif d1 ==3:
print("Wednesday")
elif d1 ==4:
print("Thursday")
elif d1 ==5:
print("Friday")
elif d1 ==6 :
print("Saturday")
else:
print("Sunday")
def tempCon(c,f):
a=c*9/5 +32
print("Celsius to fahrenheit: ",a)
b = (f-32)*5/9
print("fahrenheit to Celsius: ",b)
def monpay(Y,R,P):
r=R/(12*100)
n=Y*12
p1=P*r
p2=math.pow(1/(1+r),n)
p3=1-p2
print("Enter the number of years in months :- ",n)
print("Enter the rate of interset ")
print("Payment to be paid monthly:",p1/p3)
print("Total amount to be paid back all together",(p1/p3)*n)
print(n,r)
print(p1,p2)
def dectobinary(n):
binaryarr=[0]*8
i=0
while n>0:
binaryarr[i]=n%2
n=int(n/2)
i+=1
for j in range(7,-1,-1):
print(binaryarr[j],end=" ")
return binaryarr
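# Hypothetical quick check: dectobinary(10) prints 0 0 0 0 1 0 1 0, since the
# bits are stored least-significant first and printed in reverse.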
def swap(dec):
j=7
for i in range(3,-1,-1):
temp=dec[i]
dec[i]=dec[j]
dec[j]=temp
j-=1
print()
for j in range(7,-1,-1):
print(dec[j],end=" ")
def bintodec(binaryarr):
for i in range(0,len(binaryarr)):
if binaryarr[i]==1:
k=math.pow(2,i)
print(k)
elif binaryarr[i]==0:
print()
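# NOTE (added): despite its name, the function below sorts the two halves and
# the full list with bubble-sort passes rather than a true merge step.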
def mergesort(alist):
if len(alist)>1:
mid=len(alist)//2
lefthalf=alist[:mid]
righthalf=alist[mid:]
mergesort(lefthalf)
mergesort(righthalf)
print(mid)
print(lefthalf)
print(righthalf)
for i in range(1,len(lefthalf)):
for j in range(i):
if lefthalf[j]> lefthalf[j+1]:
temp=lefthalf[j]
lefthalf[j]=lefthalf[j+1]
lefthalf[j+1]=temp
i+=1
print(lefthalf)
for i in range(1,len(righthalf)):
for j in range(i):
if righthalf[j] > righthalf[j+1]:
temp=righthalf[j]
righthalf[j]=righthalf[j+1]
righthalf[j+1]=temp
print(righthalf)
for i in range(1,len(alist)):
for j in range(0,i):
if alist[j]>alist[j+1]:
temp=alist[j]
alist[j]=alist[j+1]
alist[j+1]=temp
print(alist)
def vendmac(notes):
print("Amount Enterds into vebding machine",notes)
no=[]
n1=[1000,500,200,100,50,20,10,5,2,1]
i=-1
    while notes>0:
if i<len(n1)-1:
i+=1
if notes>= n1[i]:
notes=notes-n1[i]
print(n1[i])
i=-1
|
Rohan2596/Python_1_moth
|
Python_1_Month/Algorithms_programs/AlogoUtility.py
|
AlogoUtility.py
|
py
| 4,962 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26629807154
|
import random
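# Shuffle the names and deal them round-robin into qtd_times teams.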
nomes = ["nome1","nome2","nome3","nome4","nome5","nome6","nome7","nome8","nome9","nome10","nome11","nome12","nome13","nome14","nome15"]
qtd_times = 3
random.shuffle(nomes)
separar_times = [nomes[i::qtd_times] for i in range(qtd_times)]
times = list(separar_times)
indice = 1
for time in times:
print(f" Time {indice}: {time}")
indice +=1
|
flaviofontes29/sorteio_divisao_times
|
Escolha_time.py
|
Escolha_time.py
|
py
| 361 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
41380069765
|
from functools import wraps
import time
from utils.mics import colorstr
def fun_run_time(func):
    '''
    Decorator that reports a function's execution time.
    Place it directly above the function definition, e.g.
        @fun_run_time
        def xxx():
    '''
    @wraps(func)  # optional: preserves the original function's name in reports
def _inner(*args, **kwargs):
s_time = time.time()
ret = func(*args, **kwargs)
e_time = time.time()
#
print(colorstr("\t----function [{}] costs {} s".format(func.__name__, e_time-s_time), 'yellow'))
return ret
return _inner
def tic():
    '''
    Start a timer.
        t = tic()
    '''
s_time = time.time()
return s_time
def toc(s_time, word='tic-toc', act_number = 1, mute=True):
    '''
    Stop the timer and return the elapsed time in milliseconds.
        t = toc(t, 'module or function name', act_number, mute)
    mute=True suppresses printing.
    '''
e_time = time.time()
temp = int((e_time-s_time)*1000)
if not mute:
if act_number > 1:
print(colorstr(f"\t----module [{word}] costs {temp} ms, for {act_number} actions, ({int(temp/act_number)}ms/action)", 'yellow'))
else:
print(colorstr(f"\t----module [{word}] costs {temp} ms", 'yellow'))
return temp
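# Hypothetical usage sketch (not in the original file):
# t = tic()
# ... do some work ...
# elapsed_ms = toc(t, 'my_module', act_number=1, mute=False)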
|
Backlory/motionDetection
|
utils/timers.py
|
timers.py
|
py
| 1,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10422312368
|
import math
def main():
times = int(input())
local_best_length = 0.0000000000
best_length = 0.0000000000
for i in range(times): # test cases
conjunt = int(input())
for j in range(conjunt): # number of conjunts
robocopies = int(input())
list_points = []
for k in range(robocopies): # number of robocopies
line = input()
list_input = line.split()
x = float(list_input[0])
y = float(list_input[1])
list_points.append((x, y))
for p in range(len(list_points)):
for p1 in range(p + 1, len(list_points)):
d = calc_distance_even(list_points[p], list_points[p1])
if d > 0:
if d < local_best_length or local_best_length == 0.0:
local_best_length = d
if local_best_length > best_length or best_length == 0.0000000000:
best_length = local_best_length
local_best_length = 0.0000000000
print('{0:.10f}'.format(best_length))
def calc_distance_odd(p1, p2):
if p1[0] == p2[0]:
return p1[1] - p2[1]
if p1[1] == p2[1]:
return p1[0] - p2[0]
d = math.sqrt(math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2))
return d
def calc_distance_even(p1, p2):
    # axis-aligned pairs only; abs() keeps the distance positive regardless of order
    if p1[0] == p2[0]:
        return abs(p1[1] - p2[1])
    elif p1[1] == p2[1]:
        return abs(p1[0] - p2[0])
    else:
        return 0
main()
|
epaes90/uri-problems
|
1625.py
|
1625.py
|
py
| 1,540 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21764619292
|
# Approach 1: Backtracking with Trie
from typing import List

class Solution:
def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
WORD_KEY = '$'
trie = {}
for word in words:
node = trie
for letter in word:
node = node.setdefault(letter, {})
node[WORD_KEY] = word
rowNum = len(board)
colNum = len(board[0])
matchedWords = []
def backtrack(row, col, parent):
letter = board[row][col]
currNode = parent[letter]
# check if we find a match
word_match = currNode.pop(WORD_KEY, False)
if word_match:
matchedWords.append(word_match)
# mark cell as visited
board[row][col] = '#'
# explore neighbors in 4 directions
for rowOffset, colOffset in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
newRow, newCol = row + rowOffset, col + colOffset
if newRow < 0 or newRow >= rowNum or newCol < 0 or newCol >= colNum:
continue
if not board[newRow][newCol] in currNode:
continue
backtrack(newRow, newCol, currNode)
# end of exploration; restore the cell
board[row][col] = letter
# Optimization: incrementally remove the matched leaf node in Trie
if not currNode:
parent.pop(letter)
for row in range(rowNum):
for col in range(colNum):
if board[row][col] in trie:
backtrack(row, col, trie)
return matchedWords
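# Hypothetical quick check using the canonical LeetCode example:
# board = [["o","a","a","n"],["e","t","a","e"],["i","h","k","r"],["i","f","l","v"]]
# print(Solution().findWords(board, ["oath","pea","eat","rain"]))  # ['oath','eat']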
|
jimit105/leetcode-submissions
|
problems/word_search_ii/solution.py
|
solution.py
|
py
| 1,831 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1008765012
|
'''Problem 37: Truncatable primes'''
#g = open('primelist.txt','r')
g = open('primes1.txt','r')
print("g:",type(g),"Opened Prime list. Now reading it...")
h = g.read()
print("h: ",type(h),"Now splitting it into a list...")
j = h.split()
k = [int(x) for x in j]
print("PrimeList is",len(j),"numbers long")
print (k[:10])
primes = [x for x in k if str(x)[0] in ['3','7'] and str(x)[-1] in ['3','7']]
print("The last 10 T-Primes are",primes[-10:])
'''for x in j:
primes.append(int(x))'''
def isTPrime(n):
strn = str(n)
strn = strn.replace(' ','')
'''if int(strn[0]) in [1,5,9] or int(strn[-1]) in [1,5,9]:
return False'''
#print(strn,type(strn))
for i in range(len(strn)):
if strn[:i] != '' and strn[i:] != '':
print(strn[:i])
if int(strn[:i]) not in k:
return False
print(strn[i:])
if int(strn[i:]) not in k:
return False
return True
'''tprimes = [p for p in primes if isTPrime(p)]
tprimesInts = [int(p) for p in tprimes]
print(tprimes,sum(tprimesInts))'''
'''Output:
g: <class '_io.TextIOWrapper'> Opened Prime list. Now reading it...
h: <class 'str'> Now splitting it into a list...
PrimeList is 1000000 numbers long
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
The last 10 T-Primes are [7999727, 7999753, 7999757, 7999787, 7999793, 7999813, 7999847, 7999913, 7999963, 7999993]
[3, 7, 37, 73, 313, 317, 373, 797, 3137, 3797, 739397] 748251
'''
|
hackingmath/Project-Euler
|
euler37.py
|
euler37.py
|
py
| 1,521 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27213609715
|
from collections import deque, defaultdict
def bfs(n, adj):
visited = [False] * (n+1)
min_dist = [1e9] * (n+1)
visited[1] = True
min_dist[1] = 0
q = deque([1])
while q:
cur = q.popleft()
for a in adj[cur]:
if not visited[a]:
q.append(a)
visited[a] = True
min_dist[a] = min_dist[cur]+1
max_dist = max(min_dist[1:])
return min_dist.count(max_dist)
def solution(n, edge):
edge.sort()
adj = defaultdict(list)
for start, end in edge:
adj[start].append(end)
adj[end].append(start)
return bfs(n, adj)
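# Hypothetical quick check (sample from the problem statement):
# print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]))  # -> 3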
|
hammii/Algorithm
|
Programmers_python/가장_먼_노드.py
|
가장_먼_노드.py
|
py
| 677 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42739931950
|
import os
import pickle
import shutil
import numpy as np
from tqdm import tqdm
import time
class ModelManager:
'''
Model manager is designed to load and save all models
No matter what dataset name.
'''
path_name = './checkpoints/'
@classmethod
def __init__(cls, cfg):
if not cfg.MODEL.TRAINING and cfg.PATH.MODEL_PATH is not None:
cls.path_name = cfg.PATH.MODEL_PATH
elif cfg.MODEL.TRAINING and cfg.MODEL.MODEL_NAME:
cls.path_name += cfg.MODEL.MODEL_NAME+"-"+ time.strftime("%Y_%m_%d__%H_%M_%S", time.localtime()) +'/'
cfg.PATH.MODEL_PATH = cls.path_name
else:
raise Exception('Model path initialization error, please check your config.py')
def save_model(self, model, model_name):
'''
Save model to model/ dir
:param model: model to be saved
:param model_name: model name
:return: None
'''
if 'pkl' not in model_name:
model_name += '.pkl'
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
if not os.path.exists( self.path_name):
os.makedirs(self.path_name)
pickle.dump(model,open(self.path_name+model_name,'wb'))
def save_config(self,cfg):
'''
Save config to model/ dir as yaml file
:param cfg: config
:return: None
'''
if not os.path.exists(self.path_name):
os.makedirs(self.path_name)
cfg.PATH.CONFIG_PATH = self.path_name+'config.yaml'
with open(self.path_name+'config.yaml','w') as f:
f.write(cfg.dump())
def load_model(self, model_name):
'''
load model from model/ dir
:param model_name: model name
:return: model
'''
if 'pkl' not in model_name:
model_name += '.pkl'
if not os.path.exists(self.path_name+model_name):
raise Exception('Model not found %s'%(self.path_name+model_name))
return pickle.load(open(self.path_name+model_name,'rb'))
def save_test_result(self,test_result):
'''
Save test result to model/ dir
:param test_result: test result, as txt file
:return: None
'''
if not os.path.exists(self.path_name):
os.makedirs(self.path_name)
with open(self.path_name+'test_result.txt','w') as f:
for item in test_result:
f.write(str(item)+'\n')
@staticmethod
def clean_workspace():
'''
clean model/ dir
:return: None
'''
if os.path.exists('checkpoints'):
shutil.rmtree('checkpoints')
def get_time_cost(begin_time, end_time):
'''
get the time cost
:param begin_time: the start time
:param end_time: the end time
:return: the time cost
'''
time_cost = end_time - begin_time
return "%d day %d hour %d minute %.2f second"%(time_cost // 86400, time_cost % 86400 // 3600, time_cost % 3600 // 60, time_cost % 60)
def k_neighbors(sim_vector, k):
'''
input the similarity matrix, the index of the user, and the k
return the k nearest neighbor of the user
:param sim_vector: the similarity matrix
:param k: the k
:return: the k nearest neighbor of the user and the similarity between the user and the neighbor
'''
# get the k nearest neighbor of the user
neighbor = np.argsort(sim_vector)[-k-1:-1]
neighbor_sim = np.sort(sim_vector)[-k-1:-1] # do not include the user itself
return neighbor, neighbor_sim
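# Hypothetical quick check (assumes the user's self-similarity is the maximum):
# sims = np.array([0.1, 0.9, 0.3, 0.7, 1.0])   # last entry is the user itself
# k_neighbors(sims, 2)                          # -> (array([3, 1]), array([0.7, 0.9]))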
def get_score_matrix(train_rating,user_map,movie_map):
'''
get the score matrix
@param: train_rating, the train rating
@param: user_map, the user map
@param: movie_map, the movie map
@return: score_matrix, the movie popularity, the movie count
'''
print("<<<< begin to conduct the score matrix")
score_matrix = np.zeros((len(user_map.keys()),len(movie_map.keys())))
movie_popular = np.zeros(len(movie_map.keys()))
movie_count = len(movie_map.keys())
tqdm_process = tqdm(total=train_rating.shape[0])
for row in train_rating.itertuples(index=True,name="Pandas"):
user = user_map[getattr(row,'userId')]
movie = movie_map[getattr(row,'movieId')]
rate = getattr(row,'rating')
score_matrix[user][movie] = rate
movie_popular[movie] += 1
tqdm_process.update(1)
tqdm_process.close()
print(">>>> end to conduct the score matrix")
print("@ score matrix shape:",score_matrix.shape)
print('movie_popular shape:',movie_popular.shape)
print('movie_count:',movie_count)
return score_matrix, movie_popular, movie_count
def calculate_movie_similarity(train_set,pre_sim_calcul = False):
'''
calculate the tfidf of the movies
:param train_set: the train set, a tuple of (trainset,user_map,movie_map,movie_type_features)
:return: score_matrix, movie_popular, movie_sim, movie_count
'''
# get the train set
train_rating, user_map, movie_map, movie_type_features = train_set
score_matrix, movie_popular, movie_count = get_score_matrix(train_rating,user_map,movie_map)
movie_sim= np.zeros((movie_count, movie_count))
if pre_sim_calcul:
print("<<<< begin to conduct the movie similarity matrix")
begin_time = time.time() # record the start time
for i in tqdm(range(movie_count)):
movie_sim[i][i] = 1
for j in range(i+1,movie_count):
movie_sim[i][j] = cosine_similarity(movie_type_features[i],movie_type_features[j])
movie_sim[j][i] = movie_sim[i][j]
end_time = time.time() # record the end time
print(">>>> end to conduct the movie similarity matrix")
print("@ time cost: %s"%get_time_cost(begin_time,end_time))
else:
print("post calculate the similarity during prediction!")
return score_matrix, movie_popular, movie_sim, movie_count,user_map,movie_map,movie_type_features
def cosine_similarity(list1,list2):
'''
calculate the cosine_similarity of list1 and list2
:param list1: the first list
:param list2: the second list
:return: the cosine_similarity
'''
# get the number of common items
assert(len(list1) == len(list2))
n = len(list1)
assert(n > 0)
# calculate the sum of the two lists
sum1 = sum(list1*list2)
# calculate the square of the two lists
den = np.sqrt(sum(list1**2)) * np.sqrt(sum(list2**2))
# calculate the cosine similarity
if den == 0:
return 0
else:
return sum1/den
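# Hypothetical quick check: orthogonal vectors give 0, parallel vectors give 1.
# cosine_similarity(np.array([1, 0]), np.array([0, 1]))  # -> 0
# cosine_similarity(np.array([2, 0]), np.array([1, 0]))  # -> 1.0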
def calculate_user_sim_matrix(train_set,pre_sim_calcul = True):
'''
calculate the similarity matrix between users
:param train_set: the train set, a tuple of (trainset,user_map,movie_map,movie_type_features)
"""
:return: the score_matrix, the similarity matrix, movie_popular, movie_count
'''
# conduct the score matrix
print("<<<<<< begin to caculate the similarity matrix, the movie popularity and the movie count")
train_rating, user_map, movie_map, movie_type_features = train_set
score_matrix, movie_popular, movie_count = get_score_matrix(train_rating,user_map,movie_map)
# get the similarity matrix between users
user_sim_matrix = np.zeros((score_matrix.shape[0],score_matrix.shape[0]))
if pre_sim_calcul:
user_sim_matrix = get_user_sim_matrix(score_matrix)
else:
print("post calculate the similarity during prediction!")
print(">>>> end to caculate the similarity matrix.")
print('user_sim_matrix shape:',user_sim_matrix.shape)
return score_matrix,user_sim_matrix, movie_popular, movie_count,user_map,movie_map,movie_type_features
def get_user_sim_matrix(input_matrix):
'''
get the similarity matrix between users with pearson similarity
:param input_matrix: the input matrix with shape (n_users, n_items)
:return: the similarity matrix
'''
# get the shape of the input matrix
begin_time = time.time() # record the start time
print("<<<< begin to get the similarity matrix")
input_matrix = np.array(input_matrix) # convert to numpy array
print('input score matrix shape:',input_matrix.shape)
# get the number of users
n_users = input_matrix.shape[0]
# calculate the similarity matrix between users with person similarity
user_sim_matrix = np.zeros((n_users, n_users))
print('user_sim_matrix shape:',user_sim_matrix.shape)
for i in tqdm(range(n_users)):
user_sim_matrix[i][i] = 1
for j in range(i+1,n_users):
user_sim_matrix[i][j] = pearson_sim(input_matrix[i],input_matrix[j])
user_sim_matrix[j][i] = user_sim_matrix[i][j]
print(">>>> end to get the similarity matrix")
end_time = time.time() # record the end time
print('@ time cost: '+get_time_cost(begin_time, end_time))
return user_sim_matrix
def pearson_sim(list1,list2):
'''
calculate the pearson similarity between two lists
:param list1: the first list
:param list2: the second list
:return: the pearson similarity
'''
# get the number of common items
assert len(list1) == len(list2)
n = len(list1)
assert n > 0
# calculate the sum of the two lists
avg1 = sum(list1)/n
avg2 = sum(list2)/n
norm1 = list1 - avg1
norm2 = list2 - avg2
# calculate the sum of the two lists
sum1 = sum(norm1*norm2)
# calculate the square of the two lists
den = np.sqrt(sum(norm1**2)) * np.sqrt(sum(norm2**2))
# calculate the pearson similarity
if den == 0:
return 0.0
else:
return sum1/den
def SSE_error(prediction,real_rating):
'''
calculate the SSE error
:param prediction: the prediction of the user
:param real_rating: the real rating of the user
:return: the SSE error
'''
# get the prediction and the real rating
prediction = np.array(prediction)
real_rating = np.array(real_rating)
# calculate the SSE error
SSE = sum((prediction - real_rating)**2)
return SSE
if __name__ == '__main__':
# test the similarity matrix
from dataset import Dataset
from config import cfg
dataset = Dataset(cfg)
train_set = dataset.get_trainset()
a = pearson_sim(np.array([1,2,3,4,5]),np.array([1,2,3,4,5]))
b = pearson_sim(np.array([1,2,3,4,5]),np.array([5,4,3,2,1]))
print(a,b)
    score_matrix,user_sim_matrix, movie_popular, movie_count,user_map,movie_map,movie_type_features = calculate_user_sim_matrix(train_set,pre_sim_calcul = False)
    pickle.dump(user_map,open('user_map.pkl','wb'))
pickle.dump(movie_map,open('movie_map.pkl','wb'))
# print(user_sim_matrix, movie_popular, movie_count)
# pickle.dump(train_set, open('./checkpoints\CF-2022_04_11__11_32_40/trainset.pkl', 'wb'))
# pickle.dump(score_matrix,open('checkpoints\CF-2022_04_11__11_32_40\score_matrix.pkl','wb'))
# test the model_manager
# model_manager = ModelManager(cfg)
# model_manager.clean_workspace()
# model_manager.save_model(user_sim_matrix, 'user_sim_matrix')
# model_manager.save_model(movie_popular, 'movie_popular')
# model_manager.save_model(movie_count, 'movie_count')
# d = model_manager.load_model('score_matrix')
# a = model_manager.load_model('user_sim_mat')
# b = model_manager.load_model('movie_popular')
# c = model_manager.load_model('movie_count')
# print(a[0:3],b,c,d[0:3])
# test the time cost
# begin_time = time.time() # record the start time
# time.sleep(3)
# end_time = time.time() # record the end time
# # print the time cost
# print('@ time cost:',get_time_cost(begin_time,end_time))
|
Jack-Lio/RecommenderSystem
|
utls.py
|
utls.py
|
py
| 11,868 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33344135925
|
import os
import logging
from pathlib import Path
from llama_index import (
GPTSimpleVectorIndex,
GPTSimpleKeywordTableIndex,
SimpleDirectoryReader
)
from llama_index.indices.composability import ComposableGraph
# Initialise Logger
logging.basicConfig(level=logging.INFO, format="[{asctime}] - {funcName} - {message}", style='{')
logger = logging.getLogger("BUILD_INDEX")
openai_api_key = os.environ.get('OPENAI_API_KEY')
# Load Documents
cv_root_directory = Path()/'data'
for directory_index in range(1,4):
document = SimpleDirectoryReader(cv_root_directory/f'cv{directory_index}').load_data()
index = GPTSimpleVectorIndex.from_documents(document)
index_file = Path()/'data'/f'cv_{directory_index}_index.json'
# save index to disk
index.save_to_disk(index_file)
# Select one index to prove need for composability
# load index from disk
index = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_1_index.json')
# Query index
spock_address = index.query("Where does Spock Sarek Live ?")
logger.info(spock_address)
uhura_address = index.query("Where does Uhura Live ?")
logger.info(uhura_address)
# Compose indices for query
# Generate indices from files
index_cv_1 = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_1_index.json')
index_cv_2 = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_2_index.json')
index_cv_3 = GPTSimpleVectorIndex.load_from_disk(cv_root_directory/'cv_3_index.json')
# Write up summaries
cv_1_summary="Curriculum Vitae of Nyota Uhura"
cv_2_summary="Curriculum Vitae of Spock Sarek"
cv_3_summary="Curriculum Vitae of James T. Kirk"
# set query config
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs": {
"similarity_top_k": 1
}
},
{
"index_struct_type": "keyword_table",
"query_mode": "simple",
"query_kwargs": {}
},
]
index_all_cvs = ComposableGraph.from_indices(
GPTSimpleKeywordTableIndex,
[index_cv_1, index_cv_2, index_cv_3],
index_summaries=[cv_1_summary, cv_2_summary, cv_3_summary],
max_keywords_per_chunk=50
)
# Query again across indices
spock_address = index_all_cvs.query("Where does Spock Sarek Live ?")
uhura_actress = index_all_cvs.query("Who played Nyota Uhura ?")
kirk_players = index_all_cvs.query("Where has James Kirk been portrayed ?")
logger.info(spock_address)
logger.info(uhura_actress)
logger.info(kirk_players)
|
gilgamesh7/iliad_llama
|
04_local_data_update_index.py
|
04_local_data_update_index.py
|
py
| 2,482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24988570911
|
from osv import fields, osv
class account_journal_simulation(osv.osv):
_name = "account.journal.simulation"
_description = "Simulation level"
_columns = {
'name': fields.char('Simulation name', size=32, required=True),
'code': fields.char('Simulation code', size=8, required=True),
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code of the simulation must be unique !')
]
_order = "name"
account_journal_simulation()
def _state_simul_get(self, cr, uid, context={}):
obj = self.pool.get('account.journal.simulation')
ids = obj.search(cr, uid, [])
res = obj.read(cr, uid, ids, ['code', 'name'], context)
return [('valid','Base')]+ [(r['code'], r['name']) for r in res]
class account_journal(osv.osv):
_inherit = "account.journal"
_columns = {
'state': fields.selection(_state_simul_get, 'Status', required=True),
'parent_ids': fields.many2many('account.journal', 'account_journal_simulation_rel', 'journal_src_id', 'journal_dest_id', 'Child journals'),
'child_ids': fields.many2many('account.journal', 'account_journal_simulation_rel', 'journal_dest_id', 'journal_src_id', 'Parent journal'),
}
_defaults = {
'state': lambda self,cr,uid,context: 'valid'
}
account_journal()
class account_move_line(osv.osv):
_inherit = "account.move.line"
def search_not_run(self, cr, uid, crit, offset=0, limit=None, order=None, context={}):
if not 'fiscalyear' in context:
context['fiscalyear'] = self.pool.get('account.fiscalyear').find(cr, uid)
ok = True
for c in crit:
if c[0]=='journal_id':
ok = False
break
if 'journal_id' in context:
ok=False
if ok:
plus = ''
for state in context.get('journal_state', []):
plus+=",'"+state+"'"
cr.execute("select id from account_journal where state in ('valid'"+plus+")")
crit.append(('journal_id', 'in', map(lambda x: x[0], cr.fetchall())))
res = super(account_move_line, self).search(cr, uid, crit, offset, limit, order, context)
return res
def _query_get(self, cr, uid, obj='l', context={}):
res = super(account_move_line, self)._query_get(cr, uid, obj, context)
if context.get('journal_state', []):
plus = " and ("+obj+".journal_id in (select id from account_journal where state in ('valid', "+','.join(map(lambda x: "'"+x+"'", context['journal_state']))+")))"
else:
plus = " and ("+obj+".journal_id in (select id from account_journal where state='valid'))"
return res+plus
account_move_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
account_simulation/account_simulation.py
|
account_simulation.py
|
py
| 2,781 |
python
|
en
|
code
| 9 |
github-code
|
6
|
10548067106
|
#!/usr/bin/env python3
"""
From a set of zone transits representing trips between stops, work out
the effective trip time for a passenger arriving at the the origin every
minute from the departure time of the first bus to the departure time of
the last one
"""
import collections
import datetime
import logging
import sys
import csv
import isodate
zones = ['milton_pandr_south', 'milton_pandr_north']
logger = logging.getLogger('__name__')
header = [
'Passenger_Arrival',
'Passenger_Wait',
'Bus_Departure',
'Bus_Arrival',
'Bus_Duration',
'Bus_Interval',
'Passenger_Duration',
]
def process_zones():
for zone in zones:
logger.debug('Processing %s', zone)
# Read in...
in_filename = 'transits-{}.csv'.format(zone)
logger.info('Reading %s', in_filename)
with open(in_filename, 'r', newline='') as in_file:
input = csv.reader(in_file, dialect='excel', quoting=csv.QUOTE_ALL)
next(input) # Skip headers
previous_depart = None
trip_table = collections.OrderedDict()
for row in input:
trip = {}
raw_arrive, raw_duration, raw_distance = row
trip['arrive'] = isodate.parse_datetime(raw_arrive)
trip['duration'] = datetime.timedelta(seconds=float(raw_duration))
trip['depart'] = trip['arrive'] - trip['duration']
day = trip['depart'].date()
trip['distance'] = float(raw_distance)
trip['interval'] = (trip['depart'] - previous_depart).total_seconds() if previous_depart else None
if day not in trip_table:
trip_table[day] = []
trip_table[day].append(trip)
previous_depart = trip['depart']
# ... write out
step = datetime.timedelta(minutes=1)
out_filename = 'trips-{}.csv'.format(zone)
logger.info('writing %s', out_filename)
with open(out_filename, 'w', newline='') as out_file:
output = csv.writer(out_file, dialect='excel', quoting=csv.QUOTE_ALL)
output.writerow(header)
for day in trip_table:
logger.info('Processing %s %s', zone, day)
todays_trips = trip_table[day]
# Find the minute before the first bus of the day
start = todays_trips[0]['depart'].replace(second=0)
# And the last departure of the day
end = todays_trips[-1]['depart']
logger.debug("Start %s, end %s, step %s", start, end, step)
# Step through the day from 'start' to 'end' in steps of 'step'
# Find the next bus to depart after 'start'
while start < end:
# Find first departure after 'start'
for row in todays_trips:
logger.debug("row depart: %s, start: %s", row['depart'], start)
if row['depart'] > start:
wait = int((row['depart'] - start).total_seconds())
traveling = int((row['duration']).total_seconds())
trip_duration = wait + traveling
output.writerow([
start,
wait,
row['depart'],
row['arrive'],
traveling,
row['interval'],
trip_duration,
])
break
else:
logger.error("No bus for a departure at %s", start)
start = start + step
def main():
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logger.info('Start')
process_zones()
logger.info('Stop')
if __name__ == "__main__":
main()
|
SmartCambridge/milton_road_study
|
initial_investigation/expand_transits.py
|
expand_transits.py
|
py
| 4,037 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22799615615
|
from django.shortcuts import render
from django.http import HttpResponse
from myapp.models import City,Country,Person
from myapp.forms import PersonForm
from django.shortcuts import redirect
# Create your views here.
def index(request):
country=Country.objects.all()
context={
'country':country,
}
#return HttpResponse("hey%s"%slug)
return render(request, 'myapp/home.html', context)
def add_person(request):
if request.method=="POST":
form=PersonForm(request.POST)
if form.is_valid():
form.save()
return redirect("/")
else:
form=PersonForm()
context={
'form':form,
}
return render(request, 'myapp/person.html', context)
def get_city(request,id):
opt2_html = ""
try:
country=Country.objects.get(pk = id)
city = City.objects.filter(country_id = country.id)
# make_models = company.makemodel_set.all()
for c in city:
opt2_html += "<option value='"+str(c.id)+"'>"+c.name+"</option>"
print(opt2_html)
context={
'country':country,
'city':city,
}
    except Exception:
        # write_exception is not defined in this module; log inline instead
        print("Error in fetching options 2")
return HttpResponse(opt2_html)
# return render(request, 'myapp/home.html', context)
|
pappubca005/dynamic-dropdown
|
myapp/views.py
|
views.py
|
py
| 1,346 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20289549716
|
from stat_arb.src.data_loader.dao.dataframe.RawPostgresSampledDataLoader import RawPostgresSampledDataLoader
from stat_arb.src.data_loader.dao.dataframe.ClickhouseTradesDataLoader import ClickhouseTradesDataLoader
from stat_arb.src.data_loader.database import database_config
from datetime import datetime
from stat_arb.src.data_loader.general.Interval import Interval
from stat_arb.src.data_loader.general.SamplingSchemas import SamplingSchemas
from static_data import PATH
# queries = [
# {'source': 'MOEX_DIRECT', 'instrument': 'USD/RUB_T+1', 'size': 1_000_000},
# {'source': 'MOEX_DIRECT', 'instrument': 'EUR/USD_T+1', 'size': 1_000_000},
# {'source': 'MOEX_DIRECT', 'instrument': 'CNH/RUB_T+1', 'size': 1_000_000},
# {'source': 'RBI', 'instrument': 'EUR/USD_T+2', 'size': 1_000_000},
# {'source': 'RBI', 'instrument': 'USD/CNH_T+2', 'size': 1_000_000},
# ]
queries = [
{'source': 'MOEX', 'instrument': 'USD/RUB_T+1', 'size': 3_000_000}
]
interval = Interval(datetime(2021, 1, 1), datetime(2021, 12, 31))
def load_data(query: dict, interval: Interval):
print('loading:\n', query, '\n', interval, '\n')
with database_config.sql_engine_fxet_db1.connect() as connection:
loader = RawPostgresSampledDataLoader(connection.connection.connection)
vwap = loader.load_vwap_for_interval(query['source'],
query['instrument'],
interval,
SamplingSchemas.FIRST_PRICE_PREDICTION_SCHEMA,
query['size'])
return vwap
if __name__ == '__main__':
for q in queries:
source = q['source'].split('_')[0].lower()
instrument = q['instrument'].split('_')[0].replace('/', '').upper()
spot_data = load_data(q, interval)
spot_data.to_csv(f'{PATH}/{source}/{instrument}.csv')
|
v-buchkov/statistical_arbitrage_backtester
|
download_hourly_data.py
|
download_hourly_data.py
|
py
| 1,939 |
python
|
en
|
code
| 2 |
github-code
|
6
|
15398422361
|
# Implement the RLE algorithm: a module for compressing and restoring data.
# Input and output data are stored in separate text files.
def get_coding(text):
with open(text, 'r') as data:
txt = data.readline()
count = 1
res = ''
for i in range(len(txt)-1):
if txt[i] == txt[i+1]:
count += 1
else:
res = res + str(count) + txt[i]
count = 1
if count > 1 or (txt[len(txt)-2] != txt[-1]):
res = res + str(count) + txt[-1]
with open('coding.txt', 'w') as data:
data.write(res)
def get_decoding(text):
with open(text, 'r') as data:
txt = data.readline()
number = ''
res = ''
for i in range(len(txt)):
if not txt[i].isalpha():
number += txt[i]
else:
res = res + txt[i] * int(number)
number = ''
with open('decoding.txt', 'w') as data:
data.write(res)
return res
get_coding('text.txt')
get_decoding('coding.txt')
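# Hypothetical round-trip check (assumes text.txt holds letters only):
# "aaabcc" in text.txt -> "3a1b2c" in coding.txt -> "aaabcc" in decoding.txt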
|
iiiivanCh/dz05python
|
task05_04.py
|
task05_04.py
|
py
| 1,135 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
73439288188
|
import argparse
from subcommands.setup.parser import parser as setup_parser
from subcommands.export.parser import parser as export_parser
from subcommands.info.parser import parser as info_parser
from subcommands.process.parser import parser as process_parser
from subcommands.prune.parser import parser as prune_parser
from subcommands.version.parser import parser as version_parser
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Runs data processing live for incoming data'
)
subparsers = parser.add_subparsers()
subparsers.add_parser(
name='setup',
help='Generate config files for setting up Hotspur',
parents=[setup_parser]
)
subparsers.add_parser(
name='process',
help='Automatically find and process EM data',
parents=[process_parser]
)
subparsers.add_parser(
name='info',
help='Retrieve info about projects and sessions',
parents=[info_parser]
)
subparsers.add_parser(
name='export',
help='Export data alongside Relion metadata star files',
parents=[export_parser]
)
subparsers.add_parser(
name='prune',
help='Remove processed data and databases for projects or sessions',
parents=[prune_parser]
)
subparsers.add_parser(
name='version',
help='Print the current version',
parents=[version_parser]
)
args = parser.parse_args()
if 'config' in args:
from utils.config import load_config
load_config(args.config)
if 'func' in args:
args.func(args)
else:
parser.print_help()
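# Hypothetical invocations (subcommand names follow the parsers registered above):
#   python hotspur.py setup --help
#   python hotspur.py version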
|
zruan/hotspur_command
|
hotspur.py
|
hotspur.py
|
py
| 1,681 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27578228523
|
#!/usr/bin/env python3
import argparse
import configparser
from pathlib import Path
from rich import console
import sys
sys.path.append("/home/vermin/IdeaProjects/summalarva")
from summalarva.openai_client import OpenAIClient
from summalarva.orgnoter import OrgNoter
console = console.Console()
config = configparser.ConfigParser()
argparser = argparse.ArgumentParser()
argparser.add_argument("input", type=str, help="Input file")
argparser.add_argument("--config", type=str, help="Config file")
args = argparser.parse_args()
input_path = Path(args.input).expanduser()
if args.config:
config.read(args.config)
else:
config.read(Path("~/.config/summalarva.ini").expanduser())
openai_api_key = config["openai"]["api_key"]
if config["openai"]["host"]:
openai_host = config["openai"]["host"]
openai_client = OpenAIClient(openai_api_key, openai_host)
else:
openai_client = OpenAIClient(openai_api_key)
console.print("Start processing file", args.input)
summarises = openai_client.summarize_document(args.input)
try:
org_noter = OrgNoter(args.input)
for page_num,summary in summarises.items():
org_noter.page_summarize_model_append(page_num, summary)
console.print("Start create org noter")
org_noter.create_note()
except Exception as e:
raise e
summary_text = ""
for page_num, summary in summarises.items():
summary_text += f"Page {page_num}\n\n{summary}\n\n"
with open("summary.txt", "w") as f:
f.write(summary_text)
|
nhannht/summalarva
|
summalarva/summarize_pdf.py
|
summarize_pdf.py
|
py
| 1,484 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36621325200
|
import pygame
import sys
from moviepy.editor import VideoFileClip
from PIL import Image
pygame.init()
music_background = pygame.mixer.music.load("assets/LostCompanionTomboFry.mp3")
pygame.mixer.music.play()
pygame.mixer.music.set_volume(0.2)
lar = 550
hut = 700
screen = pygame.display.set_mode((lar, hut))
pygame.display.set_caption("Menu")
gif_path = "assets/bg.gif"
clip = VideoFileClip(gif_path)
fps = clip.fps
frames = []
for t in range(0, int(clip.duration * fps)):
frame = clip.get_frame(t / fps)
pil_image = Image.fromarray((frame * 255).astype('uint8'))
pil_image = pil_image.resize((lar, hut))
pygame_image = pygame.image.fromstring(pil_image.tobytes(), pil_image.size, pil_image.mode)
frames.append(pygame_image)
# Load the menu resources up front
fonte = pygame.font.Font(None, 30)
texto_play = fonte.render("Play", True, (0, 0, 0))
texto_quit = fonte.render("Quit", True, (0, 0, 0))
Title = fonte.render("Pythongoras-Game", True, (255, 255, 255))
def mostrar_menu():
frame_index = 0
clock = pygame.time.Clock()
while True:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif evento.type == pygame.MOUSEBUTTONDOWN:
if batom_play.collidepoint(evento.pos):
pygame.time.delay(100)
iniciar_jogo()
elif batom_quit.collidepoint(evento.pos):
pygame.quit()
sys.exit()
screen.blit(frames[frame_index], (0, 0))
batom_Title = pygame.Rect(190, 100 + 50, 150, 50)
pos_text_Title = Title.get_rect(center=batom_Title.center)
screen.blit(Title, pos_text_Title)
batom_play = pygame.Rect(lar/2 - 75, hut/2 + 50, 150, 50)
pygame.draw.rect(screen, (255, 255, 255), batom_play)
pos_text_play = texto_play.get_rect(center=batom_play.center)
screen.blit(texto_play, pos_text_play)
if batom_play.collidepoint(pygame.mouse.get_pos()):
pygame.draw.rect(screen, (200, 200, 200), batom_play)
batom_quit = pygame.Rect(lar/2 - 75, hut/2 + 140, 150, 50)
pygame.draw.rect(screen, (255, 255, 255), batom_quit)
pos_text_quit = texto_quit.get_rect(center=batom_quit.center)
screen.blit(texto_quit, pos_text_quit)
if batom_quit.collidepoint(pygame.mouse.get_pos()):
pygame.draw.rect(screen, (200, 200, 200), batom_quit)
pygame.display.flip()
frame_index = (frame_index + 1) % len(frames)
clock.tick(fps)
def iniciar_jogo():
print("O jogo começou!")
import Chose
mostrar_menu()
|
RuFiripo/Pythongoras-Game
|
menu.py
|
menu.py
|
py
| 2,712 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32100325594
|
import jittor as jt
from jittor import Module
from jittor import nn
import pygmtools as pygm
import numpy as np
import parameter
class AlexNet(Module):
def __init__(self, *args, **kw) -> None:
super().__init__(*args, **kw)
self.padsize = parameter.parameters().pad
self.kernel_size = parameter.parameters().kernel_size
self.side_len = parameter.parameters().side_len
self.CNNoutSize = int(self.side_len / 2 + self.padsize - (self.kernel_size - 1) / 2)
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=16, kernel_size=self.kernel_size, stride=1, padding=self.padsize),
)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
)
self.fc = nn.Sequential(
nn.Linear(in_features=self.CNNoutSize*self.CNNoutSize*32, out_features=128),
nn.Relu(),
nn.Linear(in_features=128, out_features=16)
)
    def execute(self, x):
x = self.layer1(x)
x = self.pool(x)
x = self.layer2(x)
x = x.view(-1, self.CNNoutSize*self.CNNoutSize*32)
x = self.fc(x)
return x
class Net(Module):
def __init__(self, sinkhorn_norm) -> None:
pygm.BACKEND = 'jittor'
self.slice = parameter.parameters().slice**2
self.side_len = parameter.parameters().side_len
if (sinkhorn_norm):
self.execute = self.execute_sinkhorn
else:
self.execute = self.execute_sigmoid
self.AlexNet = AlexNet()
self.fc = nn.Sequential(
nn.Linear(in_features=self.slice*16, out_features=64),
nn.ReLU(),
nn.Linear(in_features=64, out_features=self.slice ** 2),
)
    def execute_sinkhorn(self, input):
x = input
x = jt.reshape(x, (-1, 3, self.side_len, self.side_len))
x = self.AlexNet(x)
x = jt.reshape(x, (-1, self.slice*16))
x = self.fc(x)
x = jt.reshape(x, (-1, self.slice, self.slice))
x = pygm.sinkhorn(x)
return x
    def execute_sigmoid(self, input):
x = input
x = jt.reshape(x, (-1, 3, self.side_len, self.side_len))
x = self.AlexNet(x)
x = jt.reshape(x, (-1, self.slice*16))
x = self.fc(x)
x = jt.nn.Sigmoid()(x)
x = jt.reshape(x, (-1, self.slice, self.slice))
return x
|
kizunawl/SJTU-AI-courses
|
Deep Learning/Task4/model.py
|
model.py
|
py
| 2,616 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28383267446
|
import copy
import functools
import os
import random
import torch
import torch.nn.functional as F
import blobfile as bf
import torchvision.utils as vutils
import numpy as np
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import Adam
from ..models.unet.fp16_util import zero_grad
from tqdm import tqdm
from ..utils import dist_util
import matplotlib.pyplot as plt
from .plotters import ImPlotter
from .config_getters import get_model
class IPFStepBase(th.nn.Module):
def __init__(
self,
model,
forward_diffusion,
backward_diffusion,
data_loader,
prior_loader,
cache_data_loader = None,
args = None,
forward_model = None,
cache_loader = False,
resume_checkpoint = 0,
checkpoint_directory = './',
plot_directory = './',
):
super().__init__()
self.set_seed(dist.get_rank()+0)
ema_rate = args.ema_rate
save_interval=args.save_interval
lr_anneal_steps = 0
self.args = args
self.model = model
self.forward_diffusion = forward_diffusion
self.backward_diffusion = backward_diffusion
self.forward_model = forward_model
self.prior_loader = prior_loader
self.data_loader = data_loader
self.cache_data_loader = cache_data_loader
self.num_steps = self.args.nit
self.num_iter = self.args.num_iter
self.lr_anneal_steps = lr_anneal_steps
self.batch_size = self.args.batch_size
self.cache_loader = cache_loader
self.cache_refresh = self.args.cache_refresh_stride
self.lr = self.args.lr
self.classes = self.args.num_data_classes > 0
self.weight_decay = self.args.weight_decay
self.ema_rate = (
[ema_rate]
if isinstance(ema_rate, float)
else [float(x) for x in ema_rate.split(",")]
)
self.save_interval = save_interval
self.checkpoint_dir = checkpoint_directory
self.plot_dir = plot_directory
self.plotter = ImPlotter(im_dir=self.plot_dir, plot_level=1)
self.step = 0
self.resume_step = resume_checkpoint
self.resume_checkpoint = resume_checkpoint
self.global_batch = self.batch_size * dist.get_world_size()
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.sync_cuda = th.cuda.is_available()
self._load_and_sync_parameters()
# Optimizers
self.opt = Adam(self.master_params, lr=self.lr)
if self.resume_step:
self._load_optimizer_state()
# Model was resumed, either due to a restart or a checkpoint
# being specified at the command line.
self.ema_params = [
self._load_ema_parameters(rate) for rate in self.ema_rate
]
else:
self.ema_params = [
copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))
]
if th.cuda.is_available():
self.use_ddp = True
self.ddp_model = DDP(
self.model,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=False,
bucket_cap_mb=128,
find_unused_parameters=False,
)
        else:
            if dist.get_world_size() > 1:
                print("Distributed training requires CUDA; running without DDP.")
            # always set these attributes so later code can rely on them
            self.use_ddp = False
            self.ddp_model = self.model
def _load_and_sync_parameters(self):
resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
if resume_checkpoint:
self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
if dist.get_rank() == 0:
self.model.load_state_dict(
dist_util.load_state_dict(
resume_checkpoint, map_location=dist_util.dev()
)
)
dist_util.sync_params(self.model.parameters())
def _load_ema_parameters(self, rate):
ema_params = copy.deepcopy(self.master_params)
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
if ema_checkpoint:
if dist.get_rank() == 0:
state_dict = dist_util.load_state_dict(
ema_checkpoint, map_location=dist_util.dev()
)
ema_params = self._state_dict_to_master_params(state_dict)
dist_util.sync_params(ema_params)
return ema_params
def _load_optimizer_state(self):
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
opt_checkpoint = bf.join(
bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
)
if bf.exists(opt_checkpoint):
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
def optimize_step(self):
#self._anneal_lr()
# if self.args.grad_clipping:
# clipping_param = self.args.grad_clip
# total_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), clipping_param)
self.opt.step()
for rate, params in zip(self.ema_rate, self.ema_params):
update_ema(params, self.master_params, rate=rate)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
def save(self):
if dist.get_rank() == 0:
self.set_seed(0)
init_samples, labels = next(self.prior_loader)
init_samples = init_samples.to(dist_util.dev())
labels = labels.to(dist_util.dev()) if labels is not None else None
sample_model = get_model(self.args)
for rate, params in zip(self.ema_rate, self.ema_params):
state_dict = self._master_params_to_state_dict(params)
sample_model.load_state_dict(state_dict)
sample_model = sample_model.to(dist_util.dev())
x_tot_plot = self.backward_diffusion.sample(init_samples, labels, t_batch=None, net=sample_model)
filename = 'ema{0}_step{1}.png'.format(rate, self.step)
self.plotter.plot(init_samples, x_tot_plot, filename)
sample_model = None
torch.cuda.empty_cache()
# init_samples, labels = next(self.data_loader)
# init_samples = init_samples.to(dist_util.dev())
# labels = labels.to(dist_util.dev()) if labels is not None else None
# x_tot_plot = self.forward_diffusion.sample(init_samples, labels, t_batch=None, net=self.forward_model)
# filename = 'sample{0}_step{1}.png'.format(rate, self.step)
# self.plotter.plot(init_samples, x_tot_plot, filename)
def save_checkpoint(rate, params):
state_dict = self._master_params_to_state_dict(params)
if dist.get_rank() == 0:
if not rate:
filename = f"model{(self.step+self.resume_step):06d}.pt"
else:
filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
with bf.BlobFile(bf.join(self.checkpoint_dir, filename), "wb") as f:
th.save(state_dict, f)
save_checkpoint(0, self.master_params)
for rate, params in zip(self.ema_rate, self.ema_params):
save_checkpoint(rate, params)
if dist.get_rank() == 0:
with bf.BlobFile(
bf.join(self.checkpoint_dir, f"opt{(self.step+self.resume_step):06d}.pt"),
"wb",
) as f:
th.save(self.opt.state_dict(), f)
dist.barrier()
def _master_params_to_state_dict(self, master_params):
state_dict = self.model.state_dict()
for i, (name, _value) in enumerate(self.model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def _state_dict_to_master_params(self, state_dict):
params = [state_dict[name] for name, _ in self.model.named_parameters()]
return params
def log_step(self):
return
def get_blob_logdir(self):
return self.plot_dir
def set_seed(self, seed=0):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
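# Hypothetical quick check (not in the original file):
# parse_resume_step_from_filename("path/to/model012345.pt")  # -> 12345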
def find_resume_checkpoint():
# On your infrastructure, you may want to override this to automatically
# discover the latest checkpoint on your blob storage, etc.
return None
def find_ema_checkpoint(main_checkpoint, step, rate):
if main_checkpoint is None:
return None
filename = f"ema_{rate}_{(step):06d}.pt"
path = bf.join(bf.dirname(main_checkpoint), filename)
if bf.exists(path):
return path
return None
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
|
JTT94/schrodinger_bridge
|
bridge/trainer/ipf_base.py
|
ipf_base.py
|
py
| 10,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40024543800
|
import datetime,time,os,sys
if(sys.platform.lower().startswith('linux')):
OS_TYPE = 'linux'
elif(sys.platform.lower().startswith('mac')):
OS_TYPE = 'macintosh'
elif(sys.platform.lower().startswith('win')):
OS_TYPE = 'windows'
else:
OS_TYPE = 'invalid'
# Get our current directory
OUTPUT_FILE_DIRECTORY = os.getcwd()
def find_all(a_str, sub):
"""
Returns the indexes of {sub} where they were found in {a_str}. The values
returned from this function should be made into a list() before they can
be easily used.
Last Update: 03/01/2017
By: LB023593
"""
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
start += 1
# Create variables for all the paths
if((OS_TYPE == 'windows')):
# Clear Screen Windows
os.system('cls')
directories = list(find_all(OUTPUT_FILE_DIRECTORY,'\\'))
OUTPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\outputs\\'
INPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\inputs\\'
SCRIPTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\scripts\\'
MODULES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\modules\\'
MODULES_GITHUB_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\modules\\github\\'
CLASSES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '\\classes\\'
elif((OS_TYPE == 'linux') or (OS_TYPE == 'macintosh')):
# Clear Screen Linux / Mac
os.system('clear')
directories = list(find_all(OUTPUT_FILE_DIRECTORY,'/'))
OUTPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/outputs/'
INPUTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/inputs/'
SCRIPTS_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/scripts/'
MODULES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/modules/'
MODULES_GITHUB_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/modules/github/'
CLASSES_DIR = OUTPUT_FILE_DIRECTORY[:directories[-1]] + '/classes/'
# OS Compatibility for importing Class Files
if((OS_TYPE == 'linux') or (OS_TYPE == 'macintosh')):
sys.path.insert(0,'../classes/')
sys.path.insert(0,MODULES_DIR)
elif((OS_TYPE == 'windows')):
sys.path.insert(0,'..\\classes\\')
sys.path.insert(0,MODULES_DIR)
# < --- Begin Custom Classes Import --- >
# Custom Colors for printing to the screen
from custom_colors import *
from benchmark import *
from crypto_pairs import *
from command_line_arguments import *
from pseudothreading import *
from tracking import *
from pretty_formatting import *
# < --- End Custom Classes Import --- >
# Time all the things!
runtime = Benchmark()
# Text Coloration
cc = ColoredText(['exchange'],['38;5;214m'])
# Get parameters from commandline
parameters = Parse()
# Define what we're expecting to be passed in
parameters.add_expectation('-crypto-main', 'string', True, False)
parameters.add_expectation('-crypto-alt', 'string', True, False)
# Assign passed in values
parameters.parse_commandline()
# Check expectations were met
parameters.validate_requirements()
# World Reserve Crypto
main = parameters.get_parameter('-crypto-main').value
# Poor wanna be Crypto
alt = parameters.get_parameter('-crypto-alt').value
master = Metrics(main, alt)
selling, buying = master.call_order_book('binance')
# keys
# ['symbol', 'timestamp', 'datetime', 'high', 'low', 'bid', 'bidVolume', 'ask', 'askVolume', 'vwap', 'open', 'close', 'last', 'previousClose', 'change', 'percentage', 'average', 'baseVolume', 'quoteVolume', 'info']
ticker = master.call_fetch_ticker('binance')
print("\n bid:\t" + format(ticker['DOGE/BTC']['bid'], '.8f'))
print(" bidVolume:\t" + str(ticker['DOGE/BTC']['bidVolume']))
print(" ask:\t" + format(ticker['DOGE/BTC']['ask'], '.8f'))
print(" askVolume:\t" + str(ticker['DOGE/BTC']['askVolume']))
print("")
print(buying)
print("")
#print(buying[ticker['DOGE/BTC']['bid']])
print(str(buying[0][1]) + " @ " + format(buying[0][0],'.8f'))
print(buying[-1])
print("Buying:")
for counter in range(5,-1,-1):
print(str(buying[counter][1]) + " @ " + format(buying[counter][0],'.8f'))
print("")
print("Selling:")
for counter in range(0,5):
print(str(selling[counter][1]) + " @ " + format(selling[counter][0],'.8f'))
#https://www.binance.com/en/trade/DOGE_BTC
|
isajediknight/Sleep-Is-Overrated
|
scripts/watch_v4.py
|
watch_v4.py
|
py
| 4,274 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8407981184
|
from abc import ABC, abstractmethod
import threading
import boto3
import botocore
import sys
import logging
import logging.config
from enum import Enum
from itertools import cycle
from botocore.config import Config
from botocore.endpoint import MAX_POOL_CONNECTIONS
from collections.abc import Iterable
class AWS_SVC_BASE(ABC):
''' Represent an AWS service that contain multiple resources(workers) '''
aws_config = Config(
retries=dict(
total_max_attempts=25,
mode='adaptive'
),
max_pool_connections=MAX_POOL_CONNECTIONS,
)
def __init__(self, svc_type, session, svc_config):
if not isinstance(session, boto3.Session):
logging.error('session must be of type boto3.Session')
raise(ValueError)
if not isinstance(svc_type, AWS_SVC_TYPE):
logging.error('svc_type must be of type AWS_SVC_TYPE')
raise(ValueError)
if not isinstance(svc_config, dict):
            logging.error('svc_config must be of type dict')
raise(ValueError)
self.session = session
self.account_id = 0
self.service_type = svc_type
self.svc_config = svc_config
self.rsc_prefix = svc_config['resource_prefix']
self._key_lock = threading.Lock()
self.worker_cycle = cycle(list())
super().__init__()
@abstractmethod
def get_existing_workers(self):
''' Query the existing workers based on the rsc_prefix '''
# pass
@abstractmethod
def create_workers(self):
''' Create workers/resources of this service '''
# pass
@abstractmethod
def delete_workers(self):
''' Delete the workers created by create_workers() function '''
# pass
@abstractmethod
    def _check_existing_identity(self, identity_arn):
        ''' Check if identity_arn exists in AWS '''
# pass
def check_existing_user(self, aws_id, target_user, aws_partition = 'aws'):
''' Check if the target_user exists in AWS account aws_id '''
user_arn = 'arn:{}:iam::{}:user/{}'.format(aws_partition, aws_id, target_user)
return self._check_existing_identity(user_arn)
def check_existing_role(self, aws_id, target_role, aws_partition = 'aws'):
''' Check if the target_role exists in AWS account aws_id '''
role_arn = 'arn:{}:iam::{}:role/{}'.format(aws_partition, aws_id, target_role)
return self._check_existing_identity(role_arn)
def precheck(self):
        ''' Check if there is at least one resource to perform the test '''
# If no object is in the cycle, the default value None will be returned
if next(self.worker_cycle, None) is None:
return False
return True
def _get_next_worker(self):
with self._key_lock:
try:
return next(self.worker_cycle)
except StopIteration:
logging.error('Empty worker cycle')
return None
def _set_worker_cycle(self, iterable_obj):
if not isinstance(iterable_obj, Iterable):
logging.error('set_worker_cycle function expects an Iterable input')
return
self.worker_cycle = cycle(iterable_obj)
def _check_boto3_response(self, resp):
return 'ResponseMetadata' in resp and resp['ResponseMetadata']['HTTPStatusCode'] >= 200 and resp['ResponseMetadata']['HTTPStatusCode'] < 300
def _enable_logging(self):
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(level=logging.DEBUG, format='%(module)s: %(message)s')
class AWS_SVC_TYPE(Enum):
IAM = 'iam'
S3 = 's3'
KMS = 'kms'
SQS = 'sqs'
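# Minimal subclass sketch (illustrative only; `DummySvc` and its behaviour
# are hypothetical, not part of IAMFinder):
#     class DummySvc(AWS_SVC_BASE):
#         def get_existing_workers(self):
#             return []
#         def create_workers(self):
#             self._set_worker_cycle(['worker-0'])
#         def delete_workers(self):
#             pass
#         def _check_existing_identity(self, identity_arn):
#             return False
#     svc = DummySvc(AWS_SVC_TYPE.S3, boto3.Session(),
#                    {'resource_prefix': 'iamf-'})
#     svc.create_workers()
#     assert svc.precheck()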
|
prisma-cloud/IAMFinder
|
aws_svc/aws_service_base.py
|
aws_service_base.py
|
py
| 3,839 |
python
|
en
|
code
| 102 |
github-code
|
6
|
38381779624
|
# Given a binary search tree, write a function kthSmallest to find the
# kth smallest element in it.
# Note:
# You may assume k is always valid, 1 <= k <= number of elements in the BST.
# Example 1:
# Input: root = [3,1,4,null,2], k = 1
#    3
#   / \
#  1   4
#   \
#    2
# Output: 1
# Example 2:
# Input: root = [5,3,6,2,4,null,null,1], k = 3
#        5
#       / \
#      3   6
#     / \
#    2   4
#   /
#  1
# Output: 3
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Simple idea of combining dfs and quicksort.
# High time and memory cost.
class Solution0:
def kthSmallest(self, root: TreeNode, k: int) -> int:
def dfs(root, res):
res.append(root.val)
if root.left != None:
dfs(root.left, res)
if root.right != None:
dfs(root.right, res)
res = []
dfs(root, res)
def quicksort(seq, low, high):
i = low
j = high
if low < high:
base = seq[low]
while i < j:
while seq[j] > base and j > i:
j -= 1
if j > i:
seq[i] = seq[j]
i += 1
while seq[i] < base and i < j:
i += 1
if i < j:
seq[j] = seq[i]
j -= 1
seq[i] = base
quicksort(seq, low, i-1)
quicksort(seq, i+1, high)
quicksort(res, 0, len(res)-1)
return res[k-1]
# Inorder traversal.
class Solution1:
def kthSmallest(self, root: TreeNode, k: int) -> int:
def inorder(root, res):
if root.left != None:
inorder(root.left, res)
res.append(root.val)
if root.right != None:
inorder(root.right, res)
res = []
inorder(root, res)
return res[k-1]
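# A common O(H + k) iterative variant with early exit (added as an
# illustrative alternative; it is not part of the original solutions above).
class Solution2:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        stack = []
        node = root
        while stack or node:
            # walk to the leftmost unvisited node
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            node = node.right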
|
1lch2/PythonExercise
|
leetcode/binary_tree/230.py
|
230.py
|
py
| 2,160 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30704086585
|
from collections import deque
with open('day6.txt') as day6:
lines = day6.readlines()
target_size = 14
current = 0
buffer = deque([''] * target_size)
for line in lines:
for char in line:
current = current + 1
buffer.popleft()
buffer.append(char)
        if current >= target_size and len(set(buffer)) == target_size:
print(current)
break
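# Sanity check (from the puzzle's sample): in the stream
# "mjqjpqmgbljsphdztnvjfqwrcgsmlb" the first window of 14 distinct
# characters is complete after character 19.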
|
shanetreacy/aoc2022
|
day6aoc.py
|
day6aoc.py
|
py
| 365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35303233339
|
from pages.investment_proposal.predefined_plan.predefined_plan import PredefinedPlanPage
from pages.investment_proposal.customized_plan.customized_plan import CustomizedPlanPage
from pages.investment_proposal.investment_proposal import InvestmentProposalPage
from pages.investment_proposal.investment_proposal_config import PREDEFINED_PLAN
import time
# Define the test case
def test_investment_proposal_page(driver):
predefined_plan = PREDEFINED_PLAN
predefined_plan_page = PredefinedPlanPage(driver)
if predefined_plan:
time.sleep(1)
predefined_plan_page.click_investment_plan()
predefined_plan_page.click_continue_button()
time.sleep(1)
else:
predefined_plan_page.click_customized_plan()
customized_plan_page = CustomizedPlanPage(driver)
customized_plan_page.fill_customized_plan()
investment_proposal_page = InvestmentProposalPage(driver)
investment_proposal_page.click_continue_button()
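# The `driver` argument is expected to come from a pytest fixture. A minimal
# conftest.py sketch (assumes Selenium; the start URL is a placeholder):
#     import pytest
#     from selenium import webdriver
#     @pytest.fixture
#     def driver():
#         drv = webdriver.Chrome()
#         drv.get("https://example.com")
#         yield drv
#         drv.quit()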
|
qateam-neo/fe-connect-automation
|
tests/investment_proposal_page_tests.py
|
investment_proposal_page_tests.py
|
py
| 989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15257337134
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 10 17:30:55 2018
@author: Wioletta
"""
import cv2
from localbinarypatterns import LocalBinaryPatterns
img = cv2.imread('yaleB01_P00A+000E+00.pgm')
cv2.imshow('Image',img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
desc = LocalBinaryPatterns(24, 8)
hist = desc.describe(gray)
cv2.imshow('Histogram', hist)
cv2.waitKey(0)
cv2.destroyAllWindows()
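# `hist` is a 1-D normalised histogram, so a plot is usually more readable
# than an image window. A minimal sketch (assumes matplotlib is installed):
#     import matplotlib.pyplot as plt
#     plt.bar(range(len(hist)), hist)
#     plt.show()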
|
wiolettakolasa/IO
|
test.py
|
test.py
|
py
| 360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5762269283
|
import os
scriptPath = os.path.dirname(os.path.abspath(__file__))
projRootPath = os.path.abspath(
os.path.join(scriptPath ,
os.path.join('..', '..')))
import numpy as np
# matplotlib for displaying the output
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from scipy import signal
from scipy.io import wavfile
# and IPython.display for audio output
import IPython.display
# Librosa for audio
import librosa
# And the display module for visualization
import librosa.display
#### Path to data
# Get data files
two_up = os.path.abspath(os.path.join('.' ,"../.."))
print("Project root path is: ", two_up)
dataDirName = "data"
rawDataDirName = "converted_wav"
className = "violin"
# className = "guitar"
data_path = os.path.join(projRootPath, dataDirName, rawDataDirName, className)
print(data_path)
root_paths = []
# Get all files from data_path
# r=root, d=directories, f = files
(_, d, allFiles) = next(os.walk(data_path))
wavFiles = [f for f in allFiles if f.endswith(".wav")]
file = wavFiles[1]
sample_rate, samples = wavfile.read(os.path.join(data_path, file))
frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate)
# full spectrogram; pcolormesh maps the time and frequency axes directly,
# so a separate imshow call is not needed
plt.pcolormesh(times, frequencies, spectrogram)
plt.ylabel('Frequency')
plt.xlabel('Time')
plt.show()
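# librosa is imported above but never used; a minimal mel-spectrogram sketch
# for the same file (illustrative, not part of the original flow):
#     y, sr = librosa.load(os.path.join(data_path, file), sr=None)
#     S = librosa.feature.melspectrogram(y=y, sr=sr)
#     librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
#                              sr=sr, x_axis='time', y_axis='mel')
#     plt.show()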
|
mariusdgm/AudioMining
|
src/visualization/spectrogram.py
|
spectrogram.py
|
py
| 1,360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33489081133
|
from typing import List
def isACoveredByB(a, b):
return a[0] >= b[0] and a[1] <= b[1]
class Solution:
def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
cntIntervals = len(intervals)
cntToRemove = 0
for i in range(cntIntervals):
isCovered = 0
for j in range(cntIntervals):
if i == j:
continue
if isACoveredByB(intervals[i], intervals[j]):
isCovered = 1
break
cntToRemove += isCovered
return cntIntervals - cntToRemove
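# A common O(n log n) alternative (illustrative, not part of the original
# file): sort by start ascending and end descending; an interval is covered
# exactly when its end does not exceed the largest end seen so far.
class SolutionSort:
    def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
        intervals.sort(key=lambda iv: (iv[0], -iv[1]))
        kept = 0
        maxEnd = 0
        for _, end in intervals:
            if end > maxEnd:
                kept += 1
                maxEnd = end
        return kept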
|
sxu11/Algorithm_Design
|
Daily/20210316_1288_RemoveCoveredIntervals.py
|
20210316_1288_RemoveCoveredIntervals.py
|
py
| 582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41688432470
|
from tkinter import *
from threading import Thread
from unpacker import *
from lookupData import *
from telemetryModule import *
import math
root = Tk()
root.title("F1 2021 Telemetry App")
root.geometry("{}x{}".format(1200, 800))
root.configure(background="#212026")
telemetry_modules = []
telemetry_data = [None] * 12
def update():
for telemetry_module in telemetry_modules:
telemetry_module.frame.after(1, telemetry_module.updateSize)
root.geometry("{}x{}".format(round_to_multiple(root.winfo_width(), 12), round_to_multiple(root.winfo_height(), 8)))
root.after(10, update)
def retrieve_packet_task():
while True:
packet = retrieve_packet()
telemetry_data[packet.packetHeader.packetID] = packet
def round_to_multiple(x, base):
return base * round(x / base)
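# e.g. round_to_multiple(1234, 12) == 1236 and round_to_multiple(799, 8) == 800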
def create_telemetry_module(name, column, row, x_span, y_span, colour):
new_module = TelemetryModule(root, name, column, row, x_span, y_span, colour)
telemetry_modules.append(new_module)
create_telemetry_module("Timing Tower", 0, 0, 3, 6, "gray")
create_telemetry_module("Pace Tower", 3, 0, 2, 6, "gray")
create_telemetry_module("Pace Graph", 5, 0, 7, 3, "gray")
create_telemetry_module("Predicted Finish Graph", 5, 3, 7, 3, "gray")
create_telemetry_module("Pit Monitor", 0, 6, 2, 2, "gray")
create_telemetry_module("Fuel Monitor", 2, 6, 2, 2, "gray")
create_telemetry_module("Weather Forecast", 4, 6, 8, 1, "gray")
create_telemetry_module("Pit Strategy", 4, 7, 8, 1, "gray")
thread = Thread(target = retrieve_packet_task)
thread.start()
root.after(1, update)
root.mainloop()
|
smuldoon1/F1-2021-Telemetry-App
|
telemetryApp.py
|
telemetryApp.py
|
py
| 1,601 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12417770443
|
from redirect import config, cryptoDecrypt, datetime, GenericException,jwt, logger, messages, timezone
def getClientServerTimeDiff(auth):
try:
token = auth.split(' ')[-1]
decrypted = cryptoDecrypt(token)
client_timestamp = float(decrypted)/1000
dt = datetime.datetime.now(timezone.utc)
utc_time = dt.replace(tzinfo=timezone.utc)
server_timestamp = utc_time.timestamp()
diff = server_timestamp - client_timestamp
return diff
except:
raiseGenericException('errToken')
def validateTokenAndGetPayload(auth):
try:
token = auth.split(' ')[-1] # get last word
secret = config.get('authentication').get('jwt').get('secret')
algorithm = config.get('authentication').get('jwt').get('algorithm')
        payload = jwt.decode(token, secret, algorithms=[algorithm])
print(payload)
return payload
except jwt.ExpiredSignatureError as error1:
logger.error(error1)
raiseGenericException('errTokenExpired')
except (Exception) as error:
logger.error(error)
raiseGenericException('errInvalidToken')
def raiseGenericException(errName):
raise GenericException(
code=messages[errName][0], name=errName, message=messages[errName][1])
def raiseGenericExceptionFn(errName, mess):
raise GenericException(
code= messages[errName](mess)[0], name = errName, message= messages[errName](mess)[1]
)
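# Minimal sketch of producing a token this module would accept (assumes the
# same secret/algorithm config used above; the payload fields are illustrative):
#     secret = config.get('authentication').get('jwt').get('secret')
#     algorithm = config.get('authentication').get('jwt').get('algorithm')
#     token = jwt.encode({'sub': 'user-id'}, secret, algorithm=algorithm)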
|
capitalch/bika
|
dev/KaterServer/data_handlers/graphql_sub_worker.py
|
graphql_sub_worker.py
|
py
| 1,467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36578088832
|
# MIT 6.001 pset 1c
total_cost = 1000000.0
portion_down_payment = 0.25
total_down_payment = total_cost * portion_down_payment
current_savings = 0.0
r = 0.04
base_annual_salary = 0.0
semi_annual_raise = 0.07
best_saving_rate = 0.0
money_range = 100.0
months = 36
init_upper = 10000
upper_bound = init_upper
lower_bound = 0
portion_saved = (upper_bound + lower_bound) / 2
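# portion_saved is searched over integer "basis points" (0-10000), so the
# actual savings rate is portion_saved / 10000.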
steps = 0
base_annual_salary = float(input("What's your annual salary? "))
while abs(current_savings - total_down_payment) > money_range:
steps += 1
current_savings = 0.0
    annual_salary = base_annual_salary
    monthly_salary = annual_salary / 12
monthly_deposit = monthly_salary * (portion_saved / 10000)
for month in range(1, months + 1):
current_savings += current_savings * (r/12)
current_savings += monthly_deposit
if month % 6 == 0:
            annual_salary += annual_salary * semi_annual_raise
            monthly_salary = annual_salary / 12
monthly_deposit = monthly_salary * (portion_saved / 10000)
prev_portion_saved = portion_saved
if current_savings > total_down_payment:
upper_bound = portion_saved
else:
lower_bound = portion_saved
portion_saved = int(round((upper_bound + lower_bound) / 2))
if prev_portion_saved == portion_saved:
break
if prev_portion_saved == portion_saved and portion_saved == init_upper:
print("it is not possible to pay the house in three years")
else:
print("Best savings rate is", portion_saved / 10000)
print("Steps in bisection search:", steps)
|
1kaLn/MIT-60001
|
pset1/ps1c.py
|
ps1c.py
|
py
| 1,599 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4714847905
|
import requests
import ast
import sys
import getopt
class XkcdClient():
def api_call(self, url):
self.urls = url
r = requests.get(url = self.urls)
byte_str = r.content
dict_str = byte_str.decode("UTF-8")
        my_data = ast.literal_eval(dict_str)  # note: json.loads would be more robust for JSON payloads
return my_data
def get_image(self,img_url):
self.img_name = img_url.split('/')[-1]
img_data = requests.get(img_url).content
with open(self.img_name, 'wb') as handler:
handler.write(img_data)
# client = XkcdClient()
# response = client.api_call('https://xkcd.com/info.0.json')
# print(response)
if __name__ == '__main__':
cmd_line_args = sys.argv[1:]
unix_args = 'hn:os'
gnu_args = ['help','comicnum=','print','save-image']
oplist, args = getopt.getopt(cmd_line_args,unix_args,gnu_args)
    print(args) #Extra arguments that are not part of the unix_args or gnu_args
print(oplist) #oplist is a list of tuples
comic_num = ''
client = XkcdClient()
url_latest = 'https://xkcd.com/info.0.json'
for opt, arg in oplist:
print(opt)
print(arg)
if opt == '-h' or opt == '--help':
print('help message')
print('Use -n or --comicnum to specify the comic number you want use 0 as argument for latest comic')
print('Use -o or --print to get info in text/json format')
print('Use -s or --save-image to download image in this directory')
elif opt == '-n' or opt == '--comicnum':
comic_num = arg
            if comic_num == '0': #default: get the latest comic
print('Get the comic number ' + str(arg))
response = client.api_call(url_latest)
print(response)
else:
url_specific = 'http://xkcd.com/'+arg+'/info.0.json'
response = client.api_call(url_specific)
elif opt == '-o' or opt == '--print':
if comic_num:
                if comic_num == '0':
print('print output in format json/text')
print(response)
else:
print('The output in json/text is')
print(response)
else:
print('Set the -n parameter first')
elif opt == '-s' or opt == '--save-image':
if comic_num:
img_url = response['img']
client.get_image(img_url)
else:
print('Set the -n parameter first')
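# Example invocation (the comic number 353 is illustrative):
#     python client.py --comicnum 353 --print --save-image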
|
nishantasarma/XkcdClientApp
|
client.py
|
client.py
|
py
| 2,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31533763236
|
groups_number = int(input())
total_people = 0
musala_people = 0
mont_blanc_people = 0
kilimanjaro_people = 0
k2_people = 0
everest_people = 0
percent_musala = 0
percent_mont_blanc = 0
percent_kilimanjaro = 0
percent_k2 = 0
percent_everest = 0
for group in range(groups_number):
current_people = int(input())
if current_people <= 5:
musala_people += current_people
elif current_people <= 12:
mont_blanc_people += current_people
elif current_people <= 25:
kilimanjaro_people += current_people
elif current_people <= 40:
k2_people += current_people
elif current_people > 40:
everest_people += current_people
total_people = musala_people + mont_blanc_people + kilimanjaro_people\
+ k2_people + everest_people
percent_musala = musala_people / total_people * 100
percent_mont_blanc = mont_blanc_people / total_people * 100
percent_kilimanjaro = kilimanjaro_people / total_people * 100
percent_k2 = k2_people / total_people * 100
percent_everest = everest_people / total_people * 100
print(f"{percent_musala:.2f}%")
print(f"{percent_mont_blanc:.2f}%")
print(f"{percent_kilimanjaro:.2f}%")
print(f"{percent_k2:.2f}%")
print(f"{percent_everest:.2f}%")
|
iliyan-pigeon/Soft-uni-Courses
|
programming_basics_python/exams/exam_march_2020/trekking_mania.py
|
trekking_mania.py
|
py
| 1,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30827675825
|
import os
import sys
import json
import logging
from time import time
from PyQt5.Qt import PYQT_VERSION_STR
from PyQt5.QtCore import (
QT_VERSION_STR, QStandardPaths, QSysInfo, QLocale, QLibraryInfo,
QTranslator
)
from novelwriter.error import logException, formatException
from novelwriter.common import splitVersionNumber, formatTimeStamp, NWConfigParser
from novelwriter.constants import nwFiles, nwUnicode
logger = logging.getLogger(__name__)
class Config:
LANG_NW = 1
LANG_PROJ = 2
def __init__(self):
# Set Application Variables
self.appName = "novelWriter"
self.appHandle = self.appName.lower()
# Config Error Handling
self.hasError = False # True if the config class encountered an error
self.errData = [] # List of error messages
# Set Paths
self.cmdOpen = None # Path from command line for project to be opened on launch
self.confPath = None # Folder where the config is saved
self.confFile = None # The config file name
self.dataPath = None # Folder where app data is stored
self.lastPath = None # The last user-selected folder (browse dialogs)
self.appPath = None # The full path to the novelwriter package folder
self.appRoot = None # The full path to the novelwriter root folder
self.appIcon = None # The full path to the novelwriter icon file
self.assetPath = None # The full path to the novelwriter/assets folder
self.pdfDocs = None # The location of the PDF manual, if it exists
# Runtime Settings and Variables
        self.confChanged = False # True whenever the config has changed, false after save
# General
self.guiTheme = "" # GUI theme
self.guiSyntax = "" # Syntax theme
self.guiIcons = "" # Icon theme
self.guiFont = "" # Defaults to system default font
self.guiFontSize = 11 # Is overridden if system default is loaded
self.guiScale = 1.0 # Set automatically by Theme class
self.lastNotes = "0x0" # The latest release notes that have been shown
self.setDefaultGuiTheme()
self.setDefaultSyntaxTheme()
self.setDefaultIconTheme()
# Localisation
self.qLocal = QLocale.system()
self.guiLang = self.qLocal.name()
self.qtLangPath = QLibraryInfo.location(QLibraryInfo.TranslationsPath)
self.nwLangPath = None
self.qtTrans = {}
# Sizes
self.winGeometry = [1200, 650]
self.prefGeometry = [700, 615]
self.treeColWidth = [200, 50, 30]
self.novelColWidth = [200, 50]
self.projColWidth = [200, 60, 140]
self.mainPanePos = [300, 800]
self.docPanePos = [400, 400]
self.viewPanePos = [500, 150]
self.outlnPanePos = [500, 150]
self.isFullScreen = False
# Features
self.hideVScroll = False # Hide vertical scroll bars on main widgets
self.hideHScroll = False # Hide horizontal scroll bars on main widgets
self.emphLabels = True # Add emphasis to H1 and H2 item labels
# Project
self.autoSaveProj = 60 # Interval for auto-saving project in seconds
self.autoSaveDoc = 30 # Interval for auto-saving document in seconds
# Text Editor
self.textFont = None # Editor font
self.textSize = 12 # Editor font size
self.textWidth = 600 # Editor text width
self.textMargin = 40 # Editor/viewer text margin
self.tabWidth = 40 # Editor tabulator width
self.focusWidth = 800 # Focus Mode text width
self.hideFocusFooter = False # Hide document footer in Focus Mode
self.showFullPath = True # Show full document path in editor header
self.autoSelect = True # Auto-select word when applying format with no selection
self.doJustify = False # Justify text
        self.showTabsNSpaces = False # Show tabs and spaces in editor
self.showLineEndings = False # Show line endings in editor
self.showMultiSpaces = True # Highlight multiple spaces in the text
self.doReplace = True # Enable auto-replace as you type
self.doReplaceSQuote = True # Smart single quotes
self.doReplaceDQuote = True # Smart double quotes
self.doReplaceDash = True # Replace multiple hyphens with dashes
self.doReplaceDots = True # Replace three dots with ellipsis
self.scrollPastEnd = 25 # Number of lines to scroll past end of document
self.autoScroll = False # Typewriter-like scrolling
self.autoScrollPos = 30 # Start point for typewriter-like scrolling
self.wordCountTimer = 5.0 # Interval for word count update in seconds
self.bigDocLimit = 800 # Size threshold for heavy editor features in kilobytes
self.incNotesWCount = True # The status bar word count includes notes
self.highlightQuotes = True # Highlight text in quotes
self.allowOpenSQuote = False # Allow open-ended single quotes
self.allowOpenDQuote = True # Allow open-ended double quotes
self.highlightEmph = True # Add colour to text emphasis
self.stopWhenIdle = True # Stop the status bar clock when the user is idle
self.userIdleTime = 300 # Time of inactivity to consider user idle
# User-Selected Symbols
self.fmtApostrophe = nwUnicode.U_RSQUO
self.fmtSingleQuotes = [nwUnicode.U_LSQUO, nwUnicode.U_RSQUO]
self.fmtDoubleQuotes = [nwUnicode.U_LDQUO, nwUnicode.U_RDQUO]
self.fmtPadBefore = ""
self.fmtPadAfter = ""
self.fmtPadThin = False
# Spell Checking
self.spellLanguage = None
# Search Bar Switches
self.searchCase = False
self.searchWord = False
self.searchRegEx = False
self.searchLoop = False
self.searchNextFile = False
self.searchMatchCap = False
# Backup
self.backupPath = ""
self.backupOnClose = False
self.askBeforeBackup = True
# State
self.showRefPanel = True # The reference panel for the viewer is visible
self.viewComments = True # Comments are shown in the viewer
self.viewSynopsis = True # Synopsis is shown in the viewer
# Check Qt5 Versions
verQt = splitVersionNumber(QT_VERSION_STR)
self.verQtString = QT_VERSION_STR
self.verQtMajor = verQt[0]
self.verQtMinor = verQt[1]
self.verQtPatch = verQt[2]
self.verQtValue = verQt[3]
verQt = splitVersionNumber(PYQT_VERSION_STR)
self.verPyQtString = PYQT_VERSION_STR
self.verPyQtMajor = verQt[0]
self.verPyQtMinor = verQt[1]
self.verPyQtPatch = verQt[2]
self.verPyQtValue = verQt[3]
# Check Python Version
self.verPyString = sys.version.split()[0]
self.verPyMajor = sys.version_info[0]
self.verPyMinor = sys.version_info[1]
self.verPyPatch = sys.version_info[2]
self.verPyHexVal = sys.hexversion
# Check OS Type
self.osType = sys.platform
self.osLinux = False
self.osWindows = False
self.osDarwin = False
self.osUnknown = False
if self.osType.startswith("linux"):
self.osLinux = True
elif self.osType.startswith("darwin"):
self.osDarwin = True
elif self.osType.startswith("win32"):
self.osWindows = True
elif self.osType.startswith("cygwin"):
self.osWindows = True
else:
self.osUnknown = True
# Other System Info
self.hostName = "Unknown"
self.kernelVer = "Unknown"
# Packages
self.hasEnchant = False # The pyenchant package
# Recent Cache
self.recentProj = {}
return
##
# Methods
##
def pxInt(self, theSize):
"""Used to scale fixed gui sizes by the screen scale factor.
This function returns an int, which is always rounded down.
"""
return int(theSize*self.guiScale)
def rpxInt(self, theSize):
"""Used to un-scale fixed gui sizes by the screen scale factor.
This function returns an int, which is always rounded down.
"""
return int(theSize/self.guiScale)
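    # e.g. with guiScale = 1.5: pxInt(10) == 15 and rpxInt(15) == 10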
##
# Config Actions
##
def initConfig(self, confPath=None, dataPath=None):
"""Initialise the config class. The manual setting of confPath
and dataPath is mainly intended for the test suite.
"""
logger.debug("Initialising Config ...")
if confPath is None:
confRoot = QStandardPaths.writableLocation(QStandardPaths.ConfigLocation)
self.confPath = os.path.join(os.path.abspath(confRoot), self.appHandle)
else:
logger.info("Setting config from alternative path: %s", confPath)
self.confPath = confPath
if dataPath is None:
if self.verQtValue >= 50400:
dataRoot = QStandardPaths.writableLocation(QStandardPaths.AppDataLocation)
else:
dataRoot = QStandardPaths.writableLocation(QStandardPaths.DataLocation)
self.dataPath = os.path.join(os.path.abspath(dataRoot), self.appHandle)
else:
logger.info("Setting data path from alternative path: %s", dataPath)
self.dataPath = dataPath
logger.verbose("Config path: %s", self.confPath)
logger.verbose("Data path: %s", self.dataPath)
# Check Data Path Subdirs
dataDirs = ["syntax", "themes"]
for dataDir in dataDirs:
dirPath = os.path.join(self.dataPath, dataDir)
if not os.path.isdir(dirPath):
try:
os.mkdir(dirPath)
logger.info("Created folder: %s", dirPath)
except Exception:
logger.error("Could not create folder: %s", dirPath)
logException()
self.confFile = self.appHandle+".conf"
self.lastPath = os.path.expanduser("~")
self.appPath = getattr(sys, "_MEIPASS", os.path.abspath(os.path.dirname(__file__)))
self.appRoot = os.path.abspath(os.path.join(self.appPath, os.path.pardir))
if os.path.isfile(self.appRoot):
# novelWriter is packaged as a single file, so the app and
# root paths are the same, and equal to the folder that
# contains the single executable.
self.appRoot = os.path.dirname(self.appRoot)
self.appPath = self.appRoot
# Assets
self.assetPath = os.path.join(self.appPath, "assets")
self.appIcon = os.path.join(self.assetPath, "icons", "novelwriter.svg")
# Internationalisation
self.nwLangPath = os.path.join(self.assetPath, "i18n")
logger.debug("Assets: %s", self.assetPath)
logger.verbose("App path: %s", self.appPath)
logger.verbose("Last path: %s", self.lastPath)
# If the config folder does not exist, create it.
# This assumes that the os config folder itself exists.
if not os.path.isdir(self.confPath):
try:
os.mkdir(self.confPath)
except Exception as exc:
logger.error("Could not create folder: %s", self.confPath)
logException()
self.hasError = True
self.errData.append("Could not create folder: %s" % self.confPath)
self.errData.append(formatException(exc))
self.confPath = None
# Check if config file exists
if self.confPath is not None:
if os.path.isfile(os.path.join(self.confPath, self.confFile)):
# If it exists, load it
self.loadConfig()
else:
# If it does not exist, save a copy of the default values
self.saveConfig()
# If the data folder does not exist, create it.
# This assumes that the os data folder itself exists.
if self.dataPath is not None:
if not os.path.isdir(self.dataPath):
try:
os.mkdir(self.dataPath)
except Exception as exc:
logger.error("Could not create folder: %s", self.dataPath)
logException()
self.hasError = True
self.errData.append("Could not create folder: %s" % self.dataPath)
self.errData.append(formatException(exc))
self.dataPath = None
# Host and Kernel
if self.verQtValue >= 50600:
self.hostName = QSysInfo.machineHostName()
self.kernelVer = QSysInfo.kernelVersion()
# Load recent projects cache
self.loadRecentCache()
# Check the availability of optional packages
self._checkOptionalPackages()
if self.spellLanguage is None:
self.spellLanguage = "en"
# Look for a PDF version of the manual
pdfDocs = os.path.join(self.assetPath, "manual.pdf")
if os.path.isfile(pdfDocs):
logger.debug("Found manual: %s", pdfDocs)
self.pdfDocs = pdfDocs
logger.debug("Config initialisation complete")
return True
def initLocalisation(self, nwApp):
"""Initialise the localisation of the GUI.
"""
self.qLocal = QLocale(self.guiLang)
QLocale.setDefault(self.qLocal)
self.qtTrans = {}
langList = [
(self.qtLangPath, "qtbase"), # Qt 5.x
(self.nwLangPath, "qtbase"), # Alternative Qt 5.x
(self.nwLangPath, "nw"), # novelWriter
]
for lngPath, lngBase in langList:
for lngCode in self.qLocal.uiLanguages():
qTrans = QTranslator()
lngFile = "%s_%s" % (lngBase, lngCode.replace("-", "_"))
if lngFile not in self.qtTrans:
if qTrans.load(lngFile, lngPath):
logger.debug("Loaded: %s", os.path.join(lngPath, lngFile))
nwApp.installTranslator(qTrans)
self.qtTrans[lngFile] = qTrans
return
def listLanguages(self, lngSet):
"""List localisation files in the i18n folder. The default GUI
language 'en_GB' is British English.
"""
if lngSet == self.LANG_NW:
fPre = "nw_"
fExt = ".qm"
langList = {"en_GB": QLocale("en_GB").nativeLanguageName().title()}
elif lngSet == self.LANG_PROJ:
fPre = "project_"
fExt = ".json"
langList = {"en_GB": QLocale("en_GB").nativeLanguageName().title()}
else:
return []
for qmFile in os.listdir(self.nwLangPath):
if not os.path.isfile(os.path.join(self.nwLangPath, qmFile)):
continue
if not qmFile.startswith(fPre) or not qmFile.endswith(fExt):
continue
qmLang = qmFile[len(fPre):-len(fExt)]
qmName = QLocale(qmLang).nativeLanguageName().title()
if qmLang and qmName and qmLang != "en_GB":
langList[qmLang] = qmName
return sorted(langList.items(), key=lambda x: x[0])
def loadConfig(self):
"""Load preferences from file and replace default settings.
"""
logger.debug("Loading config file")
if self.confPath is None:
return False
theConf = NWConfigParser()
cnfPath = os.path.join(self.confPath, self.confFile)
try:
with open(cnfPath, mode="r", encoding="utf-8") as inFile:
theConf.read_file(inFile)
except Exception as exc:
logger.error("Could not load config file")
logException()
self.hasError = True
self.errData.append("Could not load config file")
self.errData.append(formatException(exc))
return False
# Main
cnfSec = "Main"
self.guiTheme = theConf.rdStr(cnfSec, "theme", self.guiTheme)
self.guiSyntax = theConf.rdStr(cnfSec, "syntax", self.guiSyntax)
self.guiIcons = theConf.rdStr(cnfSec, "icons", self.guiIcons)
self.guiFont = theConf.rdStr(cnfSec, "guifont", self.guiFont)
self.guiFontSize = theConf.rdInt(cnfSec, "guifontsize", self.guiFontSize)
self.lastNotes = theConf.rdStr(cnfSec, "lastnotes", self.lastNotes)
self.guiLang = theConf.rdStr(cnfSec, "guilang", self.guiLang)
self.hideVScroll = theConf.rdBool(cnfSec, "hidevscroll", self.hideVScroll)
self.hideHScroll = theConf.rdBool(cnfSec, "hidehscroll", self.hideHScroll)
# Sizes
cnfSec = "Sizes"
self.winGeometry = theConf.rdIntList(cnfSec, "geometry", self.winGeometry)
self.prefGeometry = theConf.rdIntList(cnfSec, "preferences", self.prefGeometry)
self.treeColWidth = theConf.rdIntList(cnfSec, "treecols", self.treeColWidth)
self.novelColWidth = theConf.rdIntList(cnfSec, "novelcols", self.novelColWidth)
self.projColWidth = theConf.rdIntList(cnfSec, "projcols", self.projColWidth)
self.mainPanePos = theConf.rdIntList(cnfSec, "mainpane", self.mainPanePos)
self.docPanePos = theConf.rdIntList(cnfSec, "docpane", self.docPanePos)
self.viewPanePos = theConf.rdIntList(cnfSec, "viewpane", self.viewPanePos)
self.outlnPanePos = theConf.rdIntList(cnfSec, "outlinepane", self.outlnPanePos)
self.isFullScreen = theConf.rdBool(cnfSec, "fullscreen", self.isFullScreen)
# Project
cnfSec = "Project"
self.autoSaveProj = theConf.rdInt(cnfSec, "autosaveproject", self.autoSaveProj)
self.autoSaveDoc = theConf.rdInt(cnfSec, "autosavedoc", self.autoSaveDoc)
self.emphLabels = theConf.rdBool(cnfSec, "emphlabels", self.emphLabels)
# Editor
cnfSec = "Editor"
self.textFont = theConf.rdStr(cnfSec, "textfont", self.textFont)
self.textSize = theConf.rdInt(cnfSec, "textsize", self.textSize)
self.textWidth = theConf.rdInt(cnfSec, "width", self.textWidth)
self.textMargin = theConf.rdInt(cnfSec, "margin", self.textMargin)
self.tabWidth = theConf.rdInt(cnfSec, "tabwidth", self.tabWidth)
self.focusWidth = theConf.rdInt(cnfSec, "focuswidth", self.focusWidth)
self.hideFocusFooter = theConf.rdBool(cnfSec, "hidefocusfooter", self.hideFocusFooter)
self.doJustify = theConf.rdBool(cnfSec, "justify", self.doJustify)
self.autoSelect = theConf.rdBool(cnfSec, "autoselect", self.autoSelect)
self.doReplace = theConf.rdBool(cnfSec, "autoreplace", self.doReplace)
self.doReplaceSQuote = theConf.rdBool(cnfSec, "repsquotes", self.doReplaceSQuote)
self.doReplaceDQuote = theConf.rdBool(cnfSec, "repdquotes", self.doReplaceDQuote)
self.doReplaceDash = theConf.rdBool(cnfSec, "repdash", self.doReplaceDash)
self.doReplaceDots = theConf.rdBool(cnfSec, "repdots", self.doReplaceDots)
self.scrollPastEnd = theConf.rdInt(cnfSec, "scrollpastend", self.scrollPastEnd)
self.autoScroll = theConf.rdBool(cnfSec, "autoscroll", self.autoScroll)
self.autoScrollPos = theConf.rdInt(cnfSec, "autoscrollpos", self.autoScrollPos)
self.fmtSingleQuotes = theConf.rdStrList(cnfSec, "fmtsinglequote", self.fmtSingleQuotes)
self.fmtDoubleQuotes = theConf.rdStrList(cnfSec, "fmtdoublequote", self.fmtDoubleQuotes)
self.fmtPadBefore = theConf.rdStr(cnfSec, "fmtpadbefore", self.fmtPadBefore)
self.fmtPadAfter = theConf.rdStr(cnfSec, "fmtpadafter", self.fmtPadAfter)
self.fmtPadThin = theConf.rdBool(cnfSec, "fmtpadthin", self.fmtPadThin)
self.spellLanguage = theConf.rdStr(cnfSec, "spellcheck", self.spellLanguage)
self.showTabsNSpaces = theConf.rdBool(cnfSec, "showtabsnspaces", self.showTabsNSpaces)
self.showLineEndings = theConf.rdBool(cnfSec, "showlineendings", self.showLineEndings)
self.showMultiSpaces = theConf.rdBool(cnfSec, "showmultispaces", self.showMultiSpaces)
self.wordCountTimer = theConf.rdFlt(cnfSec, "wordcounttimer", self.wordCountTimer)
self.bigDocLimit = theConf.rdInt(cnfSec, "bigdoclimit", self.bigDocLimit)
self.incNotesWCount = theConf.rdBool(cnfSec, "incnoteswcount", self.incNotesWCount)
self.showFullPath = theConf.rdBool(cnfSec, "showfullpath", self.showFullPath)
self.highlightQuotes = theConf.rdBool(cnfSec, "highlightquotes", self.highlightQuotes)
self.allowOpenSQuote = theConf.rdBool(cnfSec, "allowopensquote", self.allowOpenSQuote)
self.allowOpenDQuote = theConf.rdBool(cnfSec, "allowopendquote", self.allowOpenDQuote)
self.highlightEmph = theConf.rdBool(cnfSec, "highlightemph", self.highlightEmph)
self.stopWhenIdle = theConf.rdBool(cnfSec, "stopwhenidle", self.stopWhenIdle)
self.userIdleTime = theConf.rdInt(cnfSec, "useridletime", self.userIdleTime)
# Backup
cnfSec = "Backup"
self.backupPath = theConf.rdStr(cnfSec, "backuppath", self.backupPath)
self.backupOnClose = theConf.rdBool(cnfSec, "backuponclose", self.backupOnClose)
self.askBeforeBackup = theConf.rdBool(cnfSec, "askbeforebackup", self.askBeforeBackup)
# State
cnfSec = "State"
self.showRefPanel = theConf.rdBool(cnfSec, "showrefpanel", self.showRefPanel)
self.viewComments = theConf.rdBool(cnfSec, "viewcomments", self.viewComments)
self.viewSynopsis = theConf.rdBool(cnfSec, "viewsynopsis", self.viewSynopsis)
self.searchCase = theConf.rdBool(cnfSec, "searchcase", self.searchCase)
self.searchWord = theConf.rdBool(cnfSec, "searchword", self.searchWord)
self.searchRegEx = theConf.rdBool(cnfSec, "searchregex", self.searchRegEx)
self.searchLoop = theConf.rdBool(cnfSec, "searchloop", self.searchLoop)
self.searchNextFile = theConf.rdBool(cnfSec, "searchnextfile", self.searchNextFile)
self.searchMatchCap = theConf.rdBool(cnfSec, "searchmatchcap", self.searchMatchCap)
# Path
cnfSec = "Path"
self.lastPath = theConf.rdStr(cnfSec, "lastpath", self.lastPath)
# Check Certain Values for None
self.spellLanguage = self._checkNone(self.spellLanguage)
# If we're using straight quotes, disable auto-replace
if self.fmtSingleQuotes == ["'", "'"] and self.doReplaceSQuote:
logger.info("Using straight single quotes, so disabling auto-replace")
self.doReplaceSQuote = False
if self.fmtDoubleQuotes == ['"', '"'] and self.doReplaceDQuote:
logger.info("Using straight double quotes, so disabling auto-replace")
self.doReplaceDQuote = False
# Check deprecated settings
if self.guiIcons in ("typicons_colour_dark", "typicons_grey_dark"):
self.guiIcons = "typicons_dark"
elif self.guiIcons in ("typicons_colour_light", "typicons_grey_light"):
self.guiIcons = "typicons_light"
return True
def saveConfig(self):
"""Save the current preferences to file.
"""
logger.debug("Saving config file")
if self.confPath is None:
return False
theConf = NWConfigParser()
theConf["Main"] = {
"timestamp": formatTimeStamp(time()),
"theme": str(self.guiTheme),
"syntax": str(self.guiSyntax),
"icons": str(self.guiIcons),
"guifont": str(self.guiFont),
"guifontsize": str(self.guiFontSize),
"lastnotes": str(self.lastNotes),
"guilang": str(self.guiLang),
"hidevscroll": str(self.hideVScroll),
"hidehscroll": str(self.hideHScroll),
}
theConf["Sizes"] = {
"geometry": self._packList(self.winGeometry),
"preferences": self._packList(self.prefGeometry),
"treecols": self._packList(self.treeColWidth),
"novelcols": self._packList(self.novelColWidth),
"projcols": self._packList(self.projColWidth),
"mainpane": self._packList(self.mainPanePos),
"docpane": self._packList(self.docPanePos),
"viewpane": self._packList(self.viewPanePos),
"outlinepane": self._packList(self.outlnPanePos),
"fullscreen": str(self.isFullScreen),
}
theConf["Project"] = {
"autosaveproject": str(self.autoSaveProj),
"autosavedoc": str(self.autoSaveDoc),
"emphlabels": str(self.emphLabels),
}
theConf["Editor"] = {
"textfont": str(self.textFont),
"textsize": str(self.textSize),
"width": str(self.textWidth),
"margin": str(self.textMargin),
"tabwidth": str(self.tabWidth),
"focuswidth": str(self.focusWidth),
"hidefocusfooter": str(self.hideFocusFooter),
"justify": str(self.doJustify),
"autoselect": str(self.autoSelect),
"autoreplace": str(self.doReplace),
"repsquotes": str(self.doReplaceSQuote),
"repdquotes": str(self.doReplaceDQuote),
"repdash": str(self.doReplaceDash),
"repdots": str(self.doReplaceDots),
"scrollpastend": str(self.scrollPastEnd),
"autoscroll": str(self.autoScroll),
"autoscrollpos": str(self.autoScrollPos),
"fmtsinglequote": self._packList(self.fmtSingleQuotes),
"fmtdoublequote": self._packList(self.fmtDoubleQuotes),
"fmtpadbefore": str(self.fmtPadBefore),
"fmtpadafter": str(self.fmtPadAfter),
"fmtpadthin": str(self.fmtPadThin),
"spellcheck": str(self.spellLanguage),
"showtabsnspaces": str(self.showTabsNSpaces),
"showlineendings": str(self.showLineEndings),
"showmultispaces": str(self.showMultiSpaces),
"wordcounttimer": str(self.wordCountTimer),
"bigdoclimit": str(self.bigDocLimit),
"incnoteswcount": str(self.incNotesWCount),
"showfullpath": str(self.showFullPath),
"highlightquotes": str(self.highlightQuotes),
"allowopensquote": str(self.allowOpenSQuote),
"allowopendquote": str(self.allowOpenDQuote),
"highlightemph": str(self.highlightEmph),
"stopwhenidle": str(self.stopWhenIdle),
"useridletime": str(self.userIdleTime),
}
theConf["Backup"] = {
"backuppath": str(self.backupPath),
"backuponclose": str(self.backupOnClose),
"askbeforebackup": str(self.askBeforeBackup),
}
theConf["State"] = {
"showrefpanel": str(self.showRefPanel),
"viewcomments": str(self.viewComments),
"viewsynopsis": str(self.viewSynopsis),
"searchcase": str(self.searchCase),
"searchword": str(self.searchWord),
"searchregex": str(self.searchRegEx),
"searchloop": str(self.searchLoop),
"searchnextfile": str(self.searchNextFile),
"searchmatchcap": str(self.searchMatchCap),
}
theConf["Path"] = {
"lastpath": str(self.lastPath),
}
# Write config file
cnfPath = os.path.join(self.confPath, self.confFile)
try:
with open(cnfPath, mode="w", encoding="utf-8") as outFile:
theConf.write(outFile)
self.confChanged = False
except Exception as exc:
logger.error("Could not save config file")
logException()
self.hasError = True
self.errData.append("Could not save config file")
self.errData.append(formatException(exc))
return False
return True
def loadRecentCache(self):
"""Load the cache file for recent projects.
"""
if self.dataPath is None:
return False
self.recentProj = {}
cacheFile = os.path.join(self.dataPath, nwFiles.RECENT_FILE)
if not os.path.isfile(cacheFile):
return True
try:
with open(cacheFile, mode="r", encoding="utf-8") as inFile:
theData = json.load(inFile)
for projPath, theEntry in theData.items():
self.recentProj[projPath] = {
"title": theEntry.get("title", ""),
"time": theEntry.get("time", 0),
"words": theEntry.get("words", 0),
}
except Exception as exc:
self.hasError = True
self.errData.append("Could not load recent project cache")
self.errData.append(formatException(exc))
return False
return True
def saveRecentCache(self):
"""Save the cache dictionary of recent projects.
"""
if self.dataPath is None:
return False
cacheFile = os.path.join(self.dataPath, nwFiles.RECENT_FILE)
cacheTemp = os.path.join(self.dataPath, nwFiles.RECENT_FILE+"~")
try:
with open(cacheTemp, mode="w+", encoding="utf-8") as outFile:
json.dump(self.recentProj, outFile, indent=2)
except Exception as exc:
self.hasError = True
self.errData.append("Could not save recent project cache")
self.errData.append(formatException(exc))
return False
if os.path.isfile(cacheFile):
os.unlink(cacheFile)
os.rename(cacheTemp, cacheFile)
return True
def updateRecentCache(self, projPath, projTitle, wordCount, saveTime):
"""Add or update recent cache information on a given project.
"""
self.recentProj[os.path.abspath(projPath)] = {
"title": projTitle,
"time": int(saveTime),
"words": int(wordCount),
}
return True
def removeFromRecentCache(self, thePath):
"""Trying to remove a path from the recent projects cache.
"""
if thePath in self.recentProj:
del self.recentProj[thePath]
logger.verbose("Removed recent: %s", thePath)
self.saveRecentCache()
else:
logger.error("Unknown recent: %s", thePath)
return False
return True
##
# Setters
##
def setConfPath(self, newPath):
"""Set the path and filename to the config file.
"""
if newPath is None:
return True
if not os.path.isfile(newPath):
logger.error("File not found, using default config path instead")
return False
self.confPath = os.path.dirname(newPath)
self.confFile = os.path.basename(newPath)
return True
def setDataPath(self, newPath):
"""Set the data path.
"""
if newPath is None:
return True
if not os.path.isdir(newPath):
logger.error("Path not found, using default data path instead")
return False
self.dataPath = os.path.abspath(newPath)
return True
def setLastPath(self, lastPath):
"""Set the last used path (by the user).
"""
if lastPath is None or lastPath == "":
self.lastPath = ""
else:
self.lastPath = os.path.dirname(lastPath)
return True
def setWinSize(self, newWidth, newHeight):
"""Set the size of the main window, but only if the change is
larger than 5 pixels. The OS window manager will sometimes
adjust it a bit, and we don't want the main window to shrink or
grow each time the app is opened.
"""
newWidth = int(newWidth/self.guiScale)
newHeight = int(newHeight/self.guiScale)
if abs(self.winGeometry[0] - newWidth) > 5:
self.winGeometry[0] = newWidth
self.confChanged = True
if abs(self.winGeometry[1] - newHeight) > 5:
self.winGeometry[1] = newHeight
self.confChanged = True
return True
def setPreferencesSize(self, newWidth, newHeight):
"""Sat the size of the Preferences dialog window.
"""
self.prefGeometry[0] = int(newWidth/self.guiScale)
self.prefGeometry[1] = int(newHeight/self.guiScale)
self.confChanged = True
return True
def setTreeColWidths(self, colWidths):
"""Set the column widths of the main project tree.
"""
self.treeColWidth = [int(x/self.guiScale) for x in colWidths]
self.confChanged = True
return True
def setNovelColWidths(self, colWidths):
"""Set the column widths of the novel tree.
"""
self.novelColWidth = [int(x/self.guiScale) for x in colWidths]
self.confChanged = True
return True
def setProjColWidths(self, colWidths):
"""Set the column widths of the Load Project dialog.
"""
self.projColWidth = [int(x/self.guiScale) for x in colWidths]
self.confChanged = True
return True
def setMainPanePos(self, panePos):
"""Set the position of the main GUI splitter.
"""
self.mainPanePos = [int(x/self.guiScale) for x in panePos]
self.confChanged = True
return True
def setDocPanePos(self, panePos):
"""Set the position of the main editor/viewer splitter.
"""
self.docPanePos = [int(x/self.guiScale) for x in panePos]
self.confChanged = True
return True
def setViewPanePos(self, panePos):
"""Set the position of the viewer meta data splitter.
"""
self.viewPanePos = [int(x/self.guiScale) for x in panePos]
self.confChanged = True
return True
def setOutlinePanePos(self, panePos):
"""Set the position of the outline details splitter.
"""
self.outlnPanePos = [int(x/self.guiScale) for x in panePos]
self.confChanged = True
return True
def setShowRefPanel(self, checkState):
"""Set the visibility state of the reference panel.
"""
self.showRefPanel = checkState
self.confChanged = True
return self.showRefPanel
def setViewComments(self, viewState):
"""Set the visibility state of comments in the viewer.
"""
self.viewComments = viewState
self.confChanged = True
return self.viewComments
def setViewSynopsis(self, viewState):
"""Set the visibility state of synopsis comments in the viewer.
"""
self.viewSynopsis = viewState
self.confChanged = True
return self.viewSynopsis
##
# Default Setters
##
def setDefaultGuiTheme(self):
"""Reset the GUI theme to default value.
"""
self.guiTheme = "default"
def setDefaultSyntaxTheme(self):
"""Reset the syntax theme to default value.
"""
self.guiSyntax = "default_light"
def setDefaultIconTheme(self):
"""Reset the icon theme to default value.
"""
self.guiIcons = "typicons_light"
##
# Getters
##
def getWinSize(self):
return [int(x*self.guiScale) for x in self.winGeometry]
def getPreferencesSize(self):
return [int(x*self.guiScale) for x in self.prefGeometry]
def getTreeColWidths(self):
return [int(x*self.guiScale) for x in self.treeColWidth]
def getNovelColWidths(self):
return [int(x*self.guiScale) for x in self.novelColWidth]
def getProjColWidths(self):
return [int(x*self.guiScale) for x in self.projColWidth]
def getMainPanePos(self):
return [int(x*self.guiScale) for x in self.mainPanePos]
def getDocPanePos(self):
return [int(x*self.guiScale) for x in self.docPanePos]
def getViewPanePos(self):
return [int(x*self.guiScale) for x in self.viewPanePos]
def getOutlinePanePos(self):
return [int(x*self.guiScale) for x in self.outlnPanePos]
def getTextWidth(self, focusMode=False):
if focusMode:
return self.pxInt(max(self.focusWidth, 200))
else:
return self.pxInt(max(self.textWidth, 200))
def getTextMargin(self):
return self.pxInt(max(self.textMargin, 0))
def getTabWidth(self):
return self.pxInt(max(self.tabWidth, 0))
def getErrData(self):
"""Compile and return error messages from the initialisation of
the Config class, and clear the error buffer.
"""
errMessage = "<br>".join(self.errData)
self.hasError = False
self.errData = []
return errMessage
##
# Internal Functions
##
def _packList(self, inData):
"""Pack a list of items into a comma-separated string for saving
to the config file.
"""
return ", ".join([str(inVal) for inVal in inData])
def _checkNone(self, checkVal):
"""Return a NoneType if the value corresponds to None, otherwise
return the value unchanged.
"""
if checkVal is None:
return None
if isinstance(checkVal, str):
if checkVal.lower() == "none":
return None
return checkVal
def _checkOptionalPackages(self):
"""Cheks if we have the optional packages used by some features.
"""
try:
import enchant # noqa: F401
self.hasEnchant = True
logger.debug("Checking package 'pyenchant': OK")
except Exception:
self.hasEnchant = False
logger.debug("Checking package 'pyenchant': Missing")
return
# END Class Config
|
vaelue/novelWriter
|
novelwriter/config.py
|
config.py
|
py
| 38,609 |
python
|
en
|
code
| null |
github-code
|
6
|
26355881815
|
# this is nima nikrouz's midterm project
#=============================================library=====================================================
from tabulate import tabulate
#=============================================library=====================================================
#=============================================roots=====================================================
realBord=(("a1","b1","c1","d1","e1","f1","g1","h1"),
("a2","b2","c2","d2","e2","f2","g2","h2"),
("a3","b3","c3","d3","e3","f3","g3","h3"),
("a4","b4","c4","d4","e4","f4","g4","h4"),
("a5","b5","c5","d5","e5","f5","g5","h5"),
("a6","b6","c6","d6","e6","f6","g6","h6"),
("a7","b7","c7","d7","e7","f7","g7","h7"),
("a8","b8","c8","d8","e8","f8","g8","h8"))
primeryBord=[["a1","b1","c1","d1","e1","f1","g1","h1"],
["a2","b2","c2","d2","e2","f2","g2","h2"],
["a3","b3","c3","d3","e3","f3","g3","h3"],
["a4","b4","c4","d4","e4","f4","g4","h4"],
["a5","b5","c5","d5","e5","f5","g5","h5"],
["a6","b6","c6","d6","e6","f6","g6","h6"],
["a7","b7","c7","d7","e7","f7","g7","h7"],
["a8","b8","c8","d8","e8","f8","g8","h8"]]
queen="Q"
full="F"
#=============================================roots=====================================================
#=============================================defs=====================================================
def column(n):
#this def is for having a list of the column we want.
column=[]
for item in range(8):
column.append(primeryBord[item][n])
return column
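# e.g. column(0) returns ["a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8"]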
def orib1(mylist):
    #this def is for the diagonal ("orib") list from top left to bottom right.
i=int(mylist[0])
j=int(mylist[1])
orib1=[]
while 0<=j<len(realBord) and 0<=i<len(realBord):
orib1.append(realBord[i][j])
i+=1
j+=1
i=mylist[0]-1
j=mylist[1]-1
while 0<=j<len(realBord) and 0<=i<len(realBord):
orib1.append(realBord[i][j])
i+=-1
j+=-1
return orib1
def orib2(mylist):
    # this def is for the diagonal ("orib") list from top right to bottom left.
i=mylist[0]
j=mylist[1]
orib=[]
while 0<=j < len(realBord) and i < len(realBord):
orib.append(realBord[i][j])
i += 1
j -= 1
i = mylist[0] - 1
j = mylist[1] +1
while 0 <= j < len(realBord) and 0 <= i < len(realBord):
orib.append(realBord[i][j])
i -= 1
j += 1
return orib
def index(k):
    #this def is for finding the index of an element.
for item in range(len(realBord)):
for j in range(len(realBord)):
if realBord[item][j]==k:
return [item,j]
def check(n):
    #this def checks whether the row, column, and diagonals of a square hold any queen.
i=index(n)[0]
j=index(n)[1]
sets=list(set(realBord[i]).union(set(column(j))).union(set(orib1(index(n)))).union(set(orib2(index(n)))))
for item in range(len(sets)):
if sets[item]=="Q":
return False
def emptyplace(mylist):
#this def finds empty places for placing queens.
emptyPlace1 = []
for j in range(len(mylist)):
if mylist[j] == "Q" or mylist[j] == "F":
pass
else:
if check(mylist[j]) != False:
emptyPlace1.append(mylist[j])
return emptyPlace1
def queenfinder():
    #this def is a primary memory for remembering where the queens are.
queenlist=[]
for item in range(8):
for j in range(8):
if primeryBord[item][j]=="Q":
queenlist.append([item,j])
return queenlist
def queenPlacer(x):
    #this def is for placing a queen and marking which squares are no longer empty for other queens.
i=index(x)[0]
j=index(x)[1]
y=orib1(index(x))
z=orib2(index(x))
oribs=list(set(y)^set(z))
for item in range(8):
primeryBord[i][item]=full
for item in range(8):
primeryBord[item][j]=full
for item in range(len(oribs)):
if oribs[item]=="F" or oribs[item]=="Q":
pass
else:
inorb=index(oribs[item])
primeryBord[int(inorb[0])][int(inorb[1])]=full
primeryBord[i][j]=queen
return primeryBord
def maindef():
#this def is the main def for finding different situations.
num=1
#these loops are for placing queens.
for item1 in range(8):
queenPlacer(column(0)[item1])
for item2 in range(len(emptyplace(column(1)))):
queenPlacer(emptyplace(column(1))[item2])
for item3 in range(len(emptyplace(column(2)))):
queenPlacer(emptyplace(column(2))[item3])
for item4 in range(len(emptyplace(column(3)))):
queenPlacer(emptyplace(column(3))[item4])
for item5 in range(len(emptyplace(column(4)))):
queenPlacer(emptyplace(column(4))[item5])
for item6 in range(len(emptyplace(column(5)))):
queenPlacer(emptyplace(column(5))[item6])
for item7 in range(len(emptyplace(column(6)))):
queenPlacer(emptyplace(column(6))[item7])
for item8 in range(len(emptyplace(column(7)))):
queenPlacer(emptyplace(column(7))[item8])
print("NO.",num,":")
print(tabulate(primeryBord,headers=["a","b","c","d","e","f","g","h"],
showindex=["1 ","2 ","3 ","4 ","5 ","6 ","7 ","8 "]))
print("-----------------------------------------------","\n")
num+=1
    #these loops are for changing the primary board back to the previous level.
for item9 in range(len(column(7))):
primeryBord[item9][7]=realBord[item9][7]
for j1 in range(len(queenfinder())):
ix=int(queenfinder()[j1][0])
jx=int(queenfinder()[j1][1])
x=realBord[ix][jx]
queenPlacer(x)
for item10 in range(len(column(6))):
primeryBord[item10][6] = realBord[item10][6]
primeryBord[item10][7]=realBord[item10][7]
for j1 in range(len(queenfinder())):
ix = int(queenfinder()[j1][0])
jx = int(queenfinder()[j1][1])
x = realBord[ix][jx]
queenPlacer(x)
for item11 in range(len(column(5))):
primeryBord[item11][5]=realBord[item11][5]
primeryBord[item11][6]=realBord[item11][6]
primeryBord[item11][7]=realBord[item11][7]
for j1 in range(len(queenfinder())):
ix = int(queenfinder()[j1][0])
jx = int(queenfinder()[j1][1])
x = realBord[ix][jx]
queenPlacer(x)
for item12 in range(len(column(4))):
primeryBord[item12][4]=realBord[item12][4]
primeryBord[item12][5]=realBord[item12][5]
primeryBord[item12][6]=realBord[item12][6]
primeryBord[item12][7]=realBord[item12][7]
for j1 in range(len(queenfinder())):
ix = int(queenfinder()[j1][0])
jx = int(queenfinder()[j1][1])
x = realBord[ix][jx]
queenPlacer(x)
for item13 in range(len(column(3))):
primeryBord[item13][3]=realBord[item13][3]
primeryBord[item13][4]=realBord[item13][4]
primeryBord[item13][5]=realBord[item13][5]
primeryBord[item13][6]=realBord[item13][6]
primeryBord[item13][7]=realBord[item13][7]
for j1 in range(len(queenfinder())):
ix = int(queenfinder()[j1][0])
jx = int(queenfinder()[j1][1])
x = realBord[ix][jx]
queenPlacer(x)
for item14 in range(len(column(2))):
primeryBord[item14][2]=realBord[item14][2]
primeryBord[item14][3]=realBord[item14][3]
primeryBord[item14][4]=realBord[item14][4]
primeryBord[item14][5]=realBord[item14][5]
primeryBord[item14][6]=realBord[item14][6]
primeryBord[item14][7]=realBord[item14][7]
for j1 in range(len(queenfinder())):
ix = int(queenfinder()[j1][0])
jx = int(queenfinder()[j1][1])
x = realBord[ix][jx]
queenPlacer(x)
for item15 in range(len(column(1))):
primeryBord[item15][1]=realBord[item15][1]
primeryBord[item15][2]=realBord[item15][2]
primeryBord[item15][3]=realBord[item15][3]
primeryBord[item15][4]=realBord[item15][4]
primeryBord[item15][5]=realBord[item15][5]
primeryBord[item15][6]=realBord[item15][6]
primeryBord[item15][7]=realBord[item15][7]
for j1 in range(len(queenfinder())):
ix = int(queenfinder()[j1][0])
jx = int(queenfinder()[j1][1])
x = realBord[ix][jx]
queenPlacer(x)
for item16 in range(len(column(0))):
primeryBord[item16][0]=realBord[item16][0]
primeryBord[item16][1]=realBord[item16][1]
primeryBord[item16][2]=realBord[item16][2]
primeryBord[item16][3]=realBord[item16][3]
primeryBord[item16][4]=realBord[item16][4]
primeryBord[item16][5]=realBord[item16][5]
primeryBord[item16][6]=realBord[item16][6]
primeryBord[item16][7]=realBord[item16][7]
#=============================================defs=====================================================
#=============================================action=====================================================
maindef()
#=============================================action=====================================================
# this is nima nikrouz's midterm project
|
nimankz/8queen-project
|
midterm1.2.py
|
midterm1.2.py
|
py
| 11,209 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22657330763
|
# -----------------
# Extension Details
# -----------------
name = "Space Station"
version = "0.1"
developer = "Type Supply"
developerURL = "http://typesupply.com"
roboFontVersion = "3.2"
pycOnly = False
menuItems = [
dict(
path="menu_glyphEditorSpaceStation.py",
preferredName="Glyph Editor",
shortKey=("command", "/")
),
dict(
path="menu_fontEditorSpaceStation.py",
preferredName="Font Editor",
shortKey=""
)
]
installAfterBuild = True
# ----------------------
# Don't edit below here.
# ----------------------
from AppKit import *
import os
import shutil
from mojo.extensions import ExtensionBundle
# Convert short key modifiers.
modifierMap = {
"command": NSCommandKeyMask,
"control": NSAlternateKeyMask,
"option": NSAlternateKeyMask,
"shift": NSShiftKeyMask,
"capslock": NSAlphaShiftKeyMask,
}
for menuItem in menuItems:
shortKey = menuItem.get("shortKey")
if isinstance(shortKey, tuple):
shortKey = list(shortKey)
character = shortKey.pop(-1)
modifiers = [modifierMap.get(modifier, modifier) for modifier in shortKey]
if len(modifiers) == 1:
modifiers = modifiers[0]
else:
m = None
for modifier in modifiers:
if m is None:
m = modifier
else:
m |= modifier
modifiers = m
converted = (modifiers, character)
menuItem["shortKey"] = tuple(converted)
# Make the various paths.
basePath = os.path.dirname(__file__)
sourcePath = os.path.join(basePath, "source")
libPath = os.path.join(sourcePath, "code")
licensePath = os.path.join(basePath, "license.txt")
requirementsPath = os.path.join(basePath, "requirements.txt")
extensionFile = "%s.roboFontExt" % name
buildPath = os.path.join(basePath, "build")
extensionPath = os.path.join(buildPath, extensionFile)
# Build the extension.
B = ExtensionBundle()
B.name = name
B.developer = developer
B.developerURL = developerURL
B.version = version
B.launchAtStartUp = True
B.mainScript = "main.py"
B.html = os.path.exists(os.path.join(sourcePath, "documentation", "index.html"))
B.requiresVersionMajor = roboFontVersion.split(".")[0]
B.requiresVersionMinor = roboFontVersion.split(".")[1]
B.addToMenu = menuItems
with open(licensePath) as license:
B.license = license.read()
with open(requirementsPath) as requirements:
B.requirements = requirements.read()
print("Building extension...", end=" ")
v = B.save(extensionPath, libPath=libPath, pycOnly=pycOnly)
print("done!")
errors = B.validationErrors()
if errors:
print("Uh oh! There were errors:")
print(errors)
# Install the extension.
if installAfterBuild:
print("Installing extension...", end=" ")
installDirectory = os.path.expanduser("~/Library/Application Support/RoboFont/plugins")
installPath = os.path.join(installDirectory, extensionFile)
if os.path.exists(installPath):
shutil.rmtree(installPath)
shutil.copytree(extensionPath, installPath)
print("done!")
print("RoboFont must now be restarted.")
|
typesupply/spacestation
|
build.py
|
build.py
|
py
| 2,958 |
python
|
en
|
code
| 12 |
github-code
|
6
|
25005501771
|
import wizard
import pooler
from tools.translate import _
def _check_sections(self, cr, uid, data, context):
pool = pooler.get_pool(cr.dbname)
data_obj = pool.get('ir.model.data')
sec_obj = pool.get('crm.case.section')
bug_id = sec_obj.search(cr, uid, [('code','=','BugSup')])
if not bug_id:
        raise wizard.except_wizard(_('Error!'),
            _('You did not install the Bug Tracking section when you configured the crm module.' \
            '\nYou must create a section with the code \'BugSup\'.'
            ))
else:
id1 = data_obj._get_id(cr, uid, 'crm', 'crm_case_form_view')
if id1:
id1 = data_obj.browse(cr, uid, id1, context=context).res_id
return {
'domain':"[('section_id.name','=','Bug Tracking')]",
'name': _('New Bug'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'crm.case',
'view_id': False,
'views': [(id1,'form')],
'type': 'ir.actions.act_window',
}
class check_section(wizard.interface):
states = {
'init': {
'actions': [],
'result': {'type': 'action', 'action':_check_sections, 'state' : 'end'}
},
}
check_section('portal.crm.check.section')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
portal_project/wizard/wizard_check_section.py
|
wizard_check_section.py
|
py
| 1,342 |
python
|
en
|
code
| 9 |
github-code
|
6
|
19208530297
|
'''
This program is used to get information from a user and
make an email from that information
'''
#asking for the user's first name and then storing it in the variable firstName
firstName = input("Enter your first name: ")
#asking for the user's last name and then storing it in the variable lastName
lastName = input("Enter your last name: ")
#asking for the user's domain name and then storing it in the variable domainName
domainName = input("Enter your domain name: ")
#creating the full email address
emailAddress = (lastName + "." + firstName + "@" + domainName)
#printing all the information to the user about their newly assigned email address
print("Hello " + firstName + ",\nYour new email address is: " + emailAddress)
|
kelvincaoyx/UTEA-PYTHON
|
Week 1/pythonUnitOnePractice/email.py
|
email.py
|
py
| 740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11932438017
|
from env import data
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from time import sleep
# Program to fill in the form data
def program(data_set):
    # open the website
browser = webdriver.Chrome()
actions = ActionChains(browser)
browser.get(data['linkActive'])
print('==== Welcome To Bangef, Automated Post-Test ====');
try:
for d in data_set:
            # check that each input element is present, then fill it in
            # Name
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][0]))
).send_keys(d['namaLengkap'])
# Email
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][1]))
).send_keys(d['email'])
            # Phone number
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][2]))
).send_keys('0'+d['noTelpon'])
            # Gender
elementJK = browser.find_element(By.ID, data['selectorById'][3])
Select(elementJK).select_by_value(d['jk'])
            # Age
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][4]))
).send_keys(d['usia'])
            # Occupation
elementPekerjaan = browser.find_element(By.ID, data['selectorById'][5])
Select(elementPekerjaan).select_by_value(d['pekerjaan'])
            # Community / organization
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][6]))
).send_keys(d['organisasi'])
            # Education
elementPendidikan = browser.find_element(By.ID, data['selectorById'][7])
Select(elementPendidikan).select_by_value(d['pendidikan'])
            # Province
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][8]))
).send_keys(d['provinsi'], Keys.RETURN)
            # City of origin
elementCities = browser.find_element(By.ID, data['selectorById'][9])
elementCities.send_keys(d['kotaAsal'], Keys.RETURN)
# Captcha
browser.execute_script("arguments[0].scrollIntoView();", elementCities)
            captcha = input('Enter the captcha validation (sample: 9*9): \n')
arr = list(captcha)
if arr[1] == '+' :
result = int(arr[0]) + int(arr[2])
else :
result = int(arr[0]) * int(arr[2])
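            # e.g. '9*9' -> 81 and '3+4' -> 7; only single-digit 'a+b' or
            # 'a*b' captchas are handled by this parsing.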
browser.find_element(By.ID, data['selectorById'][10]).send_keys(result)
# Select Radio Button
q1 = browser.find_element(By.ID, "1070-"+d['qSatu'])
browser.execute_script("arguments[0].scrollIntoView();", q1)
sleep(.5)
q1.click()
q2 = browser.find_element(By.ID, "1071-"+d['qDua'])
browser.execute_script("arguments[0].scrollIntoView();", q2)
q2.click()
q3 = browser.find_element(By.ID, "1072-"+d['qTiga'])
browser.execute_script("arguments[0].scrollIntoView();", q3)
q3.click()
q4 = browser.find_element(By.ID, "1073-"+d['qEmpat'])
actions.move_to_element(q4).click().perform()
browser.execute_script("arguments[0].scrollIntoView();", q4)
sleep(.5)
q4.click()
q5 = browser.find_element(By.ID, "1076-"+d['qLima'])
browser.execute_script("arguments[0].scrollIntoView();", q5)
q5.click()
# submit
footer = browser.find_element(By.CSS_SELECTOR, '#__next > div > div.footer.mt-3')
browser.execute_script("arguments[0].scrollIntoView();", footer)
sleep(1)
browser.find_element(By.XPATH, data['selectorByXpath']).click()
sleep(3)
            # navigate back to the previous page
browser.get(data['linkActive'])
WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, data['selectorById'][0]))
)
            print('Data for '+d['namaLengkap']+' submitted successfully ✔️')
            print('Total data: '+ str(d['id']) +' post-tests finished')
except Exception as err:
        print('Last completed data: "id": "'+str(d['id'])+'".')
print(err)
browser.quit()
|
bangef/pz
|
python/post-test/module/program.py
|
program.py
|
py
| 4,797 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34294693162
|
import random as r
class node:
def __init__(self,val) -> None:
self.data=val
self.left=None
self.right=None
class BST:
def __init__(self) -> None:
self.root=None
def insertR(self,data,root):
if root==None:
return node(data)
else:
            if data < root.data:
root.left=self.insertR(data,root.left)
else:
root.right=self.insertR(data,root.right)
return root
def inorder(self,root):
current=root
if current==None:
return
self.inorder(current.left)
print(current.data)
self.inorder(current.right)
if __name__=='__main__':
tree=BST()
lister=[]
for i in range(12):
lister.append(r.randint(10,123))
for val in lister:
tree.root=tree.insertR(val,tree.root)
tree.inorder(tree.root)
|
farhan1503001/Data-Structures-203-IUB
|
Binary Search Tree/insertR.py
|
insertR.py
|
py
| 917 |
python
|
en
|
code
| 2 |
github-code
|
6
|
35839328750
|
import argparse
from distutils.util import strtobool
import pathlib
import siml
import convert_raw_data
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'settings_yaml',
type=pathlib.Path,
help='YAML file name of settings.')
parser.add_argument(
'raw_data_directory',
type=pathlib.Path,
help='Raw data directory')
parser.add_argument(
'-p', '--preprocessors-pkl',
type=pathlib.Path,
default=None,
help='Preprocessors.pkl file')
parser.add_argument(
'-o', '--out-dir',
type=pathlib.Path,
default=None,
help='Output directory name')
parser.add_argument(
'-f', '--force-renew',
type=strtobool,
default=0,
help='If True, overwrite existing data [False]')
parser.add_argument(
'-l', '--light',
type=strtobool,
default=0,
help='If True, compute minimum required data only [False]')
parser.add_argument(
'-n', '--read-npy',
type=strtobool,
default=1,
help='If True, read .npy files instead of original files '
'if exists [True]')
parser.add_argument(
'-r', '--recursive',
type=strtobool,
default=1,
help='If True, process directory recursively [True]')
parser.add_argument(
'-e', '--elemental',
type=strtobool,
default=0,
help='If True, create also elemental features [False]')
parser.add_argument(
'-a', '--convert-answer',
type=strtobool,
default=1,
help='If True, convert answer [True]')
parser.add_argument(
'-s', '--skip-interim',
type=strtobool,
default=0,
help='If True, skip conversion of interim data [False]')
args = parser.parse_args()
main_setting = siml.setting.MainSetting.read_settings_yaml(
args.settings_yaml)
if not args.convert_answer:
main_setting.conversion.required_file_names = ['*.msh', '*.cnt']
main_setting.data.raw = args.raw_data_directory
if args.out_dir is None:
args.out_dir = args.raw_data_directory
main_setting.data.interim = [siml.prepost.determine_output_directory(
main_setting.data.raw,
main_setting.data.raw.parent / 'interim', 'raw')]
main_setting.data.preprocessed = [
siml.prepost.determine_output_directory(
main_setting.data.raw,
main_setting.data.raw.parent / 'preprocessed', 'raw')]
else:
main_setting.data.interim = [args.out_dir / 'interim']
main_setting.data.preprocessed = [args.out_dir / 'preprocessed']
if not args.skip_interim:
conversion_function = convert_raw_data.HeatConversionFuncionCreator(
create_elemental=args.elemental,
convert_answer=args.convert_answer,
light=args.light)
raw_converter = siml.prepost.RawConverter(
main_setting,
conversion_function=conversion_function,
filter_function=convert_raw_data.filter_function_heat,
force_renew=args.force_renew,
recursive=args.recursive,
to_first_order=True,
write_ucd=False,
read_npy=args.read_npy, read_res=args.convert_answer)
raw_converter.convert()
preprocessor = siml.prepost.Preprocessor(
main_setting, force_renew=args.force_renew,
allow_missing=True)
preprocessor.convert_interim_data(preprocessor_pkl=args.preprocessors_pkl)
return
if __name__ == '__main__':
main()
|
yellowshippo/isogcn-iclr2021
|
src/preprocess_raw_data_with_preprocessors.py
|
preprocess_raw_data_with_preprocessors.py
|
py
| 3,638 |
python
|
en
|
code
| 42 |
github-code
|
6
|
7874667169
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 22 16:48:54 2019
@author: xiaohaoren
"""
import json
import pickle
import numpy as np
negative_word = ['悶熱','吵雜','髒','髒亂','加強','改進','缺點']
def Load_All_Info(json_path,pickle_path):
with open(json_path,'r') as fp:
json_data = json.load(fp)
with open(pickle_path, 'rb') as fp:
pickle_data = pickle.load(fp)
keys = list(json_data.keys())
return json_data,pickle_data,keys
def FilteringAndRanking(querys,places,corpus,review_list=None):
"""
query = ['冷氣','衛生',...]
place = ['春山茶水舖','小川拉麵',...]
corpus = {'春山茶水舖':{'不錯':(正向次數,評論編號),'五花肉':(正向分數,評論編號),...}}
"""
scoreboard = {}
for i,place in enumerate(places):
#N = corpus[place]['__termNum__']
N = corpus[place]['__reviewNum__']
scoreboard[place]=0
if place not in corpus:
continue
for term in querys:
term_score = 0
term_sign = -1 if term in negative_word else 1
if term not in corpus[place]:
continue
else:
keyword_data = corpus[place][term]
for rid,p in keyword_data.items():
term_score += (term_sign * p)
if review_list is not None:
rid = int(rid)
review_content = review_list[rid]
print('"%s"由於「%s」中的"%s"而加%d分' % (place,review_content,term,term_sign*p))
scoreboard[place] += term_score
scoreboard[place] = scoreboard[place]/(N*len(querys)) * 100
return scoreboard
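# Example with illustrative numbers: for querys = ['乾淨', '衛生'], a place
# whose reviews match '乾淨' with weights 0.8 and 0.6 accumulates
# term_score = 1.4; with __reviewNum__ N = 10 the final score is
# 1.4 / (10 * 2) * 100 = 7.0.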
if __name__ == "__main__":
corpus_path = '../data/place_dict.json'
reviewContent_path = '../data/review_list.json'
querys = ['乾淨','衛生']
corpus,review_list,places = Load_All_Info(json_path=corpus_path,pickle_path=reviewContent_path)
scoreboard = FilteringAndRanking(querys=querys,places=places,corpus=corpus,review_list=review_list)
|
e841018/DinnerSelector
|
utils/Filtering.py
|
Filtering.py
|
py
| 2,301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28120118270
|
import numpy as np
import pandas as pd
class Model:
    def __init__(self, bias: float, age_y: float, bmi: float, bmisq: float,
gender: float, dysrhythmia: float, heart_failure: float, dys_hf_interaction: float,
discharge_home_self: float, discharge_facility: float, ed_visits: float, psych_dx: float,
pta_med_count: float, drug_abuse_dx: float, narcotic_meds: float, tja_within_past_yr: float):
"""Input regression coefficients as positional or named parameters.
Keep them organized and expose list or data frame. Note: 16 coefficients.
"""
self.as_list = [bias, age_y, bmi, bmisq,
gender, dysrhythmia, heart_failure, dys_hf_interaction,
discharge_home_self, discharge_facility, ed_visits, psych_dx,
pta_med_count, drug_abuse_dx, narcotic_meds, tja_within_past_yr]
row_names = ['bias', 'age_y', 'bmi', 'bmi ** 2',
'(gender == male)', 'dysrhythmia', 'heart_failure', 'dysrhythmia * heart_failure',
'disch_home_or_self', 'disch_facility', '(ed_visits > 9)', 'psych_dx',
'pta_med_count', 'drug_abuse_dx', 'narcotic_meds', 'TJA within past yr']
self.as_dataframe = pd.DataFrame(self.as_list, index=row_names)
class Patient:
def __init__(self, age_y: float, bmi: float, gender: str, dysrhythmia: bool,
heart_failure: bool, discharge: str, ed_visits: int, psych_dx: bool,
pta_med_count: float, drug_abuse_dx: bool, narcotic_meds: bool, tja_within_past_12_mo: bool):
"""Input 12 patient characteristics as positional or named parameters.
Keep them organized.
Expose list of 16 numbers, for multiplying with model coefficients.
"""
# self.age_y = age_y
# self.bmi = bmi
# self.gender = gender
# self.dysrhythmia = dysrhythmia
# self.heart_failure = heart_failure
# self.discharge = discharge
# self.ed_visits = ed_visits
# self.psych_dx = psych_dx
# self.pta_med_count = pta_med_count
# self.drug_abuse_dx = drug_abuse_dx
# self.narcotic_meds = narcotic_meds
# self.tja_within_past_12_mo = tja_within_past_12_mo
self.as_list = [1, age_y,
bmi, bmi ** 2,
gender == 'male', dysrhythmia,
heart_failure, dysrhythmia and heart_failure,
discharge == 'home' or discharge == 'self-care', discharge == 'facility',
ed_visits > 9, psych_dx,
pta_med_count, drug_abuse_dx,
narcotic_meds, tja_within_past_12_mo]
model_90_days = Model(
-0.5527,
0, # age
-0.0903, # bmi
0.00145, # bmi ** 2
0.2241, # (gender == 'male')
-0.1169, # dysrhythmia
-0.1284, # heart_failure
0.7544, # dysrhythmia * heart_failure
-0.2464, # (discharge == 'home' or discharge == 'self-care')
0.3233, # (discharge == 'facility')
0.3325, # (ed_visits > 9)
0, # psych dx
0.0193, # pta_med_count
0.2475, # drug_abuse_dx
0.1296, # narcotic_meds
-0.3820 # tja_within_past_12_mo
)
model_30_days = Model(-2.6576, 0.0291, -0.1345, 0.00218,
0.2070, -0.0505, -0.3669, 0.7994,
-0.3124, 0.3645, 0.5942, 0.1934,
0.0332, 0, 0, 0)
p = Patient(65, 30, 'male', True,
True, 'home', 3, False,
5, False, True, False)
if __name__ == '__main__':
print("Patient, length =", len(p.as_list))
print(p.as_list)
print()
print("Model, length =", len(model_30_days.as_list))
print(model_30_days.as_list)
print()
print("Score =", np.dot(p.as_list, model_30_days.as_list))
|
zimolzak/py-medical-functions
|
ortho_readmission.py
|
ortho_readmission.py
|
py
| 3,867 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71175312187
|
import re
import time
import textwrap
from copy import copy
import torch.nn.functional as F
from training_utils import *
BASH_FORMATTING = {
'PURPLE': '\033[95m',
'CYAN': '\033[96m',
'DARKCYAN': '\033[36m',
'BLUE': '\033[94m',
'GREEN': '\033[92m',
'YELLOW': '\033[93m',
'RED':'\033[91m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
'END': '\033[0m'
}
def bash_format_text(text, *args):
formatting = ''
for arg in args:
formatting += BASH_FORMATTING[arg]
return formatting + text + BASH_FORMATTING['END']
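# e.g. bash_format_text('Typing...', 'YELLOW', 'BOLD')
#      returns '\033[93m\033[1mTyping...\033[0m'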
# TODO: maybe add do_sample?
# Generation settings (temperature, repetition penalty, no-repeat n-gram
# size) are re-randomized on every reply to vary the bot's responses.
def transfer_learning_bot(model, tokenizer, max_length, top_k, top_p):
'''
    For a chatbot trained via transfer learning.
'''
input_sentence = input('User >> ')
input_sentence = input_sentence.lower()
context = copy(input_sentence)
input_sentence = tokenizer.encode(input_sentence, truncation = True, max_length = 128, return_tensors = 'pt')
continue_convo = True
while continue_convo:
print(bash_format_text('Typing...', 'YELLOW', 'BOLD'), end='\r' )
uni_temp = round(torch.rand(1).clamp(0.1).item(), 2)
repeat_penalty = round((torch.rand(1) * 5).clamp(1).item(), 2)
ngram = int(np.random.choice([2,3,4], 1)[0])
bot_reply = model.generate(input_sentence, max_length = max_length, top_k = top_k, top_p = top_p, temperature = uni_temp,
repetition_penalty = repeat_penalty, skip_special_tokens = True,
no_repeat_ngram_size=ngram, pad_token_id = tokenizer.eos_token_id)
# length_penalty=length_penalty)
bot_reply = tokenizer.decode(bot_reply.squeeze()).replace('<|endoftext|>', '')
bot_reply = textwrap.fill(bot_reply, width=75)
print(bash_format_text('Aubrey: {}'.format(bot_reply), 'YELLOW', 'BOLD'))
response = input('User >> ')
if (response == 'q' or response == 'quit' or response == 'exit'):
continue_convo = False
input_sentence = tokenizer.encode(response.lower(), truncation= True, max_length = 128, return_tensors = 'pt')
|
amauriciorr/AubreyBot
|
chat_utils.py
|
chat_utils.py
|
py
| 2,389 |
python
|
en
|
code
| 2 |
github-code
|
6
|
33359786284
|
from unittest import TestCase
import unittest
import requests
# import sys
#
# sys.path.insert(0, '../../src')
class TestLoadTimeSeries(TestCase):
def test_load_data_success(self):
f = open("tests/routes/time_series_covid19_recovered_global.csv", "rb")
file = f.read()
url = 'https://covid-monitor-61.herokuapp.com/time_series/data?type=recovered'
r = requests.post(url, data=file, headers={"Content-Type": "text/csv"})
f.close()
self.assertEqual(r.status_code, 200)
def test_query_data(self):
url = 'https://covid-monitor-61.herokuapp.com/time_series/cases'
body = {"return_type": "json",
"start_date": "01/26/20",
"end_date": "01/28/20",
"types": ["Recovered"],
"locations":
[
{"Country/Region": "Albania"},
{"Country/Region": "Canada", "Province/State": "Ontario"},
{"Country/Region": "Australia"}
]
}
r = requests.post(url, json=body, headers={"Content-Type": "application/json"})
self.assertEqual(r.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
shin19991207/CSC301-A2
|
tests/routes/test_time_series.py
|
test_time_series.py
|
py
| 1,255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25254340151
|
import numpy as np
import sys
from vispy import app, visuals, scene
# build visuals
Plot3D = scene.visuals.create_visual_node(visuals.line.line.LineVisual)
# build canvas
canvas = scene.SceneCanvas(keys='interactive', title='plot3d', show=True)
# Add a ViewBox to let the user zoom/rotate
view = canvas.central_widget.add_view()
view.camera = 'turntable'
view.camera.fov = 45
view.camera.distance = 6
# prepare data
x, y, z, segments = [], [], [], []
for start, i in enumerate(np.linspace(-5, 5, 1000)):
N = 6000
x.append(np.sin(np.linspace(-5-i, 5+1, N)*np.pi))
y.append(np.cos(np.linspace(-5+i, 5-i, N)*np.pi))
z.append(np.linspace(-5-i, 5-i, N))
    start_idx = N * start  # each iteration appends N points
idxs = np.arange(start_idx, start_idx+N-1)
idxs = np.stack([idxs, idxs+1], axis=-1)
segments.append(idxs)
x, y, z = np.concatenate(x), np.concatenate(y), np.concatenate(z)
segments = np.concatenate(segments, axis=0)
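# 'segments' is an (M, 2) array of point-index pairs; passed as 'connect'
# below, it makes the LineVisual draw one segment per pair rather than a
# single continuous strip.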
# plot
pos = np.c_[x, y, z]
Plot3D(pos, width=10.0,
color=(1.0, 0.0, 0.0, 1.0), method='gl',
connect=segments,
parent=view.scene)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
|
ptmorris03/Clip3D
|
lines.py
|
lines.py
|
py
| 1,152 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22312741825
|
# Consider a tuple that stores temperatures in Celsius (C) or Fahrenheit (F)
# as a two-part value: temperature and scale. For example:
# 32.5 degrees Celsius is represented as (32.5, 'C') and 45.2 degrees
# Fahrenheit is represented as (45.2, 'F'). Write a function that adds two
# temperatures that may be in Celsius or in Fahrenheit. If both temperatures
# are on the same scale, the answer must be on that scale.
# If the temperatures are on different scales, the answer must be given on
# the scale of the second temperature. Use up to 4 (four) decimal places.
soma = ()
t = (float(input()),
str(input()).upper().strip())
t2 = (float(input()),
str(input()).upper().strip())
if t[1] == t2[1]:
    soma = t[0] + t2[0]
    print(f"({soma:.4f}, '{t[1]}')")
elif t[1] != t2[1] and t[1] == 'C':
    F = 9 * t[0] / 5 + 32
    soma = F + t2[0]
    print(f"({soma:.4f}, '{t2[1]}')")
elif t[1] != t2[1] and t[1] == 'F':
    C = (t[0] - 32) * (5 / 9)
    soma = C + t2[0]
    print(f"({soma:.4f}, '{t2[1]}')")
|
AlcionePereira/semana-14-1-parte
|
soma.py
|
soma.py
|
py
| 1,109 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
333459228
|
import argparse
import glob
import os
import h5py
import hdbscan
import numpy as np
from scipy.ndimage import binary_erosion
from skimage.filters import gaussian
from skimage.segmentation import watershed
from sklearn.cluster import MeanShift
def expand_labels_watershed(seg, raw, erosion_iters=4):
bg_mask = seg == 0
# don't need to do anything if we only have background
if bg_mask.size == int(bg_mask.sum()):
return seg
hmap = gaussian(raw, sigma=1.)
bg_mask = binary_erosion(bg_mask, iterations=erosion_iters)
seg_new = seg.copy()
bg_id = int(seg.max()) + 1
seg_new[bg_mask] = bg_id
seg_new = watershed(hmap, seg_new)
seg_new[seg_new == bg_id] = 0
return seg_new
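# Eroding the background before seeding keeps the watershed from flooding
# across thin gaps between instances; erosion_iters controls how far the
# background seed retreats from the foreground.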
def cluster(emb, clustering_alg, semantic_mask=None):
output_shape = emb.shape[1:]
# reshape (E, D, H, W) -> (E, D * H * W) and transpose -> (D * H * W, E)
flattened_embeddings = emb.reshape(emb.shape[0], -1).transpose()
result = np.zeros(flattened_embeddings.shape[0])
if semantic_mask is not None:
flattened_mask = semantic_mask.reshape(-1)
assert flattened_mask.shape[0] == flattened_embeddings.shape[0]
else:
flattened_mask = np.ones(flattened_embeddings.shape[0])
if flattened_mask.sum() == 0:
# return zeros for empty masks
return result.reshape(output_shape)
# cluster only within the foreground mask
clusters = clustering_alg.fit_predict(flattened_embeddings[flattened_mask == 1])
# always increase the labels by 1 cause clustering results start from 0 and we may loose one object
result[flattened_mask == 1] = clusters + 1
return result.reshape(output_shape)
def cluster_hdbscan(emb, min_size, eps, min_samples=None, semantic_mask=None):
clustering = hdbscan.HDBSCAN(min_cluster_size=min_size, cluster_selection_epsilon=eps, min_samples=min_samples)
return cluster(emb, clustering, semantic_mask)
def cluster_ms(emb, bandwidth, semantic_mask=None):
clustering = MeanShift(bandwidth=bandwidth, bin_seeding=True)
return cluster(emb, clustering, semantic_mask)
def run_clustering(emb, raw, clustering, delta_var, min_size, expand_labels, remove_largest):
assert clustering in ['ms', 'hdbscan']
if clustering == 'hdbscan':
clusters = cluster_hdbscan(emb, min_size, delta_var)
else:
clusters = cluster_ms(emb, delta_var)
# watershed the empty (i.e. noise) region
if expand_labels:
clusters = expand_labels_watershed(clusters, raw)
if remove_largest:
ids, counts = np.unique(clusters, return_counts=True)
clusters[ids[np.argmax(counts)] == clusters] = 0
return clusters
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Segment embryos')
parser.add_argument('--emb_dir', type=str, help='Path to embedding predictions directory', required=True)
parser.add_argument('--clustering', type=str, help='Clustering algorithm: ms or hdbscan', required=True)
parser.add_argument('--seg_ds', type=str, help='Output seg dataset name', required=True)
parser.add_argument('--delta_var', type=float, help='delta_var param', default=0.5)
parser.add_argument('--min_size', type=int, help='HDBSCAN min_size param', default=50)
parser.add_argument('--remove_largest', help='Remove largest instance (BG)', action='store_true')
parser.add_argument('--expand_labels', help='Expand labels with watershed', action='store_true')
parser.add_argument('--min_instance_size', type=int, help='Min instance size filtering', required=False,
default=None)
args = parser.parse_args()
assert os.path.isdir(args.emb_dir)
for file_path in glob.glob(os.path.join(args.emb_dir, '*predictions.h5')):
_, filename = os.path.split(file_path)
print(f'Processing {filename}')
with h5py.File(file_path, 'r+') as f:
raw_sequence = f['raw_sequence'][:]
embedding_sequence = f['embedding_sequence1'][:]
seg_sequence = []
i = 0
for raw, emb in zip(raw_sequence, embedding_sequence):
i += 1
print(f'Processing patch {i}')
                seg = run_clustering(emb, raw, args.clustering, args.delta_var, args.min_size,
                                     args.expand_labels, args.remove_largest)
seg_sequence.append(seg)
if args.seg_ds in f:
del f[args.seg_ds]
segments = np.stack(seg_sequence, axis=0)
f.create_dataset(args.seg_ds, data=segments, compression='gzip')
print('Done')
|
kreshuklab/takafumi_embryos_segmentation
|
utils/cluster.py
|
cluster.py
|
py
| 4,632 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25965027391
|
import json
import os
from contextlib import suppress
from math import sqrt
from typing import Tuple
import numpy as np
import pandas as pd
from openpyxl import load_workbook, styles, utils
from PIL import Image
def to_excel(
image: Image, path: str, lower_image_size_by: int = 10, **spreadsheet_kwargs
) -> None:
"""
- Added on release 0.0.1;
- Coded originally on https://github.com/Eric-Mendes/image2excel
    Saves an image as a `.xlsx` file by coloring each of its cells with the corresponding pixel's color.
## Parameters
* :param image: Your image opened using the `PIL.Image` module;
* :param path: The path that you want to save your output file.
Example: `/home/user/Documents/my_image.xlsx`;
* :param lower_image_size_by: A factor that the function will divide
your image's dimensions by. Defaults to `10`;
        * It is very important to lower your image's dimensions: a big image might take the function a long time to process, and the resulting spreadsheet will probably take a long time to load in any software you use to open it;
* :param **spreadsheet_kwargs: See below.
## Spreadsheet Kwargs
Optional parameters to tweak the spreadsheet's appearance.
* :param row_height (`float`): the rows' height. Defaults to `15`;
* :param column_width (`float`): the columns' width. Defaults to `2.3`;
        * The default values of `row_height` and `column_width` were chosen so that the cells come out squared; however, like any hardcoded values, they might not do the trick on your device, in which case you can tweak them a little.
    * :param delete_cell_value (`bool`): whether to keep or delete the text corresponding to each cell's color. Defaults to `True`;
* :param zoom_scale (`int`): how much to zoom in or out on the spreadsheet. Defaults to `20` which seems to be the default max zoom out on most spreadsheet softwares.
## Return
* :return: `None`, but outputs a `.xlsx` file on the given `path`.
"""
image = image.convert("RGB")
# Resizing image
image = image.resize(
(image.size[0] // lower_image_size_by, image.size[1] // lower_image_size_by)
)
# OpenPyxl colors work in a weird way
image_colors_processed = [
["%02x%02x%02x" % tuple(item) for item in row]
for row in np.array(image).tolist()
]
df = pd.DataFrame(image_colors_processed)
image_name = os.path.splitext(os.path.split(path)[1])[0]
# Saving a DataFrame where each cell has a text corresponding to the RGB color its background should be
df.to_excel(path, index=False, header=False)
# Loading the excel file, painting each cell with its color and saving the updates
wb = load_workbook(path)
ws = wb.active
ws.title = image_name
for row in range(1, df.shape[0] + 1):
for col in range(1, df.shape[1] + 1):
cell = ws.cell(row=row, column=col)
# Makes cells squared
ws.row_dimensions[row].height = spreadsheet_kwargs.get("row_height", 15)
ws.column_dimensions[
utils.get_column_letter(col)
].width = spreadsheet_kwargs.get("column_width", 2.3)
# Painting the cell
cell.fill = styles.PatternFill(
start_color=cell.value, end_color=cell.value, fill_type="solid"
)
if spreadsheet_kwargs.get("delete_cell_value", True):
cell.value = None # Deletes the text from the cell
# Saves spreadsheet already zoomed in or out
ws.sheet_view.zoomScale = spreadsheet_kwargs.get("zoom_scale", 20)
wb.save(path)
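# Example usage (illustrative; the image path is a placeholder):
#
#     from PIL import Image
#     img = Image.open("my_photo.png")
#     to_excel(img, "/tmp/my_photo.xlsx", lower_image_size_by=15, zoom_scale=30)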
def to_minecraft(
image: Image,
path: str,
lower_image_size_by: int = 10,
player_pos: Tuple[int, int, int] = (0, 0, 0),
minecraft_version: str = '1.18.2',
) -> None:
"""
- Added on release 0.0.1;
- Coded originally on https://github.com/Eric-Mendes/pixel-art-map
Saves an image as a minecraft datapack that when loaded into your world will build a pixel art of it on the player's position.
## Parameters
* :param image: Your image opened using the `PIL.Image` module;
* :param path: The path that you want to save your datapack.
Example: `/home/user/Documents/my_image_datapack`;
* :param lower_image_size_by: A factor that the function will divide
your image's dimensions by. Defaults to `10`;
* :param player_pos: The player's (x, y, z) position. Defaults to `(0, 0, 0)`;
* :param minecraft_version: The minecraft client version (x.xx.x). Default is `1.18.2`.
## Return
* :return: `None`, but outputs a datapack on the given `path`.
"""
image = image.convert("RGB")
# Makes the commands that the datapack will run when loaded
def script(df, **kwargs):
player_pos = [
kwargs.get("player_x", 0),
kwargs.get("player_y", 0),
kwargs.get("player_z", 0),
]
z = (df != df.shift()).cumsum()
zri = z.reset_index()
ix_name = z.index.name
co_name = z.columns.name
for i in z:
v = zri.groupby(i)[ix_name].agg(["first", "last"])
s = {co_name: i}
e = {co_name: i}
for _, r in v.iterrows():
s[ix_name] = r["first"]
e[ix_name] = r["last"]
material = df.loc[r["first"], i]
yield f'fill {s["x"] + player_pos[0]} {0 + player_pos[1]} {s["z"] + player_pos[2]} {e["x"] + player_pos[0]} {0 + player_pos[1]} {e["z"] + player_pos[2]} {material.split(",")[0].strip()}'
# Helper function. Loads the blocks an the colors they have when looked at via map,
# and maps the pixels to the blocks
blocks = [
{
"rgb": (127, 178, 56),
"blocks": ("grass_block", "slime_block"),
},
{
"rgb": (247, 233, 163),
"blocks": ("sand", "birch_planks", "birch_log[axis=y]", "stripped_birch_log[axis=x]", "birch_wood", "stripped_birch_wood", "birch_sign", "birch_pressure_plate", "birch_trapdoor", "birch_stairs", "birch_slab", "birch_fence_gate", "birch_fence", "birch_door", "sandstone", "glowstone", "end_stone", "end_stone_brick_slab", "end_stone_brick_stairs", "end_stone_brick_wall", "bone_block", "turtle_egg", "scaffolding", "candle"),
},
{
"rgb": (199, 199, 199),
"blocks": ("mushroom_stem", "cobweb", "white_bed[part=head]", "white_candle"),
},
{
"rgb": (255, 0, 0),
"blocks": ("redstone_block", "tnt", "lava", "fire"),
},
{
"rgb": (160, 160, 255),
"blocks": ("ice", "frosted_ice", "packed_ice", "blue_ice"),
},
{
"rgb": (167, 167, 167),
"blocks": ("iron_block", "iron_door", "brewing_stand", "heavy_weighted_pressure_plate", "iron_trapdoor", "lantern", "anvil", "grindstone", "soul_lantern", "lodestone"),
},
{
"rgb": (0, 124, 0),
"blocks": ("oak_sapling", "spruce_sapling", "birch_sapling", "jungle_sapling", "acacia_sapling", "dark_oak_sapling", "dandelion", "poppy", "blue_orchid", "allium", "azure_bluet", "red_tulip", "orange_tulip", "white_tulip", "pink_tulip", "oxeye_daisy", "cornflower", "lily_of_the_valley", "wither_rose", "sunflower", "lilac", "rose_bush", "peony", "wheat[age=7]", "sugar_cane[age=9]", "pumpkin_stem[age=7]", "melon_stem[age=7]", "lily_pad", "cocoa[age=2]", "carrots[age=7]", "potatoes[age=7]", "beetroots[age=7]", "sweet_berry_bush[age=3]", "grass", "fern", "vine", "oak_leaves", "spruce_leaves", "birch_leaves", "jungle_leaves", "acacia_leaves", "dark_oak_leaves", "azalea_leaves", "flowering_azalea_leaves", "cactus[age=9]", "bamboo[age=1]", "cave_vines", "spore_blossom", "flowering_azalea", "big_dripleaf", "small_dripleaf"),
},
{
"rgb": (255, 255, 255),
"blocks": ("snow", "snow_block", "white_bed[part=foot]", "white_wool", "white_stained_glass", "white_carpet", "white_shulker_box", "white_glazed_terracotta", "white_concrete", "white_concrete_powder", "powder_snow"),
},
{
"rgb": (164, 168, 184),
"blocks": ("clay", "infested_chiseled_stone_bricks", "infested_cobblestone", "infested_cracked_stone_bricks", "infested_mossy_stone_bricks", "infested_stone", "infested_stone_bricks"),
},
{
"rgb": (151, 109, 77),
"blocks": ("coarse_dirt", "dirt", "farmland", "dirt_path", "granite_slab", "granite_stairs", "granite_wall", "polished_granite_slab", "polished_granite_stairs", "jungle_planks", "jungle_log[axis=y]", "stripped_jungle_log[axis=x]", "jungle_wood", "stripped_jungle_wood", "jungle_sign", "jungle_pressure_plate", "jungle_trapdoor", "jungle_stairs", "jungle_slab", "jungle_fence_gate", "jungle_fence", "jungle_door", "jukebox", "brown_mushroom_block", "rooted_dirt", "hanging_roots"),
},
{
"rgb": (112, 112, 112),
"blocks": ("stone", "stone_slab", "stone_stairs", "andesite_slab", "andesite_stairs", "andesite_wall", "polished_andesite_slab", "polished_andesite_stairs", "cobblestone_slab", "cobblestone_stairs", "cobblestone_wall", "bedrock", "gold_ore", "iron_ore", "coal_ore", "lapis_lazuli_ore", "dispenser", "mossy_cobblestone_slab", "mossy_cobblestone_stairs", "mossy_cobblestone_wall", "spawner", "diamond_ore", "furnace", "stone_pressure_plate", "redstone_ore", "stone_bricks", "emerald_ore", "ender_chest", "dropper", "smooth_stone_slab", "observer", "smoker", "blast_furnace", "stonecutter", "sticky_piston", "piston", "piston_head", "gravel", "acacia_log[axis=z]", "cauldron", "hopper", "copper_ore"),
},
{
"rgb": (64, 64, 255),
"blocks": ("water", "kelp", "seagrass", "bubble_column"),
},
{
"rgb": (143, 119, 72),
"blocks": ("oak_planks", "oak_log[axis=y]", "stripped_oak_log[axis=x]", "oak_wood", "stripped_oak_wood", "oak_sign", "oak_pressure_plate", "oak_trapdoor", "oak_stairs", "oak_slab", "oak_fence_gate", "oak_fence", "oak_door", "note_block", "bookshelf", "chest", "crafting_table", "trapped_chest", "daylight_detector", "loom", "barrel", "cartography_table", "fletching_table", "lectern", "smithing_table", "composter", "bamboo_sapling", "dead_bush", "petrified_oak_slab", "beehive", "white_banner"),
},
{
"rgb": (255, 252, 245),
"blocks": ("quartz_block", "diorite_stairs", "diorite_slab", "diorite_wall", "polished_diorite_stairs", "polished_diorite_slab", "birch_log[axis=x]", "sea_lantern", "target"),
},
{
"rgb": (216, 127, 51),
"blocks": ("acacia_planks", "acacia_log[axis=y]", "stripped_acacia_log[axis=x]", "acacia_wood", "stripped_acacia_wood", "acacia_sign", "acacia_pressure_plate", "acacia_trapdoor", "acacia_stairs", "acacia_slab", "acacia_fence_gate", "acacia_fence", "acacia_door", "red_sand", "orange_wool", "orange_carpet", "orange_shulker_box", "orange_bed[part=foot]", "orange_stained_glass", "orange_glazed_terracotta", "orange_concrete", "orange_concrete_powder", "orange_candle", "pumpkin", "carved_pumpkin", "jack_o_lantern", "terracotta", "red_sandstone", "honey_block", "honeycomb_block", "copper_block", "lightning_rod", "raw_copper_block"),
},
{
"rgb": (178, 76, 216),
"blocks": ("magenta_wool", "magenta_carpet", "magenta_shulker_box", "magenta_bed[part=foot]", "magenta_stained_glass", "magenta_glazed_terracotta", "magenta_concrete", "magenta_concrete_powder", "magenta_candle", "purpur_block"),
},
{
"rgb": (102, 153, 216),
"blocks": ("light_blue_wool", "light_blue_carpet", "light_blue_shulker_box", "light_blue_bed[part=foot]", "light_blue_stained_glass", "light_blue_glazed_terracotta", "light_blue_concrete", "light_blue_concrete_powder", "light_blue_candle", "soul_fire"),
},
{
"rgb": (229, 229, 51),
"blocks": ("sponge", "wet_sponge", "yellow_wool", "yellow_carpet", "yellow_shulker_box", "yellow_bed[part=foot]", "yellow_stained_glass", "yellow_glazed_terracotta", "yellow_concrete", "yellow_concrete_powder", "yellow_candle", "hay_bale", "horn_coral_block[waterlogged=true]", "bee_nest"),
},
{
"rgb": (127, 204, 25),
"blocks": ("lime_wool", "lime_carpet", "lime_shulker_box", "lime_bed[part=foot]", "lime_stained_glass", "lime_glazed_terracotta", "lime_concrete", "lime_concrete_powder", "lime_candle", "melon"),
},
{
"rgb": (242, 127, 165),
"blocks": ("pink_wool", "pink_carpet", "pink_shulker_box", "pink_bed[part=foot]", "pink_stained_glass", "pink_glazed_terracotta", "pink_concrete", "pink_concrete_powder", "pink_candle", "brain_coral_block[waterlogged=true]"),
},
{
"rgb": (76, 76, 76),
"blocks": ("acacia_wood", "gray_wool", "gray_carpet", "gray_shulker_box", "gray_bed[part=foot]", "gray_stained_glass", "gray_glazed_terracotta", "gray_concrete", "gray_concrete_powder", "gray_candle", "dead_coral_block", "tinted_glass"),
},
{
"rgb": (153, 153, 153),
"blocks": ("light_gray_wool", "light_gray_carpet", "light_gray_shulker_box", "light_gray_bed[part=foot]", "light_gray_stained_glass", "light_gray_glazed_terracotta", "light_gray_concrete", "light_gray_concrete_powder", "light_gray_candle", "structure_block", "jigsaw"),
},
{
"rgb": (76, 127, 153),
"blocks": ("cyan_wool", "cyan_carpet", "cyan_shulker_box", "cyan_bed[part=foot]", "cyan_stained_glass", "cyan_glazed_terracotta", "cyan_concrete", "cyan_concrete_powder", "cyan_candle", "prismarine_slab", "prismarine_stairs", "prismarine_wall", "warped_roots", "warped_fungus", "twisting_vines", "nether_sprouts", "sculk_sensor"),
},
{
"rgb": (127, 63, 178),
"blocks": ("shulker_box", "purple_wool", "purple_carpet", "purple_shulker_box", "purple_bed[part=foot]", "purple_stained_glass", "purple_glazed_terracotta", "purple_concrete", "purple_concrete_powder", "purple_candle", "mycelium", "chorus_plant", "chorus_flower", "repeating_command_block", "bubble_coral_block", "amethyst_block", "budding_amethyst", "amethyst_cluster"),
},
{
"rgb": (51, 76, 178),
"blocks": ("blue_wool", "blue_carpet", "blue_shulker_box", "blue_bed[part=foot]", "blue_stained_glass", "blue_glazed_terracotta", "blue_concrete", "blue_concrete_powder", "blue_candle", "tube_coral_block"),
},
{
"rgb": (102, 76, 51),
"blocks": ("dark_oak_planks", "dark_oak_log[axis=y]", "stripped_dark_oak_log[axis=x]", "dark_oak_wood", "stripped_dark_oak_wood", "dark_oak_sign", "dark_oak_pressure_plate", "dark_oak_trapdoor", "dark_oak_stairs", "dark_oak_slab", "dark_oak_fence_gate", "dark_oak_fence", "dark_oak_door", "spruce_log[axis=x]", "brown_wool", "brown_carpet", "brown_shulker_box", "brown_bed[part=foot]", "brown_stained_glass", "brown_glazed_terracotta", "brown_concrete", "brown_concrete_powder", "brown_candle", "soul_sand", "command_block", "brown_mushroom", "soul_soil"),
},
{
"rgb": (102, 127, 51),
"blocks": ("green_wool", "green_carpet", "green_shulker_box", "green_bed[part=foot]", "green_stained_glass", "green_glazed_terracotta", "green_concrete", "green_concrete_powder", "green_candle", "end_portal_frame", "chain_command_block", "sea_pickle", "moss_carpet", "moss_block", "dried_kelp_block"),
},
{
"rgb": (153, 51, 51),
"blocks": ("red_wool", "red_carpet", "red_shulker_box", "red_bed[part=foot]", "red_stained_glass", "red_glazed_terracotta", "red_concrete", "red_concrete_powder", "red_candle", "brick_slab", "brick_stairs", "brick_wall", "red_mushroom_block", "nether_wart", "enchanting_table", "nether_wart_block", "fire_coral_block", "red_mushroom", "shroomlight"),
},
{
"rgb": (25, 25, 25),
"blocks": ("black_wool", "black_carpet", "black_shulker_box", "black_bed[part=foot]", "black_stained_glass", "black_glazed_terracotta", "black_concrete", "black_concrete_powder", "black_candle", "obsidian", "end_portal", "dragon_egg", "coal_block", "end_gateway", "basalt", "polished_basalt", "smooth_basalt", "netherite_block", "crying_obsidian", "respawn_anchor", "blackstone", "gilded_blackstone"),
},
{
"rgb": (250, 238, 77),
"blocks": ("gold_block", "light_weighted_pressure_plate", "bell", "raw_gold_block"),
},
{
"rgb": (92, 219, 213),
"blocks": ("diamond_block", "beacon", "prismarine_brick_slab", "prismarine_brick_stairs", "dark_prismarine_slab", "dark_prismarine_stairs", "conduit"),
},
{
"rgb": (74, 128, 255),
"blocks": ("lapis_lazuli_block"),
},
{
"rgb": (0, 217, 58),
"blocks": ("emerald_block"),
},
{
"rgb": (129, 86, 49),
"blocks": ("podzol", "spruce_planks", "spruce_log[axis=y]", "stripped_spruce_log[axis=x]", "spruce_wood", "stripped_spruce_wood", "spruce_sign", "spruce_pressure_plate", "spruce_trapdoor", "spruce_stairs", "spruce_slab", "spruce_fence_gate", "spruce_fence", "spruce_door", "oak_log[axis=x]", "jungle_log[axis=x]", "campfire", "soul_campfire"),
},
{
"rgb": (112, 2, 0),
"blocks": ("netherrack", "nether_brick_fence", "nether_brick_slab", "nether_brick_stairs", "nether_brick_wall", "nether_brick_chiseled", "nether_brick_cracked", "nether_gold_ore", "nether_quartz_ore", "magma_block", "red_nether_brick_slab", "red_nether_brick_stairs", "red_nether_brick_wall", "crimson_roots", "crimson_fungus", "weeping_vines"),
},
{
"rgb": (209, 177, 161),
"blocks": ("white_terracotta", "calcite"),
},
{
"rgb": (159, 82, 36),
"blocks": ("orange_terracotta"),
},
{
"rgb": (149, 87, 108),
"blocks": ("magenta_terracotta"),
},
{
"rgb": (112, 108, 138),
"blocks": ("light_blue_terracotta"),
},
{
"rgb": (186, 133, 36),
"blocks": ("yellow_terracotta"),
},
{
"rgb": (103, 117, 53),
"blocks": ("lime_terracotta"),
},
{
"rgb": (160, 77, 78),
"blocks": ("pink_terracotta"),
},
{
"rgb": (57, 41, 35),
"blocks": ("gray_terracotta", "tuff"),
},
{
"rgb": (135, 107, 98),
"blocks": ("light_gray_terracotta", "exposed_copper"),
},
{
"rgb": (87, 92, 92),
"blocks": ("cyan_terracotta"),
},
{
"rgb": (122, 73, 88),
"blocks": ("purple_terracotta", "purple_shulker_box"),
},
{
"rgb": (76, 62, 92),
"blocks": ("blue_terracotta"),
},
{
"rgb": (76, 50, 35),
"blocks": ("brown_terracotta", "pointed_dripstone", "dripstone_block"),
},
{
"rgb": (76, 82, 42),
"blocks": ("green_terracotta"),
},
{
"rgb": (142, 60, 46),
"blocks": ("red_terracotta"),
},
{
"rgb": (37, 22, 16),
"blocks": ("black_terracotta"),
},
{
"rgb": (189, 48, 49),
"blocks": ("crimson_nylium"),
},
{
"rgb": (148, 63, 97),
"blocks": ("crimson_planks", "crimson_log[axis=y]", "stripped_crimson_log[axis=x]", "crimson_wood", "stripped_crimson_wood", "crimson_sign", "crimson_pressure_plate", "crimson_trapdoor", "crimson_stairs", "crimson_slab", "crimson_fence_gate", "crimson_fence", "crimson_door"),
},
{
"rgb": (92, 25, 29),
"blocks": ("crimson_hyphae", "stripped_crimson_hyphae"),
},
{
"rgb": (22, 126, 134),
"blocks": ("warped_nylium", "oxidized_copper"),
},
{
"rgb": (58, 142, 140),
"blocks": ("warped_planks", "warped_log[axis=y]", "stripped_warped_log[axis=x]", "warped_wood", "stripped_warped_wood", "warped_sign", "warped_pressure_plate", "warped_trapdoor", "warped_stairs", "warped_slab", "warped_fence_gate", "warped_fence", "warped_door", "weathered_copper"),
},
{
"rgb": (86, 44, 62),
"blocks": ("warped_hyphae", "stripped_warped_hyphae"),
},
{
"rgb": (20, 180, 133),
"blocks": ("warped_wart_block"),
},
{
"rgb": (100, 100, 100),
"blocks": ("deepslate"),
},
{
"rgb": (216, 175, 147),
"blocks": ("raw_iron_block"),
},
{
"rgb": (127, 167, 150),
"blocks": ("glow_lichen"),
},
]
def to_minecraft_color(pxl):
color = None
min_distance = None
for item in blocks:
# Calculates the "distance" between two RGB colors as if they
# were points in a 3-dimensional space.
# The closer they are, the more they look like each other.
euclidean_distance = sqrt(sum([pow(p - c, 2) for p, c in zip(item["rgb"], pxl)]))
if min_distance is None or euclidean_distance < min_distance:
min_distance = euclidean_distance
color = ", ".join("minecraft:"+block for block in item["blocks"])
return color
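    # e.g. a pixel (130, 178, 60) is closest to (127, 178, 56) (distance
    # sqrt(9 + 0 + 16) = 5), so it maps to the grass_block/slime_block entry.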
# Resizing the image and mapping each pixel's color to a minecraft color
image = image.resize(
(image.size[0] // lower_image_size_by, image.size[1] // lower_image_size_by)
)
image_colors_processed = [
[to_minecraft_color(pixel) for pixel in row] for row in np.array(image)
]
# Getting the name that the image should have via the given path
image_name = os.path.splitext(os.path.split(path)[1])[0]
df = pd.DataFrame(image_colors_processed)
# Creates - in an error proof manner - the folder structure of the datapack
with suppress(FileExistsError):
os.makedirs(f"{path}/data/minecraft/tags/functions")
os.makedirs(f"{path}/data/pixelart-map/functions")
    # Map the client version to a datapack pack_format. Versions are compared
    # as integer tuples because plain string comparison would order, e.g.,
    # '1.9.0' after '1.18.2'.
    version_tuple = tuple(int(part) for part in minecraft_version.split("."))
    if version_tuple >= (1, 13, 0):
        if version_tuple <= (1, 14, 4):
            datapack_version = 4
        elif version_tuple <= (1, 16, 1):
            datapack_version = 5
        elif version_tuple <= (1, 16, 5):
            datapack_version = 6
        elif version_tuple <= (1, 17, 1):
            datapack_version = 7
        elif version_tuple <= (1, 18, 1):
            datapack_version = 8
        else:
            datapack_version = 9
    else:
        raise ValueError(
            "This version is incompatible with datapacks (below 1.13.0) "
            "or the version is written wrong (correct: x.xx.x | wrong: x.x, x.xx)"
        )
pack_mcmeta = {
"pack": {
"pack_format": datapack_version,
"description": f"This datapack will generate the image ({image_name}) in your world",
}
}
load_json = {"values": ["pixelart-map:load"]}
tick_json = {"values": ["pixelart-map:tick"]}
with open(f"{path}/pack.mcmeta", "w") as file:
file.write(json.dumps(pack_mcmeta, indent=4))
with open(f"{path}/data/minecraft/tags/functions/load.json", "w") as file:
file.write(json.dumps(load_json, indent=4))
with open(f"{path}/data/minecraft/tags/functions/tick.json", "w") as file:
file.write(json.dumps(tick_json, indent=4))
with open(f"{path}/data/pixelart-map/functions/tick.mcfunction", "w") as file:
file.write("")
# Making the commands that when ran will build the image's pixel art.
# This part's had a huge contribution from this thread: https://stackoverflow.com/questions/70512775/how-to-group-elements-in-dataframe-by-row/70546452#70546452
df = df.rename_axis(index="z", columns="x")
a = list(
script(
df,
player_x=player_pos[0],
player_y=player_pos[1],
player_z=player_pos[2],
)
)
b = list(
script(
df.T,
player_x=player_pos[0],
player_y=player_pos[1],
player_z=player_pos[2],
)
)
res = min([a, b], key=len)
with open(f"{path}/data/pixelart-map/functions/load.mcfunction", "w") as file:
file.write("\n".join(res))
|
Henrique-CSS/unexpected-isaves
|
src/unexpected_isaves/save_image.py
|
save_image.py
|
py
| 24,926 |
python
|
en
|
code
| null |
github-code
|
6
|
19167044996
|
"""
A collection of neural network code. The first part of the script includes
blocks, which are the building blocks of our models. The second part includes
the actual Pytorch models.
"""
import torch
import torchvision.transforms as transforms
class ConvBlock(torch.nn.Module):
"""
A ConvBlock represents a convolution. It's not just a convolution however,
as some common operations (dropout, activation, batchnorm, 2x2 pooling)
can be set and run in the order mentioned.
"""
def __init__(
self,
dim,
n_out,
kernel_size=3,
stride=1,
padding=1,
batchnorm=False,
dropout=0,
activation=True,
):
""" A convolution operation """
super(ConvBlock, self).__init__()
n_in = int(dim[0])
self.conv2d = torch.nn.Conv2d(
n_in, n_out, kernel_size=kernel_size, stride=stride, padding=padding
)
self.batchnorm = torch.nn.BatchNorm2d(n_out) if batchnorm else None
self.activation = torch.nn.ReLU(inplace=True) if activation else None
self.dropout = torch.nn.Dropout2d(dropout) if dropout else None
dim[0] = n_out
dim[1:] = 1 + (dim[1:] + padding * 2 - kernel_size) // stride
self.n_params = n_out * (n_in * kernel_size * kernel_size + (3 if batchnorm else 1))
print(
"Conv2d in %4i out %4i h %4i w %4i k %i s %i params %9i"
% (n_in, *dim, kernel_size, stride, self.n_params)
)
def forward(self, batch):
""" Forward the 4D batch """
out = self.conv2d(batch)
if self.activation:
out = self.activation(out)
if self.batchnorm:
out = self.batchnorm(out)
if self.dropout:
out = self.dropout(out)
return out
class LinearBlock(torch.nn.Module):
"""
A LinearBlock represents a fully connected layer. It's not just this, as
some common operations (dropout, activation, batchnorm) can be set and run
in the order mentioned.
"""
def __init__(self, dim, n_out, batchnorm=False, dropout=0.0, activation=True):
""" A fully connected operation """
super(LinearBlock, self).__init__()
n_in = int(dim[0])
self.linear = torch.nn.Linear(n_in, n_out)
dim[0] = n_out if type(n_out) in (int, float) else n_out[0]
self.batchnorm = torch.nn.BatchNorm1d(dim[0]) if batchnorm else None
self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None
self.activation = torch.nn.ReLU(inplace=True) if activation else None
self.n_params = n_out * (n_in + (3 if batchnorm else 1))
print(
"Linear in %4i out %4i params %9i" % (n_in, n_out, self.n_params)
)
def forward(self, batch):
""" Forward the 2D batch """
out = self.linear(batch)
if self.activation:
out = self.activation(out)
if self.batchnorm:
out = self.batchnorm(out)
if self.dropout:
out = self.dropout(out)
return out
class PoolBlock(torch.nn.Module):
"""
A PoolBlock is a pooling operation that happens on a matrix, often between
convolutional layers, on each channel individually. By default only two are
supported: max and avg.
"""
def __init__(self, dim, pool="max", size=None, stride=None):
""" A pooling operation """
super(PoolBlock, self).__init__()
stride = size if stride is None else stride
if size:
dim[1:] //= stride
else:
size = [int(x) for x in dim[1:]]
dim[1:] = 1
if pool == "max":
self.pool = torch.nn.MaxPool2d(size, stride=stride, padding=0)
elif pool == "avg":
self.pool = torch.nn.AvgPool2d(size, stride=stride, padding=0)
self.n_params = 0
def forward(self, batch):
""" Forward the 4D batch """
out = self.pool(batch)
return out
class ViewBlock(torch.nn.Module):
"""
A ViewBlock restructures the shape of our activation maps so they're
represented as 1D instead of 3D.
"""
def __init__(self, dim, shape=-1):
""" A reshape operation """
super(ViewBlock, self).__init__()
self.shape = shape
if self.shape == -1:
dim[0] = dim[0] * dim[1] * dim[2]
dim[-2] = 0
dim[-1] = 0
else:
dim[:] = shape
self.n_params = 0
print("View d %4i h %4i w %4i" % (*dim,))
def forward(self, batch):
""" Forward the 4D batch into a 2D batch """
return batch.view(batch.size(0), self.shape)
class Tiny(torch.nn.Module):
""" A small and quick model """
def __init__(self, in_dim, n_status, n_out):
"""
Args:
in_dim (list): The input size of each example
n_status (int): Number of status inputs to add
n_out (int): Number of values to predict
"""
super(Tiny, self).__init__()
self.n_status = n_status
dim = in_dim.copy()
self.feat = torch.nn.Sequential(
ConvBlock(dim, 16),
PoolBlock(dim, "max", 2),
ConvBlock(dim, 32),
PoolBlock(dim, "max", 2),
ConvBlock(dim, 48),
PoolBlock(dim, "max", 2),
ConvBlock(dim, 64),
PoolBlock(dim, "max", 2),
)
self.view = ViewBlock(dim)
dim[0] += n_status
self.head = torch.nn.Sequential(LinearBlock(dim, n_out, activation=False))
self.n_params = sum([x.n_params for x in self.feat]) + sum([x.n_params for x in self.head])
print("Tiny params %9i" % self.n_params)
def forward(self, batch, status):
"""
Args:
batch (4D tensor): A batch of camera input.
status (1D tensor): Status inputs indicating things like speed.
"""
out = self.feat(batch)
out = self.view(out)
if self.n_status:
out = torch.cat((out, status), 1)
out = self.head(out)
return out
class StarTree(torch.nn.Module):
"""
A medium-sized model that uses layers with few activation maps to
efficiently increase the number of layers, and therefore nonlinearities.
"""
def __init__(self, in_dim, n_status, n_out):
"""
Args:
in_dim (list): The input size of each example
n_status (int): Number of status inputs to add
n_out (int): Number of values to predict
"""
super(StarTree, self).__init__()
self.n_status = n_status
dim = in_dim.copy()
self.feat = torch.nn.Sequential(
ConvBlock(dim, 64, dropout=0.25),
ConvBlock(dim, 16),
ConvBlock(dim, 32),
PoolBlock(dim, "max", 2),
ConvBlock(dim, 24),
ConvBlock(dim, 48),
PoolBlock(dim, "max", 2),
ConvBlock(dim, 32),
ConvBlock(dim, 64),
PoolBlock(dim, "max", 2),
ConvBlock(dim, 40),
ConvBlock(dim, 80, dropout=0.25),
PoolBlock(dim, "max", 2),
)
self.view = ViewBlock(dim)
dim[0] += n_status
self.head = torch.nn.Sequential(
LinearBlock(dim, 50), LinearBlock(dim, n_out, activation=False),
)
self.n_params = sum([x.n_params for x in self.feat]) + sum([x.n_params for x in self.head])
print("StarTree params %9i" % self.n_params)
def forward(self, batch, status):
"""
Args:
batch (4D tensor): A batch of camera input.
status (1D tensor): Status inputs indicating things like speed.
"""
out = self.feat(batch)
out = self.view(out)
if self.n_status:
out = torch.cat((out, status), 1)
out = self.head(out)
return out
def train_epoch(device, model, optimizer, criterion, loader):
""" Run the optimzer over all batches in an epoch """
model.train()
epoch_loss = 0
batch_index = 0
for batch_index, (examples, statuses, labels) in enumerate(loader):
optimizer.zero_grad()
guesses = model(examples.to(device), statuses.to(device))
loss = criterion(guesses, labels.to(device))
loss.backward()
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / (batch_index + 1)
def test_epoch(device, model, criterion, loader):
""" Run the evaluator over all batches in an epoch """
model.eval()
epoch_loss = 0
batch_index = 0
with torch.no_grad():
for batch_index, (examples, statuses, labels) in enumerate(loader):
guesses = model(examples.to(device), statuses.to(device))
loss = criterion(guesses, labels.to(device))
epoch_loss += loss.item()
return epoch_loss / (batch_index + 1)
def compose_transforms(transform_config):
""" Apply all image transforms """
transform_list = []
for perturb_config in transform_config:
if perturb_config["name"] == "colorjitter":
transform = transforms.ColorJitter(
brightness=perturb_config["brightness"],
contrast=perturb_config["contrast"],
saturation=perturb_config["saturation"],
hue=perturb_config["hue"],
)
transform_list.append(transform)
transform_list.append(transforms.ToTensor())
return transforms.Compose(transform_list)
|
notkarol/derplearning
|
derp/model.py
|
model.py
|
py
| 9,661 |
python
|
en
|
code
| 40 |
github-code
|
6
|
26096479620
|
from typing import final
import pandas as pd
import numpy as np
import os
final_df=pd.read_csv("prepared_final_data.csv")
print(final_df)
values=final_df["pollution"].values
print(values)
print(final_df.columns)
"""# Normalized the data"""
from sklearn.preprocessing import MinMaxScaler
# values = final_df.values
print(values)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_dataset = scaler.fit_transform(values.reshape(-1,1))
scaled_dataset
# Creating a window for previous data
def to_supervised(window_size,train):
X = []
Y = []
for i in range(window_size, len(train)):
X.append(train[i-window_size:i,:])
Y.append(train[i,0:1])
return np.array(X), np.array(Y)
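# Illustration of the window the function builds (toy values, not the dataset):
# with window_size=2 and train = [[10], [11], [12], [13]],
#   X[0] = [[10], [11]] -> Y[0] = [12]
#   X[1] = [[11], [12]] -> Y[1] = [13]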
feature,label = to_supervised(window_size=5, train=scaled_dataset)
n_train = 24*365
X_train, X_test = feature[n_train:,] , feature[:n_train,]
print('X_train' ,X_train.shape)
print('X_test' ,X_test.shape)
Y_train, Y_test = label[n_train:,] , label[:n_train,]
print('Y_train' ,Y_train.shape)
print('Y_test' ,Y_test.shape)
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout,LSTM
model = Sequential()
model.add(LSTM(units = 50, return_sequences = True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(0.2))
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
model.add(LSTM(units = 50))
model.add(Dropout(0.2))
model.add(Dense(units = 1))
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
from keras.callbacks import EarlyStopping
es_callback = EarlyStopping(monitor='val_loss', patience=3,min_delta=0.01)
path = 'air_pollution_forecasting_model'
isdir = os.path.isdir(path)
print(isdir)
if isdir:
reconstructed_model = keras.models.load_model("air_pollution_forecasting_model")
model = reconstructed_model
else:
model.fit(X_train, Y_train, validation_split = 0.1, epochs = 10, batch_size = 32, callbacks=[es_callback])
model.save("air_pollution_forecasting_model")
Y_pred = np.round(model.predict(X_test),2)
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(Y_test, Y_pred)
rmse = np.sqrt(mse)
print(rmse)
# Scaling back to the original scale
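# (the scaler was fit on a single column, so the concatenations below add
# nothing today; they only matter if more feature columns are introduced)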
d = scaled_dataset[:8760,:]
print('dummy',d.shape)
print('Y_pred',Y_pred.shape)
Y_predicted = np.concatenate((Y_pred,d[:8760,1:]), axis =1)
print('concat y_pred',Y_predicted.shape)
Y_tested = np.concatenate((Y_test, d[:8760,1:]), axis = 1)
print('concat Y_test', Y_tested.shape)
Y_predicted = scaler.inverse_transform(Y_predicted)
Y_tested = scaler.inverse_transform(Y_tested)
Y_predicted = Y_predicted[:,0:1]
Y_tested = Y_tested[:,0:1]
print('Y_tested', Y_tested.shape)
print('Y_predicted', Y_predicted.shape)
import matplotlib.pyplot as plt
plt.plot(Y_predicted[:100,:], color= 'green')
plt.plot(Y_tested[:100,:] , color = 'red')
plt.title("Air Pollution Prediction (Multivariate)")
plt.xlabel("Date")
plt.ylabel("Pollution level")
plt.savefig("results.png")
import pickle
pickle.dump(scaler, open('min_max_scaler.pkl','wb'))
|
manisha841/Air-Quality-Index-Prediction
|
train.py
|
train.py
|
py
| 3,028 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32311173285
|
#import networkx as nx
#import matplotlib.pyplot as plt
import json
import pprint
from TwitterModule import *
import time
from datetime import datetime
#Set up api and global variables
twitter_api = oauth_login()#twitter api for grabbing data
#dates = [330,331,401,402,403]
dates = [401,402,403,404,405,406,407]
for day in dates:
print(day)
names = ['@itsnotdrew','@davidhogg111','@IngrahamAngle','@sleepnumber','@ATT','@Allstate','@esurance','@Bayer','@RocketMortgage','@LibertyMutual','@Arbys','@TripAdvisor','@Nestle','@hulu','@Wayfair','@FoxNews','#BoycottIngramAdverts','#boycottLauraIngraham','#FireIngraham','#FireLauraIngraham']
errorLogName = 'errorLog' + str(day) + '_4' + '.txt'
errorLog = open(errorLogName,'w')
for q in names:
try:
dateStr = str(day)
dateDay = dateStr[1:]
            dateDayPlusOne = str(int(dateDay)+1).zfill(2) # zero-pad to keep the YYYY-MM-DD format valid
dateMonth = dateStr[0]
if (dateStr == '331'): #dirty code to fix a logic bug when switching months
dateDayPlusOne = '01'
dateMonth = '4'
until = '2018-0' + dateMonth + '-' + dateDayPlusOne
            tweetsDictionary = {}
name = q[1:]
nameFile = name + dateStr +'_4'+ '.json'
file = open(nameFile,'w')
            '''
            First search call to twitter_api
            Parameters:
            q: the search term
            result_type: whether we want recent, popular or mixed tweets. Currently set to recent
            count: how many results we want to take in a single call. Currently 5 for this first probe call
            until: specifies the date that all tweets returned from this call should come before
            (so every tweet from this call predates the given day)
            getMaxID parses the maxID from the appropriate string in the search return metadata
            maxid will then be used to call the next batch of tweets. More info on maxid is available in the search api documentation
            '''
            print(q + ' at ' + str(datetime.now())) #prints the twitter user being processed
response = make_twitter_request(twitter_api.search.tweets,q=q,result_type='recent',count=5, until=until)
try:
next_results = response['search_metadata']['next_results']
getMaxID = dict([ kv.split('=') for kv in next_results[1:].split("&") ])
maxid = getMaxID['max_id']
except:
next_results = ""
maxid = 0
line = "\nretrieval error at " + str(datetime.now()) + " while processing beginning call of " + q
errorLog.write(line)
            '''
            Parameters in response:
            most are the same as the first call
            -result_type is recent
            -count is 100 (the maximum the search api returns per call)
            -max_id field is at the end of the call, allowing each call of the function to retrieve older and older tweets
            time.sleep(5): we can only call the search api 180 times in 15 minutes, so roughly one call every 5 seconds
            Alternatively, we could edit the make_twitter_request function to handle rate limiting for us
            '''
            for i in range(1,101): # up to 100 calls of 100 tweets = 10,000 tweets max
#print(i) #testing code
try:
response = make_twitter_request(twitter_api.search.tweets,q=q,result_type='recent',count=100,until=until,max_id=maxid)
next_results = response['search_metadata']['next_results']
if (next_results == None):
break
getMaxID = dict([ kv.split('=') for kv in next_results[1:].split("&") ])#to get the nextID
maxid = getMaxID['max_id']
# print(maxid)
time.sleep(5)
except:
line = "\nretrieval error at " + str(datetime.now()) + " while processing " + q + ' at loop number ' + str(i)
errorLog.write(line)
break
for tweet in response['statuses']:#add each tweet to a dictionary
try:
                    tweetsDictionary[tweet['id']] = tweet
except:
line = "\ndicitonary error at " + str(datetime.now()) + " while processing " + str(tweet['id'])
errorLog.write(line)
            file.seek(0)
            json.dump(tweetsDictionary,file)
file.close()
except:
line = "\nFatal error at " + str(datetime.now()) + " while processing " + q
errorLog.write(line)
            json.dump(tweetsDictionary,file)
file.close()
|
drewpj/cis400tweetfrequency
|
searchTweets.py
|
searchTweets.py
|
py
| 4,925 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71477060028
|
import sys
input = sys.stdin.readline
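# Note: despite its name, BFS below is a recursive depth-first search that
# extends the current path only with letters it has not used yet.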
def BFS(y, x, word):
global ans
ans = max(ans, len(word))
for dy, dx in ((0, 1), (0, -1), (1, 0), (-1, 0)):
ny = y + dy
nx = x + dx
if 0 <= ny < R and 0 <= nx < C and data[ny][nx] not in word:
BFS(ny, nx, word+data[ny][nx])
R, C = map(int, input().split())
ans = 0
data = [input().rstrip() for _ in range(R)]
# print(data)
#
BFS(0, 0, data[0][0])
print(ans)
|
YOONJAHYUN/Python
|
BOJ/1987_2.py
|
1987_2.py
|
py
| 451 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40709996191
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from util.custom_dataset import FaceLandmarksDataset, Rescale, ToTensor
import torchvision.models as models
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms, utils
import torchvision
class View(nn.Module):
def __init__(self, shape):
super(View, self).__init__()
self.shape = shape
def forward(self, x):
return x.view(*self.shape)
class InnerSum(nn.Module):
def __init__(self):
super(InnerSum, self).__init__()
def forward(self, x):
y = torch.zeros_like(x)
for i in range(x.size(0)):
y[i] = x[i].mul(x[i])
if len(y.shape) == 3:
return y.sum(2)
else:
return y.sum(1)
class ACNN(nn.Module):
def __init__(self):
super(ACNN, self).__init__()
self.inner = InnerSum()
self.pretrain = models.vgg16(pretrained=True).features[:28]
self.VGG16 = self.pretrain[:21]
self.PG_base = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),)
self.PG_attention = nn.Sequential(nn.MaxPool2d(2, stride=2), nn.Conv2d(512, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)), View((-1, 128)),
nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 1), nn.Sigmoid())
self.GG_base = self.pretrain[21:]
self.GG_attention = nn.Sequential(nn.MaxPool2d(2, stride=2), nn.Conv2d(512, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)), View((-1, 128)),
nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 1), nn.Sigmoid())
self.PG24_base = nn.ModuleList([self.PG_base for _ in range(24)])
self.PG24_alpha = nn.ModuleList([self.PG_attention for _ in range(24)])
self.pad = nn.ReflectionPad2d(6)
# self.crop = batch_slice(40, 40, 6, 6)
self.crop = torchvision.ops.roi_pool
self.PG_fc = nn.Linear(512*6*6, 64)
self.GG_fc = nn.Linear(512*14*14, 512)
self.fc1 = nn.Linear(2048, 1024)
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(1024, 7)
# def crop_layer(self, img: '(B, C, H, W)', landmarks: '(B, 24, 2)'):
# # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# pad = nn.ReflectionPad2d(6) # padding for cropping
# img = pad(img) # (B, 512, 36, 36)
# total_crop = torch.zeros((img.size(0), landmarks.size(1), 512, 6, 6), device=self.device)
#
# for i in range(landmarks.size(0)): # Batch
# # crop_per_batch = []
# for patch in range(landmarks.size(1)): # 24 landmarks
# total_crop[i, patch, :, :, :] = img[i, :, (int(landmarks[i, patch, 0]) - 3): (int(
# landmarks[i, patch, 0]) + 3),
# (int(landmarks[i, patch, 1]) - 3): (int(
# landmarks[i, patch, 1]) + 3)] # crop_img: (512, 6, 6)
#
# total_crop = total_crop.permute(1, 0, 2, 3, 4) # output: (24, B, 512, 6, 6)
# return total_crop
def _branch24(self, crop_img):
PG_out = []
for x, base, alpha in zip(crop_img, self.PG24_base, self.PG24_alpha):
PG_conv2 = base(x)
PG_reshape = PG_conv2.view(-1, 512*6*6)
PG_reshape = self.PG_fc(PG_reshape)
PG_per = PG_reshape * alpha(PG_conv2).view(x.size(0), 1)
PG_out.append(PG_per)
return PG_out
def forward(self, img, landmarks):
img_feature = self.VGG16(img) # (B, 512, 28, 28)
img_pad = self.pad(img_feature)
# landmarks = landmarks.long()
crop_img = self.crop(img_pad, landmarks, output_size=(6, 6))
crop_img = crop_img.view(24, -1, 512, 6, 6)
GG_conv2 = self.GG_base(img_feature)
GG_reshape = GG_conv2.view(-1, 512*14*14)
GG_reshape = self.GG_fc(GG_reshape)
GG_out = GG_reshape * self.GG_attention(GG_conv2).view(img_feature.size(0), 1)
# crop_img = self.crop_layer(img_feature, landmarks)
PG_out = self._branch24(crop_img)
PG_total = torch.cat(PG_out, dim=1)
total_out = torch.cat([GG_out, PG_total], dim=1)
out = self.fc1(total_out)
out = F.relu(self.dropout(out))
out = self.fc2(out)
return out
def landmark_resize(landmarks:'(B, 24, 2)')->'(B*24, 4)':
bs = landmarks.size(0)
batch = list(range(bs))
batch = np.array(batch * 24).reshape(24, -1).T
point = np.array(list(range(24)) * bs).reshape(bs, -1)
insert_point = np.insert(landmarks, 0, point, 2)
insert_batch = np.insert(insert_point, 0, batch, 2)
new_landmark = insert_batch.reshape(-1, 4)
return new_landmark
def data_normal(origin_data, size): # (-1, 1)
size = size / 2
norm_data = origin_data.true_divide(size) - 1
return norm_data
def grid_field(landmarks, cropsize=6): # landmarks: (B, 24, 2)
total_crop = []
landmarks = landmark_resize(landmarks) # (B*24, 4)
lm_batch = landmarks[:, 0].long()
landmarks_x_l = landmarks[:, 2] - (cropsize / 2)
landmarks_x_r = landmarks[:, 2] + (cropsize / 2)
landmarks_y_l = landmarks[:, 3] - (cropsize / 2)
landmarks_y_r = landmarks[:, 3] + (cropsize / 2)
for i in range(landmarks.size(0)):
new_h = torch.linspace(landmarks_x_l[i], landmarks_x_r[i] - 1, cropsize).view(-1, 1).repeat(1, cropsize)
new_w = torch.linspace(landmarks_y_l[i], landmarks_y_r[i] - 1, cropsize).repeat(cropsize, 1)
grid = torch.cat((new_w.unsqueeze(2), new_h.unsqueeze(2)), dim=2)
grid = grid.unsqueeze(0)
grid = data_normal(grid, size=28)
total_crop.append(grid)
total_crop = torch.cat(total_crop, dim=0)
return lm_batch, total_crop
def roi_select(landmarks: '(B, 24, 2)') -> '(B*24, 5)':
landmarks = landmark_resize(landmarks)
landmarks_right = landmarks[:, 2:] + 3
landmarks_left = landmarks[:, 2:] - 3
landmarks = torch.cat([landmarks[:, 0].view(-1, 1), landmarks_left, landmarks_right], dim=1)
return landmarks
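# torchvision's roi_pool expects each box as (batch_index, x1, y1, x2, y2);
# roi_select builds a 6x6 window (+/-3 pixels) around every landmark in exactly
# that layout.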
# if __name__ == '__main__':
# model = ACNN()
# shuffle = False
# device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
# model.to(device)
# train_set = FaceLandmarksDataset(csv_file='train_acnn.csv', root_dir='original/',
# transform=ToTensor())
# test_set = FaceLandmarksDataset(csv_file='test_acnn.csv', root_dir='original/',
# transform=ToTensor())
# train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=4, num_workers=0,
# pin_memory=True)
# test_loader = DataLoader(dataset=test_set, shuffle=shuffle, batch_size=4, num_workers=8,
# pin_memory=True)
# for step, batch in enumerate(train_loader):
# imgs, landmarks, targets = batch['image'], batch['landmarks'] / 8. + 6, batch['label']
# landmarks = roi_select(landmarks)
#
# imgs, landmarks, targets = imgs.to(device), landmarks.to(device), targets.to(device)
# logits = model(imgs, landmarks)
# print(logits.size())
# break
|
hanluyt/gACNN_pytorch
|
model_roi.py
|
model_roi.py
|
py
| 7,947 |
python
|
en
|
code
| 2 |
github-code
|
6
|
1002077560
|
import g2d
from boardgame import BoardGame
from time import time
W, H = 40, 40
LONG_PRESS = 0.5
class BoardGameGui:
def __init__(self, g: BoardGame):
self._game = g
self._downtime = 0
self.update_buttons()
def tick(self):
if g2d.key_pressed("LeftButton"):
self._downtime = time()
elif g2d.key_pressed("a"):
self._game.automatism()
self.update_buttons()
elif g2d.key_pressed("h"):
self._game.hint()
self.update_buttons()
elif g2d.key_pressed("u"):
print(self._game.unsolvable())
elif g2d.key_released("LeftButton"):
mouse = g2d.mouse_position()
x, y = mouse[0] // W, mouse[1] // H
if time() - self._downtime > LONG_PRESS:
self._game.flag_at(x, y)
else:
self._game.play_at(x, y)
self.update_buttons()
def update_buttons(self):
g2d.clear_canvas()
g2d.set_color((0, 0, 0))
cols, rows = self._game.cols(), self._game.rows()
for y in range(1, rows):
g2d.draw_line((0, y * H), (cols * W, y * H))
for x in range(1, cols):
g2d.draw_line((x * W, 0), (x * W, rows * H))
for y in range(rows):
for x in range(cols):
value = self._game.value_at(x, y)
                if value == '1': # set the cell colours
g2d.set_color((200,200,200))
g2d.fill_rect((x*40, y*40, 39, 39))
elif value == '2':
g2d.set_color((0,0,0))
g2d.fill_rect((x*40, y*40, 39, 39))
center = x * W + W//2, y * H + H//2
g2d.draw_text_centered(value, center, H//2)
g2d.update_canvas()
if self._game.finished():
g2d.alert(self._game.message())
g2d.close_canvas()
def gui_play(game: BoardGame):
g2d.init_canvas((game.cols() * W, game.rows() * H))
ui = BoardGameGui(game)
g2d.main_loop(ui.tick)
|
refedico/3-in-a-Row
|
boardgamegui.py
|
boardgamegui.py
|
py
| 2,063 |
python
|
en
|
code
| 3 |
github-code
|
6
|
36562134507
|
import sys
import json
import time
import numpy as np
import argparse
from operator import itemgetter
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from math import sqrt
from math import log
from upper_learning_corpus import LearningCorpus
from sparse_matrix import *
from ranking import *
def convert_counts_to_pmi2(matrix, rowSum, colSum):
totalSum = sum(rowSum.values())
sys.stderr.write('Converting to csc_matrix format... ')
startTime = time.time()
matrix = coo_matrix(matrix)
sys.stderr.write('done. Time taken: '+str(time.time()-startTime)+' secs\n')
totalEntries = len(matrix.row)
sys.stderr.write('Num entries: '+str(totalEntries)+'\n')
numEntries = 1.
# symmetric matrix
for r, c, val in zip(np.nditer(matrix.row), np.nditer(matrix.col), np.nditer(matrix.data, op_flags=['readwrite'])):
        pi, pj, pij = (1.*rowSum[str(r)]/totalSum, 1.*colSum[str(c)]/totalSum, 1.*val/totalSum) # marginals p(i), p(j) and joint p(i,j)
val[...] = log(pij/(pi*pj))
if numEntries% 1000000 == 0: sys.stderr.write(str(numEntries)+' ')
numEntries += 1
sys.stderr.write('done!\n')
return csc_matrix((matrix.data, (matrix.row, matrix.col)), shape=matrix.shape)
def convert_counts_to_pmi(matrix, rowSum, colSum):
totalSum = sum(rowSum.values())
sys.stderr.write('Converting to dok_matrix format... ')
startTime = time.time()
matrix = dok_matrix(matrix)
sys.stderr.write('done. Time taken: '+str(time.time()-startTime)+' secs\n')
totalEntries = len(matrix)
sys.stderr.write('Num entries: '+str(totalEntries)+'\n')
r, c = matrix.shape
numEntries = 1.
# symmetric matrix
if r == c:
for key, val in matrix.iteritems():
i, j = key
i, j = (str(i), str(j))
if int(i) <= int(j):
                pi, pj, pij = (1.*rowSum[i]/totalSum, 1.*colSum[j]/totalSum, 1.*val/totalSum) # marginals p(i), p(j) and joint p(i,j)
pmi = log(pij/(pi*pj))
matrix[int(i), int(j)] = pmi
matrix[int(j), int(i)] = pmi
else:
pass
if numEntries% 1000000 == 0: sys.stderr.write(str(numEntries)+' ')
numEntries += 1
else:
for key, val in matrix.iteritems():
i, j = key
i, j = (str(i), str(j))
            pi, pj, pij = (1.*rowSum[i]/totalSum, 1.*colSum[j]/totalSum, 1.*val/totalSum) # marginals p(i), p(j) and joint p(i,j)
matrix[int(i), int(j)] = log(pij/(pi*pj))
if numEntries% 1000000 == 0: sys.stderr.write(str(numEntries)+' ')
numEntries += 1
sys.stderr.write('done!\n')
return csc_matrix(matrix)
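# Quick sanity check of the PMI formula on a toy 2x2 count matrix
# (illustrative numbers only, not tied to any corpus):
#   counts = [[2, 1], [1, 0]], rowSum = {0: 3, 1: 1}, colSum = {0: 3, 1: 1}, total = 4
#   pmi(0, 0) = log((2/4) / ((3/4) * (3/4))) ~= -0.118
#   pmi(0, 1) = log((1/4) / ((3/4) * (1/4))) ~=  0.288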
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--matrixfile", type=str, default=None, help="Matrix file name")
parser.add_argument("-d", "--dictfile", type=str, help="Dictionary file name")
parser.add_argument("-o", "--outputfile", type=str, default=None, help="Output file name")
args = parser.parse_args()
outFileName = args.outputfile
dictFile = open(args.dictfile, 'r')
values = dictFile.readline().strip().split()
if len(values) == 3:
colCutoff, rowCutoff, windowSize = values
else:
colCutoff, windowSize = values
rowCutoff = 0.
vocab = json.loads(dictFile.readline())
wordFeatures = json.loads(dictFile.readline())
rowSum = json.loads(dictFile.readline())
colSum = json.loads(dictFile.readline())
contextMat = load_sparse_matrix(args.matrixfile)
sys.stderr.write("windowSize: "+str(windowSize)+" colCutoff: "+str(colCutoff)+" rowCutoff: "+str(rowCutoff)+'\n')
sys.stderr.write("featLen: "+str(len(wordFeatures))+" vocabLen: "+str(len(vocab))+'\n')
sys.stderr.write('Read the matrix!\n')
''' Convert the matrix here! '''
contextMat = convert_counts_to_pmi(contextMat, rowSum, colSum)
sys.stderr.write('Writing the matrix now... ')
if outFileName is None: outFileName = args.dictfile.replace('.dict', '_pmi')
save_sparse_matrix(outFileName, contextMat)
sys.stderr.write('done!\n')
|
mfaruqui/vector-semantics
|
src/svd/convert_counts_to_pmi.py
|
convert_counts_to_pmi.py
|
py
| 4,114 |
python
|
en
|
code
| 5 |
github-code
|
6
|
25026171656
|
from flask import abort
from flask_restx import Resource, Namespace, Model, fields, reqparse
from infraestructura.alumnos_repo import AlumnosRepo
from api.cursos_api import modeloCurso
from flask_restx.inputs import date
repo = AlumnosRepo()
nsAlumno = Namespace('Alumnos', description='Administrador de Alumno')
modeloAlumnoSinID = Model('AlumnoSinID',{
'nombre': fields.String(),
'direccion': fields.String(),
'sexo':fields.String(),
'edad':fields.Integer(),
'fecha_baja': fields.Date()
})
modeloAlumno = modeloAlumnoSinID.clone('Alumno',{
'id': fields.Integer(),
#'cursos': fields.Nested(modeloCurso, skip_none=True)
})
# modeloBusqueda = Model('BusquedaFechas', {
# 'desde': fields.Date(),
# 'hasta': fields.Date()
# })
nsAlumno.models[modeloAlumno.name] = modeloAlumno
nsAlumno.models[modeloAlumnoSinID.name] = modeloAlumnoSinID
# nsAlumno.models[modeloBusqueda.name] = modeloBusqueda
nuevoAlumnoParser = reqparse.RequestParser(bundle_errors=True)
nuevoAlumnoParser.add_argument('nombre', type=str, required=True)
nuevoAlumnoParser.add_argument('direccion', type=str, required=True)
nuevoAlumnoParser.add_argument('sexo', type=str, required=True)
nuevoAlumnoParser.add_argument('edad', type=int, required=True)
nuevoAlumnoParser.add_argument('fecha_baja', type=date, required=False)
editarAlumnoParser = nuevoAlumnoParser.copy()
editarAlumnoParser.add_argument('id', type=int, required=True)
@nsAlumno.route('/')
class AlumnosResource(Resource):
# @nsAlumno.marshal_list_with(modeloAlumno)
# def get(self):
# return repo.get_all()
@nsAlumno.marshal_list_with(modeloAlumno)
def get(self):
return repo.get_all()
@nsAlumno.expect(modeloAlumnoSinID)
@nsAlumno.marshal_with(modeloAlumno)
def post(self):
data = nuevoAlumnoParser.parse_args()
Alumno = repo.agregar(data)
if Alumno:
return Alumno, 201
abort(500)
@nsAlumno.route('/<int:id>')
class AlumnoResource(Resource):
@nsAlumno.marshal_with(modeloAlumno)
def get(self, id):
Alumno = repo.get_by_id(id)
if Alumno:
return Alumno, 200
abort(404)
@nsAlumno.expect(modeloAlumno)
def put(self, id):
data = editarAlumnoParser.parse_args()
if repo.modificar(id, data):
return 'Alumno actualizado', 200
abort(404)
# @nsAlumno.route('/buscar/<string:desde>/<string:hasta>/')
# class AlumnoResource(Resource):
# @nsAlumno.marshal_list_with(modeloAlumno)
# def get(self, desde, hasta):
# l = repoLep.buscar(desde, hasta)
# if l:
# a = []
# for x in l:
# h = repo.get_by_id(x.Alumno_id)
# a.append(h)
# return l, 200
# abort(404)
@nsAlumno.route('/baja/<int:id>')
class AlumnoResource(Resource):
def put(self, id):
if repo.baja(id):
return 'Alumno dado de baja', 200
abort(400)
@nsAlumno.route('/buscar/<int:curso>')
class AlumnoResource(Resource):
@nsAlumno.marshal_list_with(modeloAlumno)
def get(self, curso):
l = repo.get_alumno_curso(curso)
if l:
return l, 200
abort(404)
|
PepoPalo/Final-Laboratorio-Diciembre2021
|
Backend/api/alumnos_api.py
|
alumnos_api.py
|
py
| 3,258 |
python
|
es
|
code
| 1 |
github-code
|
6
|
32144899005
|
import pandas as pd
def read_fasta(file_path):
sequences = {"Header": [], "Sequence": []}
current_header = None
current_sequence = ""
with open(file_path, "r") as file:
for line in file:
line = line.strip()
if line.startswith(">"):
# New header found
if current_header is not None:
sequences["Header"].append(current_header)
sequences["Sequence"].append(current_sequence)
current_header = line[1:]
current_sequence = ""
else:
# Continue building the sequence
current_sequence += line
# Add the last sequence
if current_header is not None:
sequences["Header"].append(current_header)
sequences["Sequence"].append(current_sequence)
return pd.DataFrame(sequences)
def extract_label(header):
# Extract label after the "|" symbol
parts = header.split("|")
if len(parts) > 1:
return parts[1].strip()
else:
return None
file_path = "data/pharos/pharos.fasta"
fasta_df = read_fasta(file_path)
fasta_df["Label"] = fasta_df["Header"].apply(extract_label)
tclin_df = fasta_df[fasta_df["Label"] == "Tclin"]
tdark_df = fasta_df[fasta_df["Label"] == "Tdark"]
length_tclin_df = len(tclin_df)
random_tdark_df = tdark_df.sample(n=length_tclin_df, random_state=42)
from sklearn.model_selection import train_test_split
import os
# Assuming tclin_df and tdark_df are already defined
# Define the test size
test_size = 0.2
# Split the positive sequences (Tclin) into train and test sets
tclin_train, tclin_test = train_test_split(
tclin_df, test_size=test_size, random_state=42
)
# Split the negative sequences (Tdark) into train and test sets
tdark_train, tdark_test = train_test_split(
random_tdark_df, test_size=test_size, random_state=42
)
# Output folders for the train and independent test FASTA files
train_folder = "data/pharos/fastadata/Train"
test_folder = "data/pharos/fastadata/Independent_Test"
# Create folders if they don't exist
for folder in [train_folder, test_folder]:
    if not os.path.exists(folder):
        os.makedirs(folder)
# Function to extract header before the '|' symbol
def extract_header(identifier):
return identifier.split("|")[0]
# Function to write sequences to fasta file
def write_fasta(filename, dataframe):
with open(filename, "w") as file:
for index, row in dataframe.iterrows():
header = extract_header(row["Header"])
file.write(f">{header}\n{row['Sequence']}\n")
# Save the sequences to FASTA files in the train and test folders
write_fasta(os.path.join(train_folder, "positive_train_sequence.fasta"), tclin_train)
write_fasta(os.path.join(test_folder, "positive_test_sequence.fasta"), tclin_test)
write_fasta(os.path.join(train_folder, "negative_train_sequence.fasta"), tdark_train)
write_fasta(os.path.join(test_folder, "negative_test_sequence.fasta"), tdark_test)
|
txz32102/paper
|
util/sample.py
|
sample.py
|
py
| 3,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25575152305
|
import numpy as np
import inspect
import unittest
def select_alternating_columns(a: np.ndarray) -> np.ndarray:
    """
    Select alternating columns starting from the 0-th
    index of `a`. `a` will be at least 2 dimensions.
    >>> a = np.array([[0, 1, 2],
    ...               [3, 4, 5]])
    >>> select_alternating_columns(a)
    array([[0, 2],
           [3, 5]])
    """
    # Both parity branches were identical; a step slice covers every width.
    return a[:, ::2]
def popcount_rows(a: np.ndarray) -> np.ndarray:
"""
Return an array containing the popcount of every row
in `a`. `a` is 2d and consists of 0s and 1s only.
>>> a = np.array([[0, 0, 1],
... [0, 0, 0],
... [1, 0, 1],
... [1, 1, 1]])
>>> popcount_rows(a)
array([1, 0, 2, 3])
"""
return np.array([np.sum(row) for row in a])
def remove_all_zero_rows(a: np.ndarray) -> np.ndarray:
"""
Removes any rows that entirely consist of zeros from `a`.
>>> a = np.array([[0, 0, 0],
... [0, 0, 1]])
>>> remove_all_zero_rows(a)
array([[0, 0, 1]])
"""
    # np.any per row is robust even when values are not limited to 0/1
    return a[np.any(a != 0, axis=1)]
def swap_halves(a: np.ndarray) -> np.ndarray:
"""
Swaps the front and back halves of `a`, which
is at least 2 dimensions. If the array's size
is odd, includes the middle element as the
first element of the back half.
>>> swap_halves(np.array([0, 1, 2, 3]))
array([2, 3, 0, 1])
>>> swap_halves(np.array([0, 1, 2]))
array([1, 2, 0])
>>> a = np.reshape(range(8), [4, 2])
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7]])
>>> swap_halves(a)
array([[4, 5],
[6, 7],
[0, 1],
[2, 3]])
"""
    first_half = a[:len(a) // 2]
    second_half = a[len(a) // 2:]
    return np.concatenate([second_half, first_half])
def trim_zeros_on_edges_2d(a: np.ndarray) -> np.ndarray:
"""
Trims zeros around a rectangular 1-delimited section.
The section delimited by 1s will always be
rectangular and there is only one such section.
`a` will be 2d and consist of 0s and 1s only.
>>> a = np.array([[0, 0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0, 0],
... [0, 1, 0, 1, 0, 0],
... [0, 1, 1, 1, 0, 0],
... [0, 0, 0, 0, 0, 0]])
>>>
>>> trim_zeros_on_edges_2d(a)
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
"""
    # rows whose entries are all zero
    row_sums = np.array([np.sum(row) for row in a])
    zero_rows = row_sums == 0
    # columns whose entries are all zero
    col_sums = np.array([np.sum(col) for col in a.T])
    zero_cols = col_sums == 0
    # select only rows and columns that are not all zeros
    v = a[~zero_rows, :]
    v = v[:, ~zero_cols]
    return v
def one_hot_encode_1d(a: np.ndarray) -> np.ndarray:
"""
One hot encode every row in `a`. Values of `a` are
unique positive whole numbers or zero in the range [0-n).
>>> one_hot_encode_1d(np.array([3, 0, 1, 2]))
array([[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]])
"""
#max value of a
m = a.max() + 1
return np.array([[ 1 if i == v else 0 for i in range(m)] for v in a])
def make_chessboard(size: int) -> np.ndarray:
"""
Makes a 2d chessboard pattern with both dimensions
equal to `size`. The top-left corner should be 0.
`size` must be >= 0.
>>> make_chessboard(3)
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
>>> make_chessboard(4)
array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
"""
return np.array([[0 if i % 2 == j % 2 else 1 for j in range(size)] for i in range(size)])
def quad(a: np.ndarray) -> np.ndarray:
"""
Repeat the array horizontally and vertically.
>>> quad(np.array([0, 1]))
array([[0, 1, 0, 1],
[0, 1, 0, 1]])
>>> quad(np.array([[0, 1], [2, 3]]))
array([[0, 1, 0, 1],
[2, 3, 2, 3],
[0, 1, 0, 1],
[2, 3, 2, 3]])
"""
return np.tile(a, (2, 2))
def reflect_quad(a: np.ndarray) -> np.ndarray:
"""
Repeat the array horizontally and vertically but
also flip/mirror/reflect the repeated array around
the middle.
>>> reflect_quad(np.array([0, 1, 2]))
array([[0, 1, 2, 2, 1, 0],
[0, 1, 2, 2, 1, 0]])
>>> a = np.array([[0, 1],
... [2, 3]])
>>> reflect_quad(a)
array([[0, 1, 1, 0],
[2, 3, 3, 2],
[2, 3, 3, 2],
[0, 1, 1, 0]])
"""
    a2 = np.atleast_2d(a)
    # flip horizontally and join columns (mirror around the middle)
    top = np.concatenate([a2, np.flip(a2, 1)], axis=1)
    if a2.shape[0] > 1:
        # flip vertically and join rows
        return np.concatenate([top, np.flip(top, 0)], axis=0)
    # a was 1d: the vertical reflection is just a duplicated row
    return np.concatenate([top, top], axis=0)
def rows_where_bits_set_at_idxes(a: np.ndarray,
set_idxes: np.ndarray) -> np.ndarray:
"""
Return a list of indexes of rows with bits
set at indexes specified by `set_idxes`.
>>> a = np.array([[1, 0, 1, 0],
... [0, 1, 1, 0],
... [0, 1, 0, 1],
... [0, 1, 1, 1]])
>>> rows_where_bits_set_at_idxes(a, np.array([1, 3]))
array([2, 3], dtype=int64)
"""
    # a row qualifies only when *all* of the given indexes are set; np.any
    # would also match rows with just one of them set, contradicting the doctest
    return np.array([i for i in range(len(a)) if np.all(a[i, set_idxes])])
class Test(unittest.TestCase):
def test_select_alternating_columns_even(self):
""" select alternating columns (even length)
"""
a = np.reshape(np.arange(16), [4, 4])
actual = select_alternating_columns(a)
expected = np.array([[0, 2], [4, 6], [8, 10], [12, 14]])
np.testing.assert_equal(actual, expected)
def test_select_alternating_columns_odd(self):
""" select alternating columns (odd length)
"""
a = np.reshape(np.arange(6), [2, 3])
actual = select_alternating_columns(a)
expected = np.array([[0, 2], [3, 5]])
np.testing.assert_equal(actual, expected)
def test_popcount_rows(self):
""" popcount rows
"""
a = np.array([[0, 0, 1],
[0, 0, 0],
[1, 0, 1],
[1, 1, 1]])
actual = popcount_rows(a)
expected = np.array([1, 0, 2, 3])
np.testing.assert_equal(actual, expected)
def test_remove_all_zero_rows(self):
""" remove all zero rows (integer)
"""
a = np.array([[0, 0, 0], [0, 0, 1]])
actual = remove_all_zero_rows(a)
expected = np.array([[0, 0, 1]])
np.testing.assert_equal(actual, expected)
def test_remove_all_zero_rows_float(self):
""" remove all zero rows (float)
"""
a = np.array([[0., 0., 0.], [0., 0., 1.]])
actual = remove_all_zero_rows(a)
expected = np.array([[0., 0., 1.]])
np.testing.assert_equal(actual, expected)
def test_swap_halves_even(self):
""" swap halves (even length)
"""
a = np.arange(8)
actual = swap_halves(a)
expected = np.array([4, 5, 6, 7, 0, 1, 2, 3])
np.testing.assert_equal(actual, expected)
def test_swap_halves_odd(self):
""" swap halves (odd length)
"""
a = np.arange(7)
actual = swap_halves(a)
expected = np.array([3, 4, 5, 6, 0, 1, 2])
np.testing.assert_equal(actual, expected)
def test_swap_halves_2d_arr(self):
""" swap halves (2d array)
"""
a = np.reshape(np.arange(8), [4, 2])
actual = swap_halves(a)
expected = np.array([[4, 5], [6, 7], [0, 1], [2, 3]])
np.testing.assert_equal(actual, expected)
def test_trim_zeroes_on_edges_2d(self):
""" trim zeros on edges 2d
"""
a = np.array([[0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
expected = np.array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
actual = trim_zeros_on_edges_2d(a)
np.testing.assert_equal(actual, expected)
def test_one_hot_encode_1d(self):
""" one hot encode 1d
"""
a = np.array([3, 0, 1, 2])
expected = np.array([[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]])
actual = one_hot_encode_1d(a)
np.testing.assert_equal(actual, expected)
def test_make_chessboard_even(self):
""" make chessboard (even size)
"""
expected = np.array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
actual = make_chessboard(4)
np.testing.assert_equal(actual, expected)
def test_make_chessboard_odd(self):
""" make chessboard (odd size)
"""
expected = np.array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
actual = make_chessboard(3)
np.testing.assert_equal(actual, expected)
def test_quad(self):
""" quad
"""
a = np.array([0, 1])
expected = np.array([[0, 1, 0, 1],
[0, 1, 0, 1]])
actual = quad(a)
np.testing.assert_equal(actual, expected)
def test_quad_larger(self):
""" quad (larger array)
"""
a = np.array([[0, 1],
[2, 3]])
expected = np.array([[0, 1, 0, 1],
[2, 3, 2, 3],
[0, 1, 0, 1],
[2, 3, 2, 3]])
actual = quad(a)
np.testing.assert_equal(actual, expected)
def test_reflect_quad(self):
""" reflect quad
"""
a = np.array([0, 1, 2])
expected = np.array([[0, 1, 2, 2, 1, 0],
[0, 1, 2, 2, 1, 0]])
actual = reflect_quad(a)
np.testing.assert_equal(actual, expected)
def test_reflect_quad_larger(self):
""" reflect quad (larger array)
"""
a = np.array([[0, 1],
[2, 3]])
expected = np.array([[0, 1, 1, 0],
[2, 3, 3, 2],
[2, 3, 3, 2],
[0, 1, 1, 0]])
actual = reflect_quad(a)
np.testing.assert_equal(actual, expected)
def test_rows_where_bits_set_at_idxes(self):
""" rows where bits are set at indexes
"""
a = np.array([[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 1, 0, 1],
[0, 1, 1, 1]])
expected = np.array([2, 3])
set_idxes = np.array([1, 3])
actual = rows_where_bits_set_at_idxes(a, set_idxes)
np.testing.assert_equal(actual, expected)
test_src = inspect.getsource(Test)
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: (
test_src.index(f"def {x}") - test_src.index(f"def {y}")
)
#run tests
if __name__ == "__main__":
unittest.main()
|
ThadeuFerreira/python_code_challengers
|
numpyArrays.py
|
numpyArrays.py
|
py
| 11,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26735943730
|
import numpy as np
from .utils import LinearAnnealer,ExponentialAnnealer
import tqdm
import torch
import torch.nn as nn
import wandb
from progress.bar import Bar
from array2gif import write_gif
import copy
from .utils import set_seed
from .utils import save_rewards_meanvar_plot,get_logger,MLP,ReplayMemory
import logging
import time
from torch.distributions.categorical import Categorical
# With spinning up help ;)
class VPG:
def __init__(self, env, config):
for k, v in config.items():
setattr(self, k, v)
print(config)
self.env = env
self.config = copy.deepcopy(config)
self.reset(self.seed)
def reset(self, seed):
self.seed = seed
set_seed(self.seed)
self.env.seed(self.seed)
self.env.action_space.seed(self.seed)
obs_size = self.env.observation_space.n if 'n' in self.env.observation_space.__dict__ else self.env.observation_space._shape[0]
self.policy = MLP(self.nUnits, obs_size,self.env.action_space.n).to(self.device)
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self.lr)
def get_policy(self,s):
return Categorical(logits=self.policy(s))
def get_action(self,s):
return self.get_policy(s).sample()
def update_net(self,ep_mem):
def r_to_go(rewards):
return torch.cumsum(rewards.flip(dims=[0]),dim=0).flip(dims=[0])
s = torch.stack([exp.s for exp in ep_mem.memory])
a = torch.stack([exp.a for exp in ep_mem.memory])
r = torch.stack([exp.r for exp in ep_mem.memory])
r = r_to_go(r)
self.optimizer.zero_grad()
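        # REINFORCE loss: maximise E[log pi(a|s) * reward-to-go], written as a
        # negated mean so a gradient-descent optimizer can minimise it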
ep_loss = -(self.get_policy(s).log_prob(a) * r).mean()
ep_loss.backward()
self.optimizer.step()
return ep_loss
def train(self):
bar = Bar('{}'.format('Training'), max=self.nepisodes)
self.logger = get_logger("VPG",self.env.spec.name)
episode_rewards = []
eval_rewards = []
n_experience = 0
last_eval_mean = 0
last_eval_std = 0
step = 0
for ep in (range(self.nepisodes)):
self.policy.train()
replaymem = ReplayMemory(10000,1)
state = self.env.reset(seed=self.seed)
ep_reward = 0
for t in range(1,self.max_steps):
action = self.get_action(torch.tensor(state).unsqueeze(0).float())
new_state, reward, done, info = self.env.step(action.item())
ep_reward += reward
replaymem.add_exp(torch.tensor(state).unsqueeze(0).float(),action,reward,torch.tensor(new_state).unsqueeze(0).float(),int(done))
state = new_state
step += 1
if done:
break
self.update_net(replaymem)
if self.num_eval_episodes > 0 and ((ep % self.eval_freq )==0):
temp_eval_rewards = []
for _ in range(self.num_eval_episodes):
temp_eval_rewards.append(self.evaluate())
last_eval_mean = np.mean(temp_eval_rewards)
last_eval_std = np.std(temp_eval_rewards)
eval_rewards.append(temp_eval_rewards)
if self.use_wandb:
wandb.log({"episode_reward": ep_reward,'eval_reward_mean':last_eval_mean,'eval_reward_std':last_eval_std})
episode_rewards.append(ep_reward)
ep_info = ('Episode '+str(ep)+' reward: ' + str(ep_reward) + ' Mean r over last 20 episodes :' + str(np.mean(episode_rewards[-20:]).item())+' last eval mean,std ' +str(last_eval_mean)+' '+str(last_eval_std))
if "cart" in self.env.spec.name.lower() and np.mean(episode_rewards[-20:]).item() > 480:
print("Solved cartpole exiting early")
bar.finish()
self.logger.info(ep_info)
return eval_rewards, np.mean(episode_rewards[-30:]).item()
self.logger.info( ep_info)
Bar.suffix = ep_info
bar.next()
bar.finish()
return eval_rewards, np.mean(episode_rewards[-30:]).item()
def show_results(self):
self.evaluate(save_gif=True)
def evaluate(self,save_gif = False):
self.policy.eval()
state = self.env.reset(seed=self.seed)
total_reward = 0
frames = []
for t in range(1,self.max_steps):
action = self.get_action(torch.tensor(state).unsqueeze(0).float())
new_state, reward, done, info = self.env.step(action.item())
if save_gif:
img = self.env.render(mode="rgb_array")
frames.append(img)
total_reward += reward
state = new_state
if done :
break
if save_gif:
write_gif([np.transpose(f, axes=[2,0, 1]) for f in frames], 'gifs/vpg_'+self.env.spec.name+'.gif', fps=30)
if self.use_wandb:
wandb.log({"loss": total_reward})
return total_reward
|
gauthierboeshertz/reel
|
algos/plearners/vpg.py
|
vpg.py
|
py
| 5,225 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70541333949
|
import os
import sys
import re
import spotipy
from moviepy.editor import *
from urllib.parse import quote
from urllib import request as rq
from youtube_dl import YoutubeDL
from spotipy.oauth2 import SpotifyClientCredentials
# path fix so the local packages import without setting PYTHONPATH
sys.path.append(os.getcwd())
sys.path.append(os.path.join(os.getcwd(),"..","common"))
from common.common import controller_common
common = controller_common()
class controller_spotify:
def __init__(self,client_api,token_api,user):
self.__CLIENT_ID = client_api
self.__CLIENT_SECRET = token_api
self.__USER_ID = user
self.auth_manager = SpotifyClientCredentials(
client_id=self.__CLIENT_ID, client_secret=self.__CLIENT_SECRET
)
self.sp = spotipy.Spotify(auth_manager=self.auth_manager)
def get_ydl_opts(self, path):
return {
"format": "bestaudio/best",
"outtmpl": f"{path}/%(id)s.%(ext)s",
"ignoreerrors": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "320",
}
],
}
def get_user_playlists(self):
return [
{"value": pl.get("uri"), "name": pl.get("name")}
for pl in self.sp.user_playlists(self.__USER_ID).get("items")
]
def normalize_str(self, string):
return string.translate(str.maketrans('\\/:*?"<>|', "__ "))
def get_playlist_details(self, pl_uri):
offset = 0
fields = "items.track.track_number,items.track.name,items.track.artists.name,items.track.album.name,items.track.album.release_date,total,items.track.album.images"
pl_name = self.sp.playlist(pl_uri)["name"]
pl_items = self.sp.playlist_items(
pl_uri,
offset=offset,
fields=fields,
additional_types=["track"],
)["items"]
pl_tracks = []
while len(pl_items) > 0:
for item in pl_items:
if item["track"]:
track_name = self.normalize_str(item["track"]["name"])
artist_name = self.normalize_str(
item["track"]["artists"][0]["name"]
)
pl_tracks.append(
{
"uri": quote(
f'{track_name.replace(" ", "+")}+{artist_name.replace(" ", "+")}'
),
"file_name": f"{artist_name} - {track_name}",
"track_name": track_name,
"artist_name": artist_name,
"album_name": self.normalize_str(
item["track"]["album"]["name"]
),
"album_date": item["track"]["album"]["release_date"],
"track_number": item["track"]["track_number"],
"album_art": item["track"]["album"]["images"][0]["url"],
}
)
offset = offset + len(pl_items)
pl_items = self.sp.playlist_items(
pl_uri,
offset=offset,
fields=fields,
additional_types=["track"],
)["items"]
return {"pl_name": pl_name, "pl_tracks": pl_tracks}
def check_existing_tracks(self, playlist, path):
existing_tracks = os.listdir(path)
tracks = [
track
for track in playlist["pl_tracks"]
if f"{track['file_name']}.mp3" not in existing_tracks
]
return tracks
def download_tracks(self, pl_uri):
count = 0
items = list()
pl_details = self.get_playlist_details(pl_uri)
path = common.create_download_directory(pl_details["pl_name"])
tracks = self.check_existing_tracks(pl_details, path)
print(
f"\n\033[1m\033[33m[info] Downloading {len(tracks)} tracks from {pl_details['pl_name']}\033[0m"
)
with YoutubeDL(self.get_ydl_opts(path)) as ydl:
for track in tracks:
html = rq.urlopen(
f"https://www.youtube.com/results?search_query={track['uri']}"
)
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
if video_ids:
url = "https://www.youtube.com/watch?v=" + video_ids[0]
print ( f"Add [{count}] - {url}" )
count = count + 1
items.append(url)
res = common.thread_pool(items,path,"download")
if res:
common.converterto_mp3(pl_details["pl_name"])
|
alejan2x/FuckDownload
|
spotify/spotify.py
|
spotify.py
|
py
| 4,850 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23873824195
|
import cv2
import os
# Input folder containing the saved images
image_folder = '/Users/tobieabel/Desktop/video_frames/ConcatVideo/'
# Output video file path
output_video_path = '/Users/tobieabel/Desktop/video_frames/Youtube/v3_a demo.mp4'
# Get the list of image files in the input folder
image_files = os.listdir(image_folder)
if '.DS_Store' in image_files:  # macOS Finder metadata; only present sometimes
    image_files.remove('.DS_Store')
def file_sort_key(filename):
# Extract the numeric portion of the filename
number = int(os.path.splitext(filename)[0])
return number
# Sort the files chronologically
sorted_files = sorted(image_files, key=file_sort_key)
print(sorted_files)
# Get the dimensions of the first image to initialize the video writer
first_image_path = os.path.join(image_folder, sorted_files[0])
first_image = cv2.imread(first_image_path)
height, width, _ = first_image.shape
# Be careful here: a video scraped from YouTube had an odd resolution (width
# 1740, height 988) that cv2.VideoWriter would not accept, so the frames had
# to be resized to 1920x1080, the closest acceptable format.
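# A sketch of that workaround (not active in this script): resize each frame
# before writing it, e.g.
#   image = cv2.resize(image, (1920, 1080))
#   video_writer.write(image)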
# Define the codec and create the video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = 30 # Adjust as needed
video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
# Loop through the image files and write them to the video
for i in sorted_files:
image_path = os.path.join(image_folder, i)
image = cv2.imread(image_path)
# Write the image to the video writer
video_writer.write(image)
# Release the video writer
video_writer.release()
print(f"Video saved to: {output_video_path}")
|
tobieabel/demo-v3-People-Counter
|
Create_video.py
|
Create_video.py
|
py
| 1,633 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35929199029
|
#!/usr/bin/python3
"""Module defining a Square class that inherits from Rectangle"""
BaseGeometry = __import__('7-base_geometry').BaseGeometry
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
"""square Class"""
def __init__(self, size):
"""init"""
self.integer_validator("size", size)
super().__init__(size, size)
self.__size = size
|
philimon-reset/alx-higher_level_programming
|
0x0A-python-inheritance/10-square.py
|
10-square.py
|
py
| 411 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32731778878
|
# Fibonacci numbers - recursive calls
def fib(n):
if(n == 1 or n == 2):
return 1
else:
global count
count += 1
return fib(n-1) + fib(n-2)
# Fibonacci numbers - dynamic programming
def fibonacci(n):
f = []
f.append(1)
f.append(1)
cnt = 0
for i in range(2, n):
cnt += 1
f.append(f[i-1] + f[i-2])
return cnt
count = 1
n = int(input())
fib(n)
print(count, fibonacci(n))
|
woo222/baekjoon
|
python/동적프로그램/b1_24416_알고리즘 수업-피보나치 수1.py
|
b1_24416_알고리즘 수업-피보나치 수1.py
|
py
| 445 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
29262983646
|
import sys
import socket
import platform
import psutil
import wmi
import urllib.request
from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QPushButton, QTextEdit, QWidget
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
class App(QMainWindow):
def __init__(self, app):
super().__init__()
self.app = app
self.initUI()
def initUI(self):
self.setWindowTitle('App')
self.setGeometry(200, 200, 800, 600)
central_widget = QWidget(self)
self.setCentralWidget(central_widget)
layout = QVBoxLayout()
self.text_output = QTextEdit(self)
self.text_output.setFont(QFont("Arial", 12))
layout.addWidget(self.text_output)
button_ipv4_info = QPushButton('Get My IPv4', self)
button_proxy_info = QPushButton('Check Proxy Info', self)
button_system_info = QPushButton('Retrieve System Info', self)
button_bios_info = QPushButton('Fetch BIOS Info', self)
button_hostname_info = QPushButton('Get Hostname', self)
button_ipv4_info.setFont(QFont("Arial", 10))
button_proxy_info.setFont(QFont("Arial", 10))
button_system_info.setFont(QFont("Arial", 10))
button_bios_info.setFont(QFont("Arial", 10))
button_hostname_info.setFont(QFont("Arial", 10))
button_ipv4_info.setStyleSheet("background-color: lightblue;")
button_proxy_info.setStyleSheet("background-color: lightgreen;")
button_system_info.setStyleSheet("background-color: lightcoral;")
button_bios_info.setStyleSheet("background-color: lightsalmon;")
button_hostname_info.setStyleSheet("background-color: lightyellow;")
layout.addWidget(button_ipv4_info)
layout.addWidget(button_proxy_info)
layout.addWidget(button_system_info)
layout.addWidget(button_bios_info)
layout.addWidget(button_hostname_info)
central_widget.setLayout(layout)
button_ipv4_info.clicked.connect(self.fetch_ipv4_info)
button_proxy_info.clicked.connect(self.check_proxy_info)
button_system_info.clicked.connect(self.retrieve_system_info)
button_bios_info.clicked.connect(self.fetch_bios_info)
button_hostname_info.clicked.connect(self.get_host_name)
def fetch_ipv4_info(self):
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
is_static = socket.gethostbyaddr(ip)
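        # NOTE: gethostbyaddr() returns a (hostname, aliases, addresses) tuple;
        # it does not actually indicate whether the address is static.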
interface = None
if "Wi-Fi" in platform.platform():
interface = "Wi-Fi"
elif "Ethernet" in platform.platform():
interface = "Ethernet"
result = f"IPv4 Address: {ip}\nStatic: {is_static}\nNetwork Interface: {interface}"
self.text_output.append(result)
def check_proxy_info(self):
proxy_handler = urllib.request.ProxyHandler()
opener = urllib.request.build_opener(proxy_handler)
try:
opener.open("http://www.google.com", timeout=5)
is_proxy_enabled = True
except Exception:
is_proxy_enabled = False
proxy_status = "Proxy is enabled" if is_proxy_enabled else "Proxy is disabled"
self.text_output.append(proxy_status)
def retrieve_system_info(self):
os_version = platform.platform()
os_architecture = platform.architecture()
num_cores = psutil.cpu_count(logical=False)
ram = round(psutil.virtual_memory().total / (1024 ** 3), 2)
result = f"Operating System Version: {os_version}\nArchitecture: {os_architecture}\nCPU Cores: {num_cores}\nRAM: {ram} GB"
self.text_output.append(result)
def fetch_bios_info(self):
c = wmi.WMI()
bios = c.Win32_BIOS()[0]
result = f"BIOS Manufacturer: {bios.Manufacturer}\nBIOS Version: {bios.Version}\nBIOS Release Date: {bios.ReleaseDate}"
self.text_output.append(result)
def get_host_name(self):
hostname = socket.gethostname()
self.text_output.append(f"Hostname: {hostname}")
if __name__ == '__main__':
app = QApplication(sys.argv)
window = App(app)
window.show()
sys.exit(app.exec_())
|
miko7ajradziw1llowicz/Zadanie-3-python
|
main.py
|
main.py
|
py
| 4,160 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1363723921
|
from typing import Any, Dict, List, Type, TypeVar, Union
from attrs import define as _attrs_define
from attrs import field as _attrs_field
from ..types import UNSET, Unset
T = TypeVar("T", bound="FollowUpPriorityV2ResponseBody")
@_attrs_define
class FollowUpPriorityV2ResponseBody:
"""
Example:
{'description': 'A follow-up that requires immediate attention.', 'id': '01GNW4BAQ7XRMFF6FHKNXDFPRW', 'name':
'Urgent', 'rank': 10}
Attributes:
id (str): Unique identifier for the follow-up priority option Example: 01GNW4BAQ7XRMFF6FHKNXDFPRW.
name (str): Name of the follow-up priority option Example: Urgent.
rank (int): Rank is used to order the follow-up priority options correctly Example: 10.
description (Union[Unset, str]): Description of the follow-up priority option Example: A follow-up that requires
immediate attention..
"""
id: str
name: str
rank: int
description: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
id = self.id
name = self.name
rank = self.rank
description = self.description
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"id": id,
"name": name,
"rank": rank,
}
)
if description is not UNSET:
field_dict["description"] = description
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
id = d.pop("id")
name = d.pop("name")
rank = d.pop("rank")
description = d.pop("description", UNSET)
follow_up_priority_v2_response_body = cls(
id=id,
name=name,
rank=rank,
description=description,
)
follow_up_priority_v2_response_body.additional_properties = d
return follow_up_priority_v2_response_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
expobrain/python-incidentio-client
|
incident_io_client/models/follow_up_priority_v2_response_body.py
|
follow_up_priority_v2_response_body.py
|
py
| 2,629 |
python
|
en
|
code
| 4 |
github-code
|
6
|
37407208814
|
from jinja2 import Environment, BaseLoader
from io import BytesIO
import plotly
import base64
'''
export = ExportHTML()
export.render('testclass.html')
'''
class ExportHTML:
__template_vars = {'title':'Hello World','body':'Hello World !!!'}
__template_html = '''
<html>
<head lang="en">
<meta charset="UTF-8">
<title>{{ title }}</title>
<style>
table {
border-collapse: collapse;
width: 100%;
}
th {
text-align: center;
background-color: #ffd700;
color: black;
}
tr:nth-child(even) {background-color: #f2f2f2;}
tr {
text-align: right;
page-break-inside: avoid;
}
thead { display: table-header-group; }
tfoot { display: table-row-group; }
.break-before {
page-break-before: always;
}
</style>
</head>
<body>
<h1>Header</h1>
{{ body }}
<h2 class="break-before">Next Page</h2>
</body>
</html>
'''
def encode_graph(self, fig):
tmpfile = BytesIO()
fig.savefig(tmpfile, format='png', bbox_inches='tight')
encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')
fig_html = '<img src=\'data:image/png;base64,{}\'>'.format(encoded)
return fig_html
def plotly_img_uri(self, fig, height=300, width=1200, orca_path='C:/Users/Administrator/anaconda3/orca_app/orca.exe'):
plotly.io.orca.config.executable = orca_path
img_uri = base64.b64encode(plotly.io.to_image(fig, width=width, height=height)).decode('ascii')
return '<img style="width: {width}; height: {height}" '\
'src="data:image/png;base64,{img_uri}" />'.format(width=width, height=height, img_uri=img_uri)
@property
def template_vars(self):
return self.__template_vars
@template_vars.setter
def template_vars(self, var_dict):
self.__template_vars = var_dict
@property
def template_html(self):
return self.__template_html
@template_html.setter
def template_html(self, htmlString):
self.__template_html = htmlString
def render(self, output_file):
template = Environment(loader=BaseLoader()).from_string(self.template_html)
template_vars = self.template_vars
html_out = template.render(template_vars)
with open(output_file, "w") as fh:
fh.write(html_out)
|
etq-quant/etqbankloan
|
Lib/etiqalib/export_html.py
|
export_html.py
|
py
| 2,768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26656448918
|
# The code is partially borrowed from
# "Neural Network-based Reconstruction in Compressed Sensing
# MRI Without Fully-sampled Training Data"
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import util_torch as util_torch
def absval(arr):
"""
Takes absolute value of last dimension, if complex.
Input dims: (N, l, w, 2)
Output dims: (N, l, w)
"""
# Expects input of size (N, l, w, 2)
assert arr.shape[-1] == 2
return torch.norm(arr, dim=3)
def scale(y, y_zf):
"""Scales inputs for numerical stability"""
flat_yzf = torch.flatten(absval(y_zf), start_dim=1, end_dim=2)
max_val_per_batch, _ = torch.max(flat_yzf, dim=1, keepdim=True)
y = y / max_val_per_batch.view(len(y), 1, 1, 1)
y_zf = y_zf / max_val_per_batch.view(len(y), 1, 1, 1)
return y, y_zf
class Upsample(nn.Module):
"""Upsamples input multi-channel image"""
def __init__(self, scale_factor, mode, align_corners):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners)
class ResBlock(nn.Module):
'''5-layer CNN with residual output'''
def __init__(self, n_ch_in=2, n_ch_out=2, nf=64, ks=3):
super(ResBlock, self).__init__()
self.n_ch_out = n_ch_out
self.conv1 = nn.Conv2d(n_ch_in, nf, ks, padding = ks//2)
self.conv2 = nn.Conv2d(nf, nf, ks, padding = ks//2)
self.conv3 = nn.Conv2d(nf, nf, ks, padding = ks//2)
self.conv4 = nn.Conv2d(nf, nf, ks, padding = ks//2)
self.conv5 = nn.Conv2d(nf, n_ch_out, ks, padding = ks//2)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1_out = self.conv1(x)
conv1_out = self.relu(conv1_out)
conv2_out = self.conv2(conv1_out)
conv2_out = self.relu(conv2_out)
conv3_out = self.conv3(conv2_out)
conv3_out = self.relu(conv3_out)
conv4_out = self.conv4(conv3_out)
conv4_out = self.relu(conv4_out)
conv5_out = self.conv5(conv4_out)
x_res = x[:,:self.n_ch_out,:,:] + conv5_out
return x_res
class Net(nn.Module):
def __init__(self, K, lmbda, device, n_hidden=64):
super(Net, self).__init__()
#self.mask = mask
self.lmbda = lmbda
self.resblocks = nn.ModuleList()
self.device = device
for i in range(K):
resblock = ResBlock(n_ch_in=2, nf=n_hidden)
self.resblocks.append(resblock)
self.block_final = ResBlock(n_ch_in=2, nf=n_hidden)
def forward(self, ksp_input, sensemap, window = 1, mask = None):
if mask is None:
mask=torch.not_equal(ksp_input, 0)
dtype=torch.complex64
mask = mask.type(dtype)
x = util_torch.transpose_model(ksp_input * window, sensemap)
        x = util_torch.complex_to_channels(x)
#ksp_input, x = scale(ksp_input, x)
for i in range(len(self.resblocks)):
# z-minimization
x = x.permute(0, 3, 1, 2)
z = self.resblocks[i](x)
z = z.permute(0, 2, 3, 1)
z = util_torch.channels_to_complex(z)
# x-minimization
#z_ksp = utils.fft(z)
z_ksp = util_torch.model_forward(z, sensemap)
#x_ksp = losslayer.data_consistency(z_ksp, y, self.mask, self.lmbda)
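            # data consistency: outside the sampling mask keep the network's
            # k-space; inside it, blend network output with the measured data,
            # weighted by lmbda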
x_ksp = (1 - mask) * z_ksp + mask * (self.lmbda*z_ksp + ksp_input) / (1 + self.lmbda)
#x = utils.ifft(x_ksp)
x = util_torch.transpose_model(x_ksp, sensemap)
x = util_torch.complex_to_channels(x)
x = x.permute(0, 3, 1, 2)
x = self.block_final(x)
return x
|
ikjalata/MRIunsup
|
model.py
|
model.py
|
py
| 3,925 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28118192230
|
# -*- coding:utf-8 -*-
from PySide2.QtCore import Signal
from PySide2.QtWidgets import QDialog
from core.options import MultiOption
from ui.base.constants import ITEM_SEPARATORS
from ui.base.ui_add_items import Ui_AddItemsDialog
from utils import warn, splitItems, isEmpty
# noinspection PyTypeChecker
class AddItemsDialog(QDialog, Ui_AddItemsDialog):
ADD_EXCLUDE_MODULES = 0
ADD_HIDDEN_IMPORTS = 1
COLLECT_ALL_SUBMODULES = 3
COLLECT_ALL_DATA = 4
COLLECT_ALL_BINARIES = 5
COLLECT_ALL = 6
COPY_METADATA = 7
DEEP_COPY_METADATA = 8
DEFAULT_ITEMS_SEP = ";"
itemsAdded = Signal(MultiOption, list)
def __init__(self, parent):
super().__init__(parent)
self._action = -1
self._option = None
self.setupUi()
def setupUi(self, _=None):
super(AddItemsDialog, self).setupUi(self)
self.multiItemSeparatorCombo.addItems(ITEM_SEPARATORS.keys())
self.addButton.clicked.connect(self.onAddItem)
def onAddItem(self):
content = self.itemsEdit.toPlainText().replace("\n", "").replace("\r", "").strip()
if isEmpty(content):
warn(self, self.tr(u"Warning"), self.tr("Items cannot be empty!"))
return
content = content.replace(";", ";").replace(",", ",")
sepKey = self.multiItemSeparatorCombo.currentText()
items = splitItems(content, sepKey, self.DEFAULT_ITEMS_SEP)
self.itemsAdded.emit(self._option, items)
self.accept()
def display(self, action, option):
self._action = action
self._option = option
self.updateTitle()
self.show()
def updateTitle(self):
if self._action == self.ADD_EXCLUDE_MODULES:
self.setWindowTitle(self.tr("Add Exclude Modules"))
elif self._action == self.ADD_HIDDEN_IMPORTS:
self.setWindowTitle(self.tr("Add Hidden Imports"))
elif self._action == self.COLLECT_ALL_SUBMODULES:
self.setWindowTitle(self.tr("Collect all submodules from:"))
elif self._action == self.COLLECT_ALL_DATA:
self.setWindowTitle(self.tr("Collect all data from:"))
elif self._action == self.COLLECT_ALL_BINARIES:
self.setWindowTitle(self.tr("Collect all binaries from:"))
elif self._action == self.COLLECT_ALL:
self.setWindowTitle(self.tr("Collect all(submodules,data, bin...) from:"))
elif self._action == self.COPY_METADATA:
self.setWindowTitle(self.tr("Copy metadata for:"))
elif self._action == self.DEEP_COPY_METADATA:
self.setWindowTitle(self.tr("Copy metadata for(recursively):"))
else:
raise ValueError("unknown action")
def hideEvent(self, event):
super().hideEvent(event)
self._action = -1
self._option = None
self.setWindowTitle("")
self.itemsEdit.setText("")
|
zimolab/PyInstallerGUI
|
ui/add_items_ui.py
|
add_items_ui.py
|
py
| 2,937 |
python
|
en
|
code
| 10 |
github-code
|
6
|
31975334850
|
import numpy as np
import scipy.ndimage
import imageio  # scipy.misc.imread was removed in SciPy 1.2; imageio is a drop-in reader
import glob
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
def loadPsf(psftype, fileformat):
path='/gdata/zhoutk/Deconv/'+psftype
files=glob.glob(path+'/'+'*'+fileformat)
length=len(files)
if length==0:
print(path+'/')
print('invalid psf file path')
return
    im0=imageio.imread(files[0])
shape=im0.shape
psf=np.zeros((length, shape[0], shape[1]))
files.sort()
for i, file in enumerate(files):
#print(file)
        psf[i,:,:]=imageio.imread(file)
#print(type(psf))
#print(psf.shape)
return psf
def convolvePsf3D(volumn, psf, psnr):
    # normalize the PSF so it sums to 1
    psf=psf/np.sum(psf)
    # convert to float64 for the convolution
    volumn=volumn.astype('float64')
#convolve psf with volumn
#print(volumn.shape)
#print(psf.shape)
print('max_volumn: ', np.max(volumn))
#volumn=scipy.ndimage.zoom(volumn, 2.0)
if torch.cuda.is_available():
psf=psf[:99, :, :]
for i in range(len(psf.shape)):
psf=np.flip(psf, i)
psf=torch.from_numpy(psf.copy()).unsqueeze(0).unsqueeze(0).type(torch.FloatTensor).cuda()
psf=Variable(psf, requires_grad=False)
volumn=torch.from_numpy(volumn.copy()).unsqueeze(0).unsqueeze(0).type(torch.FloatTensor).cuda()
volumn=Variable(volumn, requires_grad=False)
output=F.conv3d(volumn, psf, padding=(49, 12, 12))
output=output.squeeze().cpu().data.numpy()
else:
        output=scipy.ndimage.convolve(volumn, psf, mode='constant')
print('convolve output shape: ', output.shape)
print('max_output: ', np.max(output))
#noise level --- gaussian noise
sigma=np.max(output)/np.power(10, psnr/20)
print('gaussian noise level:', sigma)
noise=np.random.normal(0, sigma, output.shape)
#add noise to the output
output=np.clip(output+noise, 0, np.max(output))
#output=output[0:101,0:101,0:101]
return output
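# Why the PSF is flipped above: torch's convNd computes cross-correlation, while
# scipy/numpy convolution flips the kernel first. Pre-flipping the kernel along
# every axis makes the two agree. A minimal 1-D check of the same idea:
def _check_flip_trick():
    x = np.array([1., 2., 3., 4., 5.])
    k = np.array([1., 0., -1.])
    true_conv = np.convolve(x, k, mode='same')            # true convolution
    xt = torch.tensor(x).view(1, 1, -1)
    kt = torch.tensor(k[::-1].copy()).view(1, 1, -1)      # pre-flipped kernel
    out = F.conv1d(xt, kt, padding=1).view(-1).numpy()    # cross-correlation
    assert np.allclose(out, true_conv)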
|
rickyim/DeconvNet
|
source/PSFConv.py
|
PSFConv.py
|
py
| 2,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74182199547
|
def EatUp (n):
if n > 1:
EatUp(n-1)
print("eat %d" %n)
elif n == 1:
print("eat 1")
def EatDown (n):
if n > 1:
print("eat %d" %n)
EatDown(n-1)
elif n == 1:
print("eat 1")
def Fac(n):
result = 1
for i in range(2,n+1):
print(i)
result *= i
return result
def Fac_Recursive(n):
if n == 0 or n == 1:
return 1
else:
return Fac_Recursive(n-1) * n
def summ(n, l):
if n == 0:
return 0
elif n == 1:
return l[0]
else:
        return summ(n-1, l) + l[n-1]  # recurse with summ, not the builtin sum
def summ2(l, fromI, toI):
if fromI > toI:
return 0
elif fromI == toI:
return l[toI]
else:
return l[fromI] + summ2(l, fromI+1, toI)
def summ3(l):
n = len(l)
if n == 0:
return 0
elif n == 1:
return l[0]
else:
return l[0] + summ3(l[1:])
def fibR(n):
if n <= 1:
return n
else:
return fibR(n-1) + fibR(n-2)
print(fibR(8))
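# fibR above recomputes the same subproblems exponentially many times; memoizing
# the recursion makes it linear in n. A standard variant (not part of the original):
from functools import lru_cache

@lru_cache(maxsize=None)
def fibMemo(n):
    if n <= 1:
        return n
    return fibMemo(n-1) + fibMemo(n-2)

print(fibMemo(8))  # 21, same answer as fibR(8)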
|
chollsak/KMITL-Object-Oriented-Data-Structures-2D
|
Recursive/recursive.py
|
recursive.py
|
py
| 1,049 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71989647867
|
from tkinter import *
import tkinter as tk
import tkinter.messagebox
from PIL import ImageTk, Image
HEIGHT = 500
WIDTH = 600
root = tk.Tk()
def restart():
ans = tkinter.messagebox.askyesno('Starting New Game','Are you sure?')
if ans:
root.destroy()
from BallShooterLimit import Limit
Limit()
def history():
hist = tkinter.messagebox.askyesno('View History','Are you sure you would like to view history?')
if hist:
root.destroy()
def exiting():
    ans = tkinter.messagebox.askyesno('Exit','Are you sure you would like to exit?')
    if ans:
        root.destroy()
def MainMenu():
root.title("Main Menu Page GUI")
canvas = tk.Canvas(root,height = HEIGHT, width = WIDTH)
canvas.pack()
#this label shows a header in the main menu
headerLabel = tk.Label(root, text = "Choose any of the options below: ")
headerLabel.place(relx = 0.1, rely =0.1, relwidth = 0.85, relheight = 0.1)
#this frame is for the "StartNewGame" button
restartframe = tk.Frame(root,bg = '#ff99cc',bd = 5)
restartframe.place(relx = 0.5, rely = 0.25,relwidth = 0.3, relheight = 0.1,anchor = 'n')
#this label is for the "StartNewGame" button
restartlabel = tk.Label(restartframe)
restartlabel.place(relx = 0.4, rely =0, relwidth = 0.5, relheight = 1)
#this is the "StartnewGame" button
restartbutton = tk.Button(restartframe,text = "Start New Game",font = 40, fg = 'black', command = lambda: restart())
restartbutton.place(relx = 0.5,rely = 0.5, relwidth = 0.9, relheight = 1, anchor = 'center')
#this frame is for the "ViewHistory" button
histframe = tk.Frame(root,bg = '#ff99cc',bd = 5)
histframe.place(relx = 0.5, rely = 0.35,relwidth = 0.3, relheight = 0.1,anchor = 'n')
#this label is for the "ViewHistory" button
histlabel = tk.Label(histframe)
histlabel.place(relx = 0.1, rely =0, relwidth = 0.8, relheight = 1)
#this is the "ViewHistory" button
histbutton = tk.Button(histframe,text = "View History",font = 40, fg = 'black', command = lambda: history())
histbutton.place(relx = 0.5,rely = 0.5, relwidth = 0.9, relheight = 1, anchor = 'center')
#this frame is for the "ExitGame" button
exitframe = tk.Frame(root,bg = '#ff99cc',bd = 5)
exitframe.place(relx = 0.5, rely = 0.45,relwidth = 0.3, relheight = 0.1,anchor = 'n')
#this label is for the "StartNewGame" button
exitlabel = tk.Label(exitframe)
exitlabel.place(relx = 0.4, rely =0, relwidth = 0.5, relheight = 1)
#this is the "StartnewGame" button
exitbutton = tk.Button(exitframe,text = "Exit Game",font = 40, fg = 'black', command = lambda: exiting())
exitbutton.place(relx = 0.5,rely = 0.5, relwidth = 0.9, relheight = 1, anchor = 'center')
root.mainloop()
|
Alfred-Akinkoye/reacTen
|
GameServer/MainMenuPage.py
|
MainMenuPage.py
|
py
| 2,800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19809320159
|
import time
import constants as cons
import matplotlib.pyplot as plt
from preprocessing.images_reader import ImagesReader
start_time = time.time()
print('reading images...')
reader = ImagesReader(cons.PREPROCESSED_DATASET_DIR)
train_images = reader.read_train_images()
classes = [None] * len(train_images)
samples = [None] * len(train_images)
for i, image_class in enumerate(train_images):
classes[i] = image_class
samples[i] = len(train_images[image_class])
plt.plot(classes, samples)
plt.show()
end_time = time.time()
print('done in {:.2f}s'.format(end_time - start_time))
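# plt.plot draws a line through what are really unordered categories; for a class
# distribution a bar chart usually reads better. A sketch with made-up stand-ins:
def _bar_chart_sketch():
    toy_classes = ['ak47', 'bat', 'backpack']   # hypothetical class names
    toy_samples = [120, 80, 45]
    plt.bar(toy_classes, toy_samples)
    plt.xticks(rotation=90)                     # Caltech-256 class names are long
    plt.tight_layout()
    plt.show()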
|
sachokFoX/caltech_256
|
code/run_data_distribution_analysis.py
|
run_data_distribution_analysis.py
|
py
| 589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12948066350
|
import matplotlib.pyplot as plt
tiempo = [0,1,2,3,4,5]
sensor = [4,5,6,8,9, 10]
plt.plot(tiempo,sensor,'--,r')
plt.title('Grafico sensor contra el tiempo')
plt.xlabel('Tiempo(s)')
plt.ylabel('Voltaje(v)')
plt.savefig('sensor.png')
plt.show()
# Note: you can pass a style symbol such as '--' to change how the data is drawn; with no symbol it is plotted as a solid line
# DICTIONARY
diccionario = {}
diccionario['NombresEstudiantes'] = ['Andrea', 'Nicolle', 'Isabel', 'Santiago']
diccionario['EdadEstudiantes'] = [18,20,19,15]
diccionario['Peso'] = [60,55,70,78]
print(diccionario)
print(diccionario['NombresEstudiantes'][-1],diccionario['EdadEstudiantes'][-1],diccionario['Peso'][-1])
|
vero-obando/Programacion
|
Clases/Graficos/curvas.py
|
curvas.py
|
py
| 679 |
python
|
es
|
code
| 0 |
github-code
|
6
|
11353054992
|
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function, division
import Icarus
from Icarus.Utils.import_modules import *
##### Welcome message
print( "Analysing some mock data. It is recommended to run it within the `ipython --pylab' environment.\n" )
##### Loading the data
atmo_fln = 'atmo_models.txt'
data_fln = 'data.txt'
ndiv = 5
porb = 10 * 3600
x2sini = 1.1
print( "Loading the data into an Icarus.Photometry object (failure to do so is likely due to missing atmosphere models).\n" )
fit = Icarus.Photometry.Photometry(atmo_fln, data_fln, ndiv, porb, x2sini)
##### This is the list of true parameters for the stars, as per construction
incl = 75.*cts.degree
corotation = 1.
filling = 0.90
Tnight = 2500.
gravdark = 0.08
K = 300e3
Tday = 5000.
DM = 10.0
AJ = 0.02
par0 = np.r_[incl, corotation, filling, Tnight, gravdark, K, Tday, DM, AJ]
##### Fitting the data using a simple fmin algorithm from scipy
##### Here we make use of the Calc_chi2 function with offset_free = 1 in order to allow for a possible band calibration error, which we assume is 0.3 mag (see column 5 in data.txt).
##### We will also assume that corotation = 1, gravdark = 0.08 and K=300e3.
## Defining the func_par
func_par = lambda p: np.r_[p[0], 1., p[1], p[2], 0.08, 300e3, p[3], p[4], p[5]]
## Wrapper function for the figure of merit to optimize
def FoM(p):
p = np.asarray(p)
## Return large value if parameters are out of bound
if (p < np.r_[0.1, 0.1, 1500., p[2], 8., 0.]).any() or (p > np.r_[np.pi/2, 1.0, 8000., 8000., 12., 0.1]).any():
#print( "out-of-bound" )
return np.ones_like(fit.mag)*1e5
else:
chi2, extras = fit.Calc_chi2(p, offset_free=0, func_par=func_par, full_output=True, verbose=False)
return extras['res']
## Initial guess
par_guess = [70*cts.degree, 0.95, 2000., 5500., 10.3, 0.01]
## Running the fit
print( "Performing a crude fit using the scipy.optimize.leastsq function.\n" )
print( "Beware that the fitting may not converge to the best-fit solution due to local minima. One should try to fit the data using diferent guess parameters or, even better, a more robust fitting algorithm.\n" )
print( "Also, do not expect the best-fit parameter to converge at the actual solution. The reason being that noise is added to the theoretical data when generating the mock data. Hence it might be that by sheer luck the mock data mimic a slightly different set of parameters. If one was to regenerate the mock data several times and rerun the fit, it would on average converge at the actual solution.\n" )
sol = scipy.optimize.leastsq(FoM, par_guess, full_output=True)
par = sol[0]
err = np.sqrt( sol[1].diagonal() )
##### Printing the results
print( "Results from the fitting:" )
print( "{:<28} {:>15} {:>15}".format("Parameter", "Actual solution", "Fitted Solution") )
print( "{:<28} {:>15.3f} {:>15.3f} +/- {:.3f}".format("inclination", incl/cts.degree, par[0]/cts.degree, err[0]/cts.degree) )
print( "{:<28} {:>15.3f} {:>15.3f} +/- {:.3f}".format("filling factor", filling, par[1], err[1]) )
print( "{:<28} {:>15.1f} {:>15.3f} +/- {:.3f}".format("Tnight", Tnight, par[2], err[2]) )
print( "{:<28} {:>15.1f} {:>15.3f} +/- {:.3f}".format("Tday", Tday, par[3], err[3]) )
print( "{:<28} {:>15.2f} {:>15.3f} +/- {:.3f}".format("DM", DM, par[4], err[4]) )
print( "{:<28} {:>15.3f} {:>15.3f} +/- {:.3f}".format("AJ", AJ, par[5], err[5]) )
print( "" )
##### Plotting the data and model if possible
if pylab:
print( "Plotting the data. If nothing shows up, try pylab.show()." )
fig = pylab.figure()
ax = fig.add_subplot(111)
pl1 = ax.errorbar(np.r_[fit.data['phase'][0],fit.data['phase'][0]+1.], np.r_[fit.data['mag'][0],fit.data['mag'][0]], yerr=np.r_[fit.data['err'][0],fit.data['err'][0]], marker='s', mfc='red', mec='red', ms=3, ecolor='red', fmt='.')
pl2 = ax.errorbar(np.r_[fit.data['phase'][1],fit.data['phase'][1]+1.], np.r_[fit.data['mag'][1],fit.data['mag'][1]], yerr=np.r_[fit.data['err'][1],fit.data['err'][1]], marker='s', mfc='blue', mec='blue', ms=3, ecolor='blue', fmt='.')
phs = np.linspace(0, 2, 101)
flux = fit.Get_flux_theoretical(par, [phs,phs], func_par=func_par)
pl3 = ax.plot(phs, flux[0], 'r-')
pl4 = ax.plot(phs, flux[1], 'b-')
flux = fit.Get_flux_theoretical(par0, [phs,phs])
pl5 = ax.plot(phs, flux[0], 'r:')
pl6 = ax.plot(phs, flux[1], 'b:')
leg = ax.legend([pl1[0],pl3[0],pl5[0],pl2[0],pl4[0],pl6[0]], ["i","Fit","Real","g","Fit","Real"], ncol=2, loc=0, numpoints=1, scatterpoints=1)
ax.set_xlabel("Orbital Phase")
ax.set_ylabel("Magnitude")
ax.set_xlim([0.,2.])
vals = np.r_[fit.data['mag'][0], fit.data['mag'][1]]
ax.set_ylim([vals.max()+(vals.max()-vals.min())*0.1, vals.min()-(vals.max()-vals.min())*0.1])
pylab.show()
|
bretonr/Icarus
|
Examples/Example1/example1.py
|
example1.py
|
py
| 4,839 |
python
|
en
|
code
| 11 |
github-code
|
6
|
42345298259
|
def getLongestLine(img):
longest = 0
for i in range(0, len(img)):
if len(img[i]) > longest:
longest = len(img[i])
return longest
def rotate(img):
width = getLongestLine(img)
height = len(img)
longest = width
answer = []
if(width < height):
longest = height
for i in range(0, longest):
answer.append([' '] * longest)
for i in range(0, len(img)):
for j in range(0, len(img[i])):
try:
print("Swapped at " + str(j) + " " + str(i))
answer[j][i] = img[i][j]
            except IndexError:  # ragged rows: shorter lines pad with spaces
print("Entered a space")
answer[j][i] = ' '
print(answer[i])
lines = 1
while lines:
lines = int(input())
img = []
for i in range(0, lines):
line = input()
img.append(list(line))
rotate(img)
print()
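# Note: answer[j][i] = img[i][j] above is a transpose; a true 90-degree clockwise
# rotation also reverses the row order first. A compact reference version:
def rotate90_clockwise(grid):
    return ["".join(row) for row in zip(*grid[::-1])]

# rotate90_clockwise(["ab", "cd"]) -> ["ca", "db"]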
|
yodigi7/kattis
|
CompetitionASCIIRotation.py
|
CompetitionASCIIRotation.py
|
py
| 890 |
python
|
en
|
code
| 2 |
github-code
|
6
|
41152382829
|
from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import pandas as pd
class Tabla:
def __init__(self,root, dataFrame, anchos, fechas, bgColor, posX, posY):
self.anchos = anchos
self.fechas = fechas
self.nuevoDatos = []
self.componentes = []
cont = 0
self.df = dataFrame
self.frm = ttk.Frame(root)
for k in dataFrame:
tmp = Entry(self.frm, width=anchos[cont], bg=bgColor, fg='black', font= ('Arial', 12),
highlightthickness=1, highlightbackground="#000000", highlightcolor="#000000")
tmp.grid(row=0, column=cont)
tmp.insert(INSERT, k)
cont += 1
self.lista = list(dataFrame.to_records(index=False))
self.filas = len(self.lista)
self.columnas = cont
for i in range(self.filas):
row = []
for j in range(self.columnas):
aux = Entry(self.frm, width=anchos[j], fg='black',
font=('Arial',12,), highlightthickness=1, highlightbackground="#000000", highlightcolor="#000000")
aux.grid(row=i + 1, column=j)
if len(fechas) == 0:
aux.insert(INSERT, self.lista[i][j])
else:
if j in fechas:
aux.insert(INSERT, pd.to_datetime(self.lista[i][j]).date().strftime('%d/%m/%y'))
else:
aux.insert(INSERT, self.lista[i][j])
aux.configure(state='readonly')
row.append(aux)
self.componentes.append(row)
self.frm.pack()
self.frm.place(x=posX, y=posY)
def limpiar(self):
for widget in self.frm.winfo_children():
widget.destroy()
class EquiposFrame():
def __init__(self, ventana):
self.cantEquipos = tk.StringVar()
self.retirados = tk.StringVar()
self.inscritos = tk.StringVar()
self.mostrarTabla(ventana)
tk.Label(ventana, text='Equipos', font=('Arial Black', 12), bg="#3a7ff6", width=25).place(x=15, y=50)
tk.Button(ventana, text='listar', font=('Arial', 12), width=8, height=2, highlightbackground = "black", borderwidth=5, bg="white", command= lambda: self.mostrarTabla(ventana)).place(x=40, y=120)
tk.Button(ventana, text='borrar', font=('Arial', 12), width=8, height=2, bg="white", borderwidth = 5, command=self.limpiarTabla).place(x=170, y=120)
tk.Label(ventana, text='Cantidad de equipos', font=('Arial', 12), bg="white").place(x=25, y=220)
tk.Label(ventana, text='Equipos retirados', font=('Arial', 12), bg="white").place(x=25, y=260)
        tk.Label(ventana, text='Equipos inscritos', font=('Arial', 12), bg="white").place(x=25, y=300)
tk.Button(ventana, textvariable=self.cantEquipos, font=('Arial', 12), state='disabled').place(x=300, y=210)
tk.Button(ventana, textvariable=self.retirados, font=('Arial', 12), state='disabled').place(x=300, y=250)
tk.Button(ventana, textvariable=self.inscritos, font=('Arial', 12), state='disabled').place(x=300, y=290)
def mostrarTabla(self, ventana):
archivo = pd.read_excel('Equipos.xlsx', sheet_name='Hoja1')
self.cantEquipos.set(str(len(archivo['N°'])))
self.retirados.set(list(archivo['Retirado']).count('Si'))
self.inscritos.set(list(archivo['Retirado']).count('No'))
anchos = [5, 40, 20, 18, 10]
fechas = []
self.tabla = Tabla(ventana, archivo,anchos,fechas,'#3a7ff6', 50, 350)
def limpiarTabla(self):
self.tabla.limpiar()
class CampeonatoFrame():
def __init__(self, ventana):
self.temporada = tk.StringVar()
self.inicio = tk.StringVar()
self.fin = tk.StringVar()
tk.Label(ventana, text='Campeonato', font=('Arial Black', 12), bg="#3a7ff6", width=25).place(x=15, y=50)
tk.Label(ventana, text='Fecha de Inicio', font=('Arial', 12), bg="white").place(x=25, y=100)
tk.Label(ventana, text='Fecha Final', font=('Arial', 12), bg="white").place(x=25, y=140)
tk.Entry(ventana, bd=3, bg="#F6F7F9", highlightthickness=2, font=("Arial", 11), textvariable=self.temporada).place(x=300, y=50, width=120.0, height=30)
tk.Entry(ventana, bd=3, bg="#F6F7F9", highlightthickness=2, font=("Arial", 11), textvariable=self.inicio).place(x=300, y=90, width=120.0, height=30)
tk.Entry(ventana, bd=3, bg="#F6F7F9", highlightthickness=2, font=("Arial", 11), textvariable=self.fin).place(x=300, y=130, width=120.0, height=30)
tk.Button(ventana, text='GENERAR', wraplength=125, justify=tk.LEFT, relief='flat', font=('Arial', 16, "bold"), anchor="w", borderwidth = 0, highlightthickness = 15, compound = 'center', bg="#3a7ff6", fg="white", activebackground="#3a7ff6", command= lambda: self.mostrarTabla(ventana)).place(x=500, y=70)
def mostrarTabla(self, ventana):
archivo = pd.read_excel('Partidos.xlsx', sheet_name='Hoja1')
anchos = [5, 10, 60]
fechas = [1]
self.tabla = Tabla(ventana, archivo,anchos,fechas,'#3a7ff6', 50, 350)
class ListaRegistroFrame():
def __init__(self, ventana):
self.equipo = tk.StringVar()
self.representante = tk.StringVar()
self.jugadores = tk.StringVar()
self.mostrarTabla(ventana)
tk.Label(ventana, text='Lista de Registro', font=('Arial Black', 12), bg="#3a7ff6", width=25).place(x=15, y=50)
tk.Label(ventana, text='Nombre del Equipo', font=('Arial', 12), bg="white").place(x=25, y=100)
tk.Label(ventana, text='Representante', font=('Arial', 12), bg="white").place(x=25, y=140)
tk.Label(ventana, text='Cantidad de Jugadores', font=('Arial', 12), bg="white").place(x=25, y=180)
tk.Entry(ventana, bd=1, bg="#F6F7F9", highlightthickness=2, font=("Arial", 11), textvariable=self.equipo).place(x=320, y=90, width=220.0, height=30)
tk.Entry(ventana, bd=1, bg="#F6F7F9", highlightthickness=2, font=("Arial", 11), textvariable=self.representante).place(x=320, y=130, width=220.0, height=30)
tk.Entry(ventana, bd=1, bg="#F6F7F9", highlightthickness=2, font=("Arial", 11), textvariable=self.jugadores).place(x=320, y=170, width=220.0, height=30)
tk.Button(ventana, text='AGREGAR', wraplength=125, justify=tk.LEFT, relief='flat', font=('Arial', 16, "bold"), anchor="w", borderwidth = 0, highlightthickness = 15, compound = 'center', bg="#3a7ff6", fg="white", activebackground="#3a7ff6", command= lambda:self.agregarEquipo(ventana)).place(x=580, y=115)
def mostrarTabla(self, ventana):
archivo = pd.read_excel('Equipos.xlsx', sheet_name='Hoja1')
anchos = [5, 40, 20, 18, 10]
fechas = []
self.tabla = Tabla(ventana, archivo,anchos,fechas,'#3a7ff6', 50, 350)
def agregarEquipo(self, ventana):
mensajes = ''
if self.equipo.get() == "":
mensajes += 'Debe ingresar el nombre del equipo\n'
if self.jugadores.get().isnumeric() is False:
mensajes += 'La cantidad de jugadores debe de ser un numero.\n'
if self.representante.get() == "":
mensajes += 'Debe ingresar el nombre del representante.\n'
if len(mensajes) > 0:
messagebox.showerror(title='ERROR', message=mensajes)
return
archivo = pd.read_excel('Equipos.xlsx', sheet_name='Hoja1')
archivo.loc[archivo.shape[0]] = [len(archivo['N°']) + 1, self.equipo.get(), self.representante.get(), int(self.jugadores.get()), 'No']
archivo.to_excel('Equipos.xlsx', sheet_name='Hoja1', index=False)
self.jugadores.set('')
self.equipo.set('')
self.representante.set('')
self.mostrarTabla(ventana)
class ReportesProblemasFrame():
def __init__(self, ventana):
tk.Label(ventana, text='Reporte de Problema', font=('Arial Black', 12), bg="#3a7ff6", width=25).place(x=15, y=50)
tk.Label(ventana, text='Informar Incidencias', font=('Arial', 12), bg="white").place(x=25, y=100)
tk.Label(ventana, text='Describa su inconveniente', font=('Arial', 12), bg="#3a7ff6").place(x=50, y=180)
self.texto = tk.Text(ventana, font=('Arial', 12), width=50, height=12)
self.texto.place(x=50, y=200)
tk.Button(ventana, text='ENVIAR', wraplength=125, justify=tk.LEFT, relief='flat', font=('Arial', 16, "bold"), anchor="w", borderwidth = 0, highlightthickness = 0, compound = 'center', bg="#3a7ff6", fg="white", activebackground="#3a7ff6", command=self.reportar).place(x=300, y=450)
tk.Button(ventana, text='BORRADOR', wraplength=125, justify=tk.LEFT, relief='flat', font=('Arial', 16, "bold"), anchor="w", borderwidth = 0, highlightthickness = 0, compound = 'center', bg="#3a7ff6", fg="white", activebackground="#3a7ff6", command=self.borrador).place(x=450, y=450)
def reportar(self):
archivo = pd.read_excel('Problemas.xlsx', sheet_name='Hoja1')
archivo.loc[archivo.shape[0]] = [len(archivo['Codigo']) + 1, self.texto.get("1.0","end-1c")]
archivo.to_excel('Problemas.xlsx', sheet_name='Hoja1', index=False)
self.texto.delete(1.0,END)
messagebox.showinfo(title='EXITOSO', message='El problema se ha reportado satisfactoriamente')
def borrador(self):
self.texto.delete(1.0,END)
class GestionEquipos():
def __init__(self):
self.main_window = tk.Tk()
w = 1000
h = 650
screen_width = self.main_window.winfo_screenwidth()
screen_height = self.main_window.winfo_screenheight()
x = (screen_width/2) - (w/2)
y = (screen_height/2) - (h/2)
self.main_window.geometry('%dx%d+%d+%d' % (w, h, x, y))
self.formActual = EquiposFrame(self.main_window)
self.agregarBotonesPrincipales()
self.main_window.mainloop()
def agregarBotonesPrincipales(self):
tk.Button(self.main_window, text='Equipos', command=self.abrirEquiposFrm).place(x=0, y=0)
tk.Button(self.main_window, text='Campeonato', command=self.abrirCampeonatoFrm).place(x=52, y=0)
tk.Button(self.main_window, text='Lista de Registro', command=self.abrirListaRegistroFrm).place(x=131, y=0)
tk.Button(self.main_window, text='Reporte de Problemas', command=self.abrirReporteProblemaFrm).place(x=227, y=0)
def limpiarVentana(self):
for widget in self.main_window.winfo_children():
widget.destroy()
def abrirEquiposFrm(self):
self.limpiarVentana()
self.agregarBotonesPrincipales()
self.formActual = EquiposFrame(self.main_window)
def abrirCampeonatoFrm(self):
self.limpiarVentana()
self.agregarBotonesPrincipales()
self.formActual = CampeonatoFrame(self.main_window)
def abrirListaRegistroFrm(self):
self.limpiarVentana()
self.agregarBotonesPrincipales()
self.formActual = ListaRegistroFrame(self.main_window)
def abrirReporteProblemaFrm(self):
self.limpiarVentana()
self.agregarBotonesPrincipales()
self.formActual = ReportesProblemasFrame(self.main_window)
programa = GestionEquipos()
|
Moisesmp75/TkinterForms
|
Trabajo4/programa.py
|
programa.py
|
py
| 11,298 |
python
|
es
|
code
| 0 |
github-code
|
6
|
23341249880
|
import json as js
import csv
import sys
import jinja2
import os
from datetime import datetime
# import smtplib
# read customers file to get information about customers
def get_customers(customers_file, error):
TITLE = []
FIRST_NAME = []
LAST_NAME = []
EMAIL = []
with open(customers_file, mode='r') as csv_file:
customers = csv.DictReader(csv_file, delimiter=',')
errorData = []
for customer in customers:
if customer["EMAIL"] != '':
TITLE.append(customer["TITLE"])
FIRST_NAME.append(customer["FIRST_NAME"])
LAST_NAME.append(customer["LAST_NAME"])
EMAIL.append(customer["EMAIL"])
else:
errorData.append([customer["TITLE"], customer["FIRST_NAME"], customer["LAST_NAME"], customer["EMAIL"]])
with open(error, mode='w', newline='') as f:
errorCustomer = csv.writer(f)
errorCustomer.writerow(['TITLE','FIRST_NAME','LAST_NAME','EMAIL'])
for customer in errorData:
errorCustomer.writerow(customer)
return TITLE, FIRST_NAME, LAST_NAME, EMAIL
def read_template(email_template_file):
with open(email_template_file, mode='r') as email_template:
template = js.load(email_template)
return template
# A CLI library such as argparse or getopt could parse argv from the command line
def main(email_template, customers, path_output_emails, error):
# how to use smtp send email
# s = smtplib.SMTP(host='host_address', port=port)
# s.starttls()
# s.login(MY_ADDRESS, PASSWORD)
TITLE, FIRST_NAME, LAST_NAME, EMAIL = get_customers(customers, error)
template = read_template(email_template)
if os.path.isdir(path_output_emails):
os.chdir(path_output_emails)
else:
os.mkdir(path_output_emails)
os.chdir(path_output_emails)
now = datetime.now()
env = jinja2.Environment()
outputJsonFile = open("output.json", "w")
resultData = []
for title, first_name, last_name, email in zip(TITLE, FIRST_NAME, LAST_NAME, EMAIL):
data = {}
body_template = env.from_string(template["body"])
data["from"] = template["from"]
data["to"] = email
data["subject"] = template["subject"]
data["mineType"] = template["mineType"]
data["body"] = body_template.render(TITLE=title, FIRST_NAME=first_name, LAST_NAME=last_name, TODAY=now.strftime('%d %b %Y'))
resultData.append(data)
# s.send_message(data)
# del data
output = js.dumps(resultData, indent=4)
outputJsonFile.write(output)
outputJsonFile.close()
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
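# A sketch of the argparse approach the comment above mentions; the argument
# names are assumptions, mirroring the positional sys.argv order used by main().
def parse_args(argv=None):
    import argparse
    parser = argparse.ArgumentParser(description='Render templated emails for customers')
    parser.add_argument('email_template', help='path to the JSON email template')
    parser.add_argument('customers', help='path to the customers CSV file')
    parser.add_argument('output_dir', help='directory that will hold output.json')
    parser.add_argument('errors', help='CSV path for customers without an email')
    return parser.parse_args(argv)

# usage: args = parse_args(); main(args.email_template, args.customers, args.output_dir, args.errors)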
|
thanhthien272/sendEmailPython
|
send_email.py
|
send_email.py
|
py
| 2,750 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36079551198
|
import sys
import glob
from log.logdb import LogDb
from log.loader import LogLoader
from gcp.storage import LogStorage
from log.timeutil import timestamp
class DbLoader:
def __init__(self):
self.book_last_time = 0
self.funding_last_time = 0
self.trade_last_time = 0
self.log_db = None
self.log_loader = LogLoader(self.order_book_tick, self.trade_tick, self.funding_tick)
def open_db(self, db_file=None):
self.log_db = LogDb(db_file)
self.log_db.connect()
self.log_db.create_cursor()
self.log_db.create()
def close_db(self):
self.log_db.close()
def get_db(self):
return self.log_db
def load_line(self, line):
self.log_loader.load_line(line)
def load_lines(self, lines):
for line in lines:
self.log_loader.load_line(line)
def load_file(self, log_file):
        print('Processing ' + log_file, end='')
try:
self.log_loader.load(log_file)
        except EOFError as e:
            print(' error: unexpected EOF while processing file', e)
        except Exception as e:
            print(' file processing error, SKIP', e)
def load_dir(self, log_dir ='/tmp'):
log_files = sorted(glob.glob(log_dir + '/' + '*.log'))
for file in log_files:
self.log_db.create_cursor()
self.load_file(file)
self.log_db.commit()
log_files = sorted(glob.glob(log_dir + '/' + '*.log.gz'))
for file in log_files:
self.log_db.create_cursor()
self.load_file(file)
self.log_db.commit()
def load_from_blobs(self, path=''):
log_storage = LogStorage()
log_storage.process_blob_dir(path, self.load_file)
def load_from_blob_by_date(self, year, month, day):
log_storage = LogStorage()
log_storage.process_blob_date_with_padding(year, month, day, self.load_file)
def order_book_tick(self, time_stamp, order_book):
if self.book_last_time != time_stamp:
self.log_db.insert_order_book_message(time_stamp, order_book)
self.book_last_time = time_stamp
def funding_tick(self, time_stamp, funding):
self.log_db.insert_funding(time_stamp, funding)
def trade_tick(self, time_stamp, buy_trade, sell_trade):
for price in buy_trade.keys():
self.log_db.insert_buy_trade(time_stamp, price, buy_trade[price])
for price in sell_trade.keys():
self.log_db.insert_sell_trade(time_stamp, price, sell_trade[price])
if __name__ == '__main__':
log_dir = '/tmp'
db_file = '/tmp/bitlog.db'
    if len(sys.argv) == 3:
        log_dir = sys.argv[1]  # sys.argv[0] is the script name; real args start at 1
        db_file = sys.argv[2]
print(log_dir, db_file)
db_loader = DbLoader()
db_loader.open_db()
db_loader.load_dir(log_dir)
db_loader.close_db()
|
yasstake/mmf
|
log/dbloader.py
|
dbloader.py
|
py
| 2,864 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73730402429
|
import boto3
import logging
import os
import json
import time
from datetime import datetime
from jsonpath_ng.ext import parse
import helpers
logger = logging.getLogger()
logger.setLevel(logging.INFO)
utl = helpers.Utils()
dyn = helpers.Dyn()
ssm = boto3.client('ssm')
ec2 = boto3.client('ec2')
appValue = os.getenv('TAG_APP_VALUE')
appName = os.getenv('APP_NAME')
def getNicInformation(instance):
logger.info(instance + "- getNicInformation")
ssm_rsp = ssm.send_command(
InstanceIds=[instance],
DocumentName='AWS-RunShellScript',
TimeoutSeconds=30,
Parameters={
'commands':[
"NIC=$(ifconfig -a | grep UP,BROADCAST | awk '{print substr($1, 1, length($1)-1)}');aws ssm put-parameter --name '/amplify/minecraftserverdashboard/" + instance + "/nic' --type 'String' --value $NIC"
]
},
)
resp = checkExecutionLoop(instance,ssm_rsp["Command"]["CommandId"])
logger.info(resp)
def minecraftInit(instance):
logger.info(instance + " - minecraftInit")
instanceInfo = dyn.GetInstanceAttr(instance)
logger.info(instanceInfo)
if instanceInfo['code'] != 200:
logger.warning("Instance data does not exist")
return False
if 'runCommand' in instanceInfo['msg'] and 'workingDir' in instanceInfo['msg']:
script = os.path.join(instanceInfo['msg']['workingDir'],instanceInfo['msg']['runCommand'])
#script = instanceInfo['msg']['runCommand']
ssm_rsp = ssm.send_command(
InstanceIds=[instance],
DocumentName='AWS-RunShellScript',
TimeoutSeconds=30,
Parameters={
'commands':[
script
],
'workingDirectory':[
instanceInfo['msg']['workingDir']
],
},
)
logger.info(ssm_rsp)
else:
logger.warning("RunCommand or Working Directories are not defined")
return False
def cwAgentStatusCheck(instance):
logger.info(instance + " - cwAgentStatusCheck")
ssmAgentStatus = ssmExecCommands(instance,"AmazonCloudWatch-ManageAgent",{"action": ["status"],"mode": ["ec2"]})
#logger.info(ssmAgentStatus)
    # Check the agent status on success. Failed messages occur when the CloudWatch Agent is not installed.
if ssmAgentStatus["Status"] == "Success":
agentDetails=""
jpexpr = parse("$.pluginsDetails[?(@.Name[:] == 'ControlCloudWatchAgentLinux')].Output")
for i in jpexpr.find(ssmAgentStatus):
agentDetails = i.value
if len(agentDetails) > 5:
agentDetailsJson = json.loads(agentDetails)
if agentDetailsJson["status"] == "running":
logger.info("Agent is already running. Version :" + agentDetailsJson["version"])
# AmazonCloudWatch Agent configuration
logger.info("Configuring agent")
ssmAgentConfig = ssmExecCommands(instance,"AmazonCloudWatch-ManageAgent",{"action": ["configure"],"mode": ["ec2"],"optionalConfigurationLocation": ["/amplify/minecraftserverdashboard/amazoncloudwatch-linux"],"optionalConfigurationSource": ["ssm"],"optionalRestart": ["yes"]})
logger.info(ssmAgentConfig)
return { "code": 200, "msg": "Agent is already running. Version :" + agentDetailsJson["version"] }
else:
logger.info("Agent Status: " + agentDetailsJson["status"] + " - configuration Status: " + agentDetailsJson["configstatus"])
return { "code": 400, "msg":"Agent Status: " + agentDetailsJson["status"] + " - configuration Status: " + agentDetailsJson["configstatus"] }
else:
            logger.warning(agentDetails)  # agentDetailsJson is unbound on this branch
return { "code": 500, "msg": "Detailed information not available"}
else:
return { "code": 500, "msg": "Failed" }
def cwAgentInstall(instance):
ssmInstallAgent = ssmExecCommands(instance,"AWS-ConfigureAWSPackage",{"action": ["Install"],"name": ["AmazonCloudWatchAgent"]})
#logger.info(ssmInstallAgent)
    # Check the agent status on success. Failed messages occur when the CloudWatch Agent is not installed.
if ssmInstallAgent["Status"] == "Success":
# AmazonCloudWatch Agent installation
jpexpr = parse("$.pluginsDetails[?(@.Name[:] == 'configurePackage')].Output")
for i in jpexpr.find(ssmInstallAgent):
agentDetails = i.value
logger.info(agentDetails)
# AmazonCloudWatch Agent configuration
logger.info("Configuring agent")
ssmAgentConfig = ssmExecCommands(instance,"AmazonCloudWatch-ManageAgent",{"action": ["configure"],"mode": ["ec2"],"optionalConfigurationLocation": ["/amplify/minecraftserverdashboard/amazoncloudwatch-linux"],"optionalConfigurationSource": ["ssm"],"optionalRestart": ["yes"]})
logger.info(ssmAgentConfig)
def scriptExec(instance):
ssmRunScript = ssmExecCommands(instance,"AWS-RunRemoteScript",{"sourceType": ["GitHub"],"sourceInfo": ["{\"owner\":\"arturlr\", \"repository\": \"minecraft-server-dashboard\", \"path\": \"scripts/adding_cron.sh\", \"getOptions\": \"branch:dev\" }"],"commandLine": ["bash adding_cron.sh"]})
logger.info(ssmRunScript)
def sendCommand(instance, param, docName):
ssm_rsp = ssm.send_command(
InstanceIds=[instance],
DocumentName=docName,
TimeoutSeconds=30,
Parameters=param
)
# logger.info("sendCommand " + instance + " - " + ssm_rsp["Command"]["Status"])
return { "CommandId": ssm_rsp["Command"]["CommandId"], "Status": ssm_rsp["Command"]["Status"] }
def listCommand(instance, commandId):
ssm_rsp = ssm.list_commands(
CommandId=commandId,
InstanceId=instance,
)
logger.info("listCommand " + instance + " - " + ssm_rsp["Commands"][0]["Status"])
return { "Status": ssm_rsp["Commands"][0]["Status"] }
def getCommandDetails(instance, commandId):
ssm_rsp = ssm.list_command_invocations(
CommandId=commandId,
InstanceId=instance,
Details=True
)
if 'CommandPlugins' in ssm_rsp["CommandInvocations"][0]:
pluginsDetails = ssm_rsp["CommandInvocations"][0]["CommandPlugins"]
logger.info("getCommandDetails " + instance + " - " + ssm_rsp["CommandInvocations"][0]["Status"])
return { "Status": ssm_rsp["CommandInvocations"][0]["Status"], "pluginsDetails": pluginsDetails }
def checkExecutionLoop(instanceId, commandId, sleepTime=5):
loopCount = 0
while True:
checkStatusCommand = listCommand(instanceId, commandId)
logger.info(instanceId + " - " + commandId + " - " + checkStatusCommand["Status"])
if checkStatusCommand["Status"] == "Success":
getStatusDetails = getCommandDetails(instanceId, commandId)
return getStatusDetails
elif checkStatusCommand["Status"] == "Failed":
return "Failed"
elif loopCount > 5:
logger.error("Timeout - Cancelling the Command")
logger.error(checkStatusCommand)
ssm.cancel_command(
CommandId=commandId,
InstanceIds=[instanceId]
)
return "Cancelled"
else:
loopCount = loopCount + 1
time.sleep(sleepTime)
def ssmExecCommands(instanceId, docName, params):
logger.info("ssmExecCommands " + instanceId + " - " + docName)
command = sendCommand(instanceId, params, docName)
response = checkExecutionLoop(instanceId,command["CommandId"])
return response
def handler(event, context):
try:
instanceId = event["instanceId"]
# Execute minecraft initialization
minecraftInit(instanceId)
# Nic Value
getNicInformation(instanceId)
## CloudWatch Agent Steps
cwAgentStatus = cwAgentStatusCheck(instanceId)
if cwAgentStatus['code'] != 200:
cwAgentInstall(instanceId)
scriptExec(instanceId)
return { "code": 200, "msg": "CW Agent installed and Script executed"}
else:
return cwAgentStatus
except Exception as e:
logger.error('Something went wrong: ' + str(e))
return { "code": 500, "msg": str(e) }
|
arturlr/minecraft-server-dashboard
|
lambdas/configServer/index.py
|
index.py
|
py
| 8,475 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22043825261
|
from flask import Response
import json
from presentation.contracts import HttpController, HttpRequest
def adapt_route(flask_request, controller: HttpController):
    # Adapt a Flask request to the framework-agnostic controller contract.
    # (The original file had an unreachable duplicate of this body after the
    # first return; the intended try/except version is kept here.)
    try:
        request = HttpRequest(
            params=flask_request.args,
            body=flask_request.json
        )
        data = controller.handle(request)
        return Response(
            json.dumps(data.body),
            status=data.status,
            mimetype='application/json'
        )
    except Exception:
        # Any unhandled controller error becomes a generic 500 response.
        return Response(
            json.dumps({"error": "Internal server error"}),
            status=500,
            mimetype='application/json'
        )
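# A hypothetical wiring sketch: PingController, _Resp and the /ping route are all
# made-up stand-ins that only mimic the body/status attributes adapt_route expects.
from dataclasses import dataclass
from flask import Flask, request

@dataclass
class _Resp:
    status: int
    body: dict

class PingController:
    def handle(self, http_request):
        return _Resp(status=200, body={"pong": True})

app = Flask(__name__)

@app.route("/ping", methods=["POST"])
def ping():
    return adapt_route(request, PingController())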
|
panda-coder/py-clean-flask
|
src/main/adapters/flask_route_adapter.py
|
flask_route_adapter.py
|
py
| 924 |
python
|
en
|
code
| 1 |
github-code
|
6
|
51262091
|
from typing import *
# class Solution:
# def atMostNGivenDigitSet(self, digits: List[str], n: int) -> int:
# n_str = str(n)
# k = len(n_str)
# res = 0
# for i in range(1, k):
# res += len(digits) ** i
# def dfs(cur, pos, res):
# # base case
# if pos == k:
# res += 1
# return res
# for d in digits:
# if d < n_str[pos]:
# res += len(digits) ** (k-1-pos)
# elif d == n_str[pos]:
# res = dfs(cur*10 + int(d), pos+1, res)
# return res
# res = dfs(0, 0, res)
# return res
class Solution:
    def atMostNGivenDigitSet(self, digits: List[str], n: int) -> int:
        n_str = str(n)
        k = len(n_str)
        # numbers with fewer digits than n: every position is a free choice
        res = 0
        for i in range(1, k):
            res += len(digits) ** i
        def dfs(cur, pos):
            # NOTE: mutate the enclosing counter; nonlocal avoids a module-level global
            nonlocal res
            # base case: matched every digit of n, so this prefix equals n itself
            if pos == k:
                res += 1
                return
            for d in digits:
                if d < n_str[pos]:
                    # anything smaller here leaves the remaining positions free
                    res += len(digits) ** (k-1-pos)
                elif d == n_str[pos]:
                    dfs(cur*10 + int(d), pos+1)
        dfs(0, 0)
        return res
if __name__ == "__main__":
s = Solution()
digits = ["7"]
n = 8
assert s.atMostNGivenDigitSet(digits, n) == 1
digits = ["1","3","5","7"]
n = 100
assert s.atMostNGivenDigitSet(digits, n) == 20
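    # sanity check on the pre-loop: numbers with fewer digits than n get
    # len(digits) ** i free choices per length i. For the second case (n = 100):
    k = len(str(100))                                    # 3 digits
    print(sum(len(digits) ** i for i in range(1, k)))    # 20 shorter numbers, all < 100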
|
code-cp/leetcode
|
solutions/902/main.py
|
main.py
|
py
| 1,623 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70929713788
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 6 11:55:47 2023
@author: Gilberto
"""
import pandas as pd
from datetime import datetime, timedelta
class StraightLineAmortization:
def __init__(self, settlement_date, maturity_date, first_payment_date, notional_amount, rate, basis_numerator, basis_denominator, amortization_years, payment_frequency):
self.settlement_date = datetime.strptime(settlement_date, "%m/%d/%Y") if isinstance(settlement_date, str) else settlement_date
self.maturity_date = datetime.strptime(maturity_date, "%m/%d/%Y") if isinstance(maturity_date, str) else maturity_date
self.first_payment_date = datetime.strptime(first_payment_date, "%m/%d/%Y") if isinstance(first_payment_date, str) else first_payment_date
self.notional_amount = notional_amount
self.rate = rate/100
self.basis_numerator = basis_numerator
self.basis_denominator = basis_denominator
self.amortization_years = amortization_years
# Add the payment_frequency variable
self.payment_frequency = payment_frequency
# Adjust the num_periods and monthly_principal_payment based on payment_frequency
if self.payment_frequency == "1M":
self.num_periods = self.amortization_years * 12
elif self.payment_frequency == "3M":
self.num_periods = self.amortization_years * 4
elif self.payment_frequency == "6M":
self.num_periods = self.amortization_years * 2
self.period_principal_payment = self.notional_amount / self.num_periods
def compute_days(self, start_date, end_date):
if self.basis_numerator == "ACT":
days = (end_date - start_date).days
else:
days = 30 # assuming each month has 30 days
if self.basis_denominator == 360:
return days
else:
return days / 365.0 * 360.0
def get_next_dates(self, current_date):
if current_date == self.settlement_date:
return self.first_payment_date, self.first_payment_date
# Calculate next_month and next_year based on payment_frequency
if self.payment_frequency == "1M":
months_increment = 1
elif self.payment_frequency == "3M":
months_increment = 3
elif self.payment_frequency == "6M":
months_increment = 6
next_month = (current_date.month + months_increment - 1) % 12 + 1
next_year = current_date.year + (current_date.month - 1 + months_increment) // 12
period_end_date = current_date.replace(year=next_year, month=next_month, day=self.first_payment_date.day)
payment_date = period_end_date
# If it's a weekend, move to the next business day for payment date
while payment_date.weekday() >= 5:
payment_date += timedelta(days=1)
return period_end_date, payment_date
def generate_schedule(self):
data = []
current_date = self.settlement_date
payment_number = 1
notional_amount = self.notional_amount
while current_date < self.maturity_date and payment_number <= self.num_periods:
period_start_date = current_date
period_end_date, payment_date = self.get_next_dates(current_date)
days_in_period = self.compute_days(period_start_date, period_end_date)
actual_days_in_period = (period_end_date - period_start_date).days
interest_for_period = (notional_amount * self.rate * days_in_period) / self.basis_denominator
period_payment = round(interest_for_period + self.period_principal_payment,2)
notional_amount -= self.period_principal_payment
data.append([period_start_date, period_end_date, payment_date, payment_number, notional_amount + self.period_principal_payment, period_payment, self.period_principal_payment, actual_days_in_period])
current_date = period_end_date # Start next period the same day as the previous period's end date
payment_number += 1
df = pd.DataFrame(data, columns=['Period Start Date', 'Period End Date', 'Payment Date', 'Payment Number', 'Outstanding Balance', 'Period Payment', 'Principal Payment', 'Actual Days in Period'])
return df
# Usage
sla = StraightLineAmortization("8/1/2022", "8/1/2032", "9/1/2022", 600000, 7.03, "ACT", 360, 25, "3M")
amortization_schedule = sla.generate_schedule()
print(amortization_schedule)
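# sanity check on the straight-line principal for the usage above:
# 25 years at quarterly ("3M") frequency gives 25 * 4 = 100 payments
notional, years, periods_per_year = 600000, 25, 4
print(notional / (years * periods_per_year))   # 6000.0 of principal per period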
|
gdelacruzv/Amortization_calculator
|
straightline_v2.py
|
straightline_v2.py
|
py
| 4,582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26396707826
|
# Establish the Python Logger
import logging # built in python library that does not need to be installed
import time
from datetime import datetime
import os
import talking_code as tc
speaking_log = False
speaking_steps = False
def set_speaking_log(on_off_setting = False):
global speaking_log
speaking_log = on_off_setting
def get_speaking_log():
return speaking_log
def set_speaking_steps(on_off_setting = False):
global speaking_steps
speaking_steps = on_off_setting
def get_speaking_steps():
return speaking_steps
def talk(speech):
tc.say(speech)
return
def set_start_time():
start_time = time.time()
return(start_time)
def create_logger_start(solution_name, start_time):
logging.basicConfig(level=logging.INFO, filename=solution_name + '.log',
filemode='w', format='%(asctime)s - %(levelname)s - %(message)s')
process_start_time_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'[:-3])
logging.info(f'START {solution_name} ' + ('=' * 45) )
logging.info(f'START {solution_name} Start Time = {process_start_time_stamp}')
logging.info(f'{solution_name} Step 0 - Initialize the configuration file parser')
# return f'logger_started for {solution_name} at {process_start_time_stamp}'
return logging
def append_log_file(solution_name):
log_filename=solution_name + '.log'
historical_log_filename=solution_name + '_history.log'
with open(log_filename) as log_file:
log_content = log_file.read()
with open(historical_log_filename,'a') as historical_log_file:
print(120*' ', file=historical_log_file)
print(120*'>', file=historical_log_file)
print(log_content, file=historical_log_file)
print(120*'<', file=historical_log_file)
print(120*' ', file=historical_log_file)
return(log_content)
def calculate_process_performance(solution_name, process_start_time):
    stop_time = time.time() # establish the stop time of the overall process.
process_duration = stop_time - process_start_time
process_stop_time_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'[:-3])
logging.info(f'PERFORMANCE {solution_name} The total process duration was:{process_duration:.2f}')
logging.info(f'PERFORMANCE {solution_name} Stop Time = {process_stop_time_stamp}')
status = f'END {solution_name} Duration Classification Error - Process Duration UNKNOWN'
if process_duration > 600.0:
logging.info(f'PERFORMANCE {solution_name} LONG process duration greater than 10 Minutes:{process_duration:.2f}')
logging.info(f'PERFORMANCE {solution_name} Performance optimization is required')
    elif process_duration > 120.0:
        logging.info(f'PERFORMANCE {solution_name} Medium process duration greater than 2 minutes:{process_duration:.2f}')
logging.info(f'PERFORMANCE {solution_name} Performance optimization is optional')
    elif process_duration > 3.0:
        logging.info(f'PERFORMANCE {solution_name} Low process duration between 3 seconds and 2 minutes:{process_duration:.2f}')
logging.info(f'PERFORMANCE {solution_name} Performance optimization is optional')
elif process_duration < 3.0:
logging.info(f'PERFORMANCE {solution_name} Short process duration less than 3 Seconds:{process_duration:.2f}')
        logging.info(f'PERFORMANCE {solution_name} Performance optimization is not recommended')
else:
status = f'PERFORMANCE {solution_name} Duration Classification Error - Process Duration UNKNOWN'
logging.info(f'END {solution_name} ' + ('=' * 45) )
return(status)
def pvlog(log_level, log_string):
global speaking_log
global speaking_steps
print(log_string)
if speaking_log:
tc.say(log_string)
if speaking_steps:
if log_string.find("Step") > -1:
tc.say(log_string)
if log_level == 'debug':
logging.debug(log_string)
if log_level == 'info':
logging.info(log_string)
    if log_level == 'warn':
        logging.warning(log_string)  # logging.warn is a deprecated alias
if log_level == 'error':
logging.error(log_string)
if log_level == 'critical':
logging.critical(log_string)
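if __name__ == "__main__":
    # minimal usage sketch of the helpers above; "demo_app" is a made-up name
    start = set_start_time()
    create_logger_start("demo_app", start)
    pvlog("info", "demo_app Step 1 - doing the work")
    calculate_process_performance("demo_app", start)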
|
JoeEberle/kids_ABC_book
|
quick_logger.py
|
quick_logger.py
|
py
| 5,079 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26120509391
|
import os
import sys
import csv
from collections import Counter, defaultdict
import pandas as pd
from statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa
#from pptx import Presentation
# configure Django so we can use models from the annotate app
sys.path.append('/home/nejl/Dropbox/projects/tator/repo/tator')
os.environ['DJANGO_SETTINGS_MODULE'] = 'tagit.settings'
import django
django.setup()
from django.contrib.auth.models import User
from annotate.models import Query, Annotation, UserResponse
from templates import slide_template
# TODO: need to add sampling method that samples equally from dividing the
# probability mass of the distribution into thirds: top most frequent, middle,
# and bottom.
# TODO: fix analysis to have a global collection filter and then make sure adding
# annotations to queries in a different collection does not change the results
def split_data_frame_by_prob(df, column, nbins):
# splits a dataframe into 'nbins' of equal probability mass
# using column specified by 'coilumn'
df = df.sort_values(column, ascending=False)
values = df[column]
bin_probability = 1/nbins
total = sum(values)
cutoffs = []
cumulative_total = 0
next_bin_probability = bin_probability
    for i, count in enumerate(values):
        cumulative_total += count
        if cumulative_total/total < next_bin_probability:
            continue
        # include the row that crosses the threshold in the current bin
        # (appending i would leave the first bin empty whenever row 0 crosses it)
        cutoffs.append(i + 1)
        next_bin_probability += bin_probability
start = 0
new_dfs = []
while cutoffs:
cutoff = cutoffs.pop(0)
if len(cutoffs) == 0:
# last item; get the rest
new_dfs.append(df[start:])
else:
new_dfs.append(df[start:cutoff])
start = cutoff
return new_dfs
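# A toy run of the splitter above; column names mirror the query logs used later.
def _split_demo():
    toy = pd.DataFrame({"querystring": list("abcdefgh"),
                        "countqstring": [40, 20, 10, 10, 8, 6, 4, 2]})
    parts = split_data_frame_by_prob(toy, "countqstring", 3)
    return [int(p["countqstring"].sum()) for p in parts]   # roughly equal mass, e.g. [40, 30, 30]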
def load_queries(path):
with open(path) as csvfile:
df = pd.read_csv(csvfile, delimiter=';')
return df
def clean_queries(df):
"""Returns the input DataFrame of queries cleaned"""
# filter out queries with length less than 2 characters long
df = df[df['querystring'].str.len() > 1]
return df
def split_num(num, splits):
"""Returns the number 'num' divided into a list of numbers of size 'splits' """
splits = [int(num/splits)+1]*(num%splits) + [int(num/splits)]*(splits-num%splits)
assert sum(splits) == num
return splits
def import_queries(path, collection, sample='first', limit=None, allow_dupes=False):
df = load_queries(path)
df = clean_queries(df)
if not allow_dupes:
# remove existing queries from candidate queries to sample
existing = [query.text for query in Query.objects.all()]
df = df[~df['querystring'].isin(existing)]
if limit is not None:
if sample == 'first':
df = df[:limit]
elif sample == 'random':
df = df.sample(limit)
elif sample == 'proportional':
df = df.sample(limit, weights='countqstring')
elif sample == 'split':
split_size = 3
splits = split_data_frame_by_prob(df, 'countqstring', split_size)
sizes = split_num(limit, split_size)
sub_samples = []
for size, split_df in zip(sizes, splits):
sub_samples.append(split_df.sample(size, weights='countqstring'))
df = pd.concat(sub_samples)
assert len(df) == limit
else:
print('Unknown sampling method')
return
for i, values in enumerate(df.values.tolist()):
text, count = values
Query.objects.create(text=text, count=count, collection=collection)
print("Added {} queries to the database.\n".format(i+1))
print(df.describe())
def pretty_print_counter(counter, reverse=False):
lines = []
for key, value in sorted(counter.items(), reverse=reverse):
lines.append("{}: {}".format(key, value))
return "\n".join(lines)
def get_user_results(username, collection=None):
# for each user, display the number of results
# user
lines = ["*** Annotator: {} ***".format(username)]
lines.append("===================================\n")
responses = UserResponse.objects.filter(user__username=username)
if collection is not None:
responses = responses.filter(query__collection=collection)
annotations = [r for r in responses if r.annotation]
skipped = [r for r in responses if r.skipped]
lines.append("{} Skipped Queries:\n".format(len(skipped)))
for response in skipped:
line =' "{}"\n --- "{}"'.format(response.query.text,
response.skipped.description)
lines.append(line)
lines.append("\n{} Annotations:\n".format(len(annotations)))
lines.append(Annotation._meta.get_field('is_geo').verbose_name)
q1 = Counter(r.annotation.is_geo for r in annotations)
lines.append(pretty_print_counter(q1, reverse=True))
lines.append("")
lines.append(Annotation._meta.get_field('loc_type').verbose_name)
q2 = Counter(r.annotation.loc_type for r in annotations)
lines.append(pretty_print_counter(q2, reverse=True))
lines.append("")
lines.append(Annotation._meta.get_field('query_type').verbose_name)
q3 = Counter(r.annotation.query_type for r in annotations)
lines.append(pretty_print_counter(q3))
return "\n".join(lines)
def do_iaa_pairs(user_pairs, questions=(1,2,3), collection=None, level='fine'):
results = defaultdict(list)
for question in questions:
for users in user_pairs:
kappa = get_iaa(question, users=users, collection=collection, level=level)
results[question].append(kappa)
return results
def print_iaa_pairs(results, user_pairs):
print(' '+' '.join(', '.join(user) for user in user_pairs))
for question, kappas in results.items():
ks = ''.join("{:0<5.3} ".format(k) for k in kappas)
print("Q{}: {}".format(question, ks))
def get_iaa(question_num, queries=None, users=None, collection=None, level='fine'):
data = get_annotations(question_num, queries=queries, users=users, level=level, collection=collection)
#n_cat = Annotation.get_num_categories(question_num)
results = aggregate_raters(data, n_cat=None)
kappa = fleiss_kappa(results[0])
return kappa
def get_annotations(question_num, queries=None, users=None, level='fine', collection=None):
    assert level in ('fine', 'coarse')
    # build the queryset under its own name so the 'queries' argument is not shadowed
    queryset = Query.objects.exclude(responses__skipped__isnull=False).distinct()
    if collection is not None:
        queryset = queryset.filter(collection=collection)
    if queries is not None:
        queryset = queryset.filter(pk__in=queries)
    data = []
    for query in queryset:
# get all non-skipped results
responses = query.responses.exclude(skipped__isnull=False)
if users is not None:
# restrict annotations to supplied users
responses = responses.filter(user__username__in=users)
results = [r.annotation.get_question(question_num) for r in responses]
if question_num in (2,3) and level == 'coarse':
# use course grained agreement
results = [r[0] for r in results]
if results:
data.append(results)
return data
def show_agreement(question_num, users, collection=None, skip_agree=True):
lines = []
queries = Query.objects.exclude(responses__skipped__isnull=False).distinct()
if collection is not None:
queries = queries.filter(collection=collection)
queries = sorted(queries, key=lambda x:x.pk)
users.sort()
col_width = max(len(u) for u in users) + 2
lines.append("".join("{u:{width}}".format(u=u, width=col_width)
for u in users))
agree = 0
disagree = 0
for query in queries:
responses = query.responses.order_by('user__username')
answers = [r.annotation.get_question(question_num) for r in responses]
if skip_agree and len(set(answers)) <= 1:
# all annotators agree, skip
agree += 1
continue
disagree += 1
line = "".join("{a:<{width}}".format(a=a, width=col_width)
for a in answers) + query.text
lines.append(line)
start = [
"Question {}:".format(question_num),
"Number all agree: {}".format(agree),
"Number with some disagreement: {}".format(disagree),
""
]
return "\n".join(start + lines)
def get_results(users):
queries = Query.objects.exclude(responses__skipped__isnull=False).distinct()
queries = sorted(queries, key=lambda x:x.pk)
users.sort()
rest_cols = ["Q{}_{}".format(num, user) for user in users for num in (1,2,3)]
header = ['id', 'query'] + rest_cols
rows = [header]
for query in queries:
row = [query.pk, query.text]
responses = query.responses.order_by('user__username')
for response in responses:
row.append(response.annotation.get_question(1))
row.append(response.annotation.get_question(2))
row.append(response.annotation.get_question(3))
rows.append(row)
return rows
def export_results_csv(users, outfile='annotations.csv'):
results = get_results(users)
with open(outfile, 'w', encoding='utf8', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerows(results)
def make_slides_latex(users, csv_path=None, outfile='slides/slides.tex'):
    # 'csv_path' avoids shadowing the csv module that csv.reader needs below
    if csv_path is None:
        results = get_results(users)
    else:
        with open(csv_path, encoding='utf8') as csvfile:
            results = list(csv.reader(csvfile))
lines = []
header = results[0]
for i, query in enumerate(results[1:]):
row1 = r"Q1 & {} & {} & {}\\".format(query[2], query[5], query[8])
row2 = r"Q2 & {} & {} & {}\\".format(query[3], query[6], query[9])
row3 = r"Q3 & {} & {} & {}\\".format(query[4], query[7], query[10])
rows = "\n".join([row1, row2, row3])
title = "Query {}".format(i+1)
slide = slide_template.format(title=title, query=query[1], rows=rows)
lines.append(slide)
with open(outfile, 'w', encoding='utf8') as texfile:
texfile.write('\n'.join(lines))
def make_slides_pptx(users, csv_path=None):
    """Not finished. Used latex instead"""
    if csv_path is None:
        results = get_results(users)
    else:
        with open(csv_path, encoding='utf8') as csvfile:
            results = list(csv.reader(csvfile))
header = results[0]
prs = Presentation()
slide_layout = prs.slide_layouts[1]
for i, query in enumerate(results[1:]):
slide = prs.slides.add_slide(slide_layout)
slide.shapes.title.text = 'Query {}'.format(i+1)
body_shape = slide.shapes.placeholders[1]
tf = body_shape.text_frame
p = tf.paragraphs[0]
p.text = query[1]
p.level = 0
prs.save('test.pptx')
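# A toy run of the agreement pipeline above: aggregate_raters maps arbitrary labels
# to a per-item category count table, and fleiss_kappa scores that table.
def _kappa_demo():
    toy = [["yes", "yes", "no"],
           ["yes", "yes", "yes"],
           ["no",  "no",  "no"],
           ["yes", "no",  "no"]]           # 4 items, 3 raters each
    table, categories = aggregate_raters(toy)
    return fleiss_kappa(table)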
|
ned2/tator
|
notebooks/utils.py
|
utils.py
|
py
| 11,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71174455228
|
# -*- coding: utf-8 -*-
import time, functools
def metric(fn):
    # Dual-form decorator: used bare (@metric) fn is the function itself;
    # used with a label (@metric('label')) fn is the string to print first.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            print(fn) if isinstance(fn, str) else print('no metric args')
            start_time = time.time()
            return (func(*args, **kw), print('%s executed in %s ms' % (func.__name__, (time.time() - start_time) * 1000)))[0]
        return wrapper
    return decorator if isinstance(fn, str) else decorator(fn)
@metric
def fast(x, y):
time.sleep(0.0012)
return x + y;
@metric('test')
def slow(x, y, z):
time.sleep(0.1234)
return x * y * z;
f = fast(11, 22)
s = slow(11, 22, 33)
if f != 33:
    print('Test failed!')
elif s != 7986:
    print('Test failed!')
else:
    print('Test passed!')
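# For comparison, a more conventional sketch of the same dual-form decorator;
# metric2 is a made-up name and is not part of the original exercise.
def metric2(arg=None):
    def make(label):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kw):
                if label is not None:
                    print(label)
                start = time.time()
                result = func(*args, **kw)
                print('%s executed in %s ms' % (func.__name__, (time.time() - start) * 1000))
                return result
            return wrapper
        return decorator
    if callable(arg):          # used bare: @metric2
        return make(None)(arg)
    return make(arg)           # used with a label: @metric2('label')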
|
kfusac/LearnPython
|
LiaoxuefengPython/5_FunctionalProgramming/decorator.py
|
decorator.py
|
py
| 756 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23995078592
|
import os
from collections import deque
from typing import Dict, List, Optional, Any
import langchain
import openai
import pinecone
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.agents import AgentType, ZeroShotAgent, Tool, AgentExecutor, initialize_agent
from langchain.llms import OpenAI, LlamaCpp, BaseLLM
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.embeddings import OpenAIEmbeddings
import faiss
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
# Initialize our LLM
llm = OpenAI(temperature=0)
class TaskCreationChain(LLMChain):
"""Chain to generates tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_creation_template = (
"You are a task creation AI that uses the result of an execution agent"
" to create new tasks with the following objective: {objective},"
" The last completed task has the result: {result}."
" This result was based on this task description: {task_description}."
" These are incomplete tasks: {incomplete_tasks}."
" Based on the result, create new tasks to be completed"
" by the AI system that do not overlap with incomplete tasks."
" Return the tasks as an array."
)
prompt = PromptTemplate(
template=task_creation_template,
input_variables=[
"result",
"task_description",
"incomplete_tasks",
"objective",
],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class TaskPrioritizationChain(LLMChain):
"""Chain to prioritize tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_prioritization_template = (
"You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing"
" the following tasks: {task_names}."
" Consider the ultimate objective of your team: {objective}."
" Do not remove any tasks. Return the result as a numbered list, like:"
" #. First task"
" #. Second task"
" Start the task list with number {next_task_id}."
)
prompt = PromptTemplate(
template=task_prioritization_template,
input_variables=["task_names", "next_task_id", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class ExecutionChain(AgentExecutor):
"""Chain to execute tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> AgentExecutor:
"""Get the response parser."""
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(template=template, input_variables=["chat_history", "input"])
memory = ConversationBufferMemory(memory_key="chat_history")
read_memory = ReadOnlySharedMemory(memory=memory)
summary_chain = LLMChain(memory=read_memory, prompt=prompt, llm=llm, verbose=True)
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
),
Tool(
name = "Summary",
func=summary_chain.run,
description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary."
)
]
prefix = """
You are an AI who performs one task based on the following objective: {objective}.
Take into account these previously completed tasks: {context}.
"""
suffix = """
Your task: {task}.
Response:
{agent_scratchpad}
"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "context", "task", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=verbose)
return cls.from_agent_and_tools(agent=agent, tools=tools, verbose=verbose, memory=memory)
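# Module-level tools and prompt consumed by BabyAGI.from_llm below (a second,
# standalone agent setup, separate from ExecutionChain.from_llm above).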
todo_template = """
You are an expert in walking through your thoughts. You are able to take the main objective, as well as the tasks, and create step by step observations. Here is the objective: {objective}
"""
todo_prompt = PromptTemplate(template=todo_template, input_variables=["objective"])
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up brainstorming for the current task at hand. Input: an objective to create a todo list for as well as the task. Output: a rational thought process behind the objective and task, enough to help you craft a perfect response.",
),
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
zeroshot_prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
def get_next_task(
task_creation_chain: LLMChain,
result: Dict,
task_description: str,
task_list: List[str],
objective: str,
) -> List[Dict]:
"""Get the next task."""
incomplete_tasks = ", ".join(task_list)
response = task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
)
new_tasks = response.split("\n")
return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
def prioritize_tasks(
task_prioritization_chain: LLMChain,
this_task_id: int,
task_list: List[Dict],
objective: str,
) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in task_list]
next_task_id = int(this_task_id) + 1
response = task_prioritization_chain.run(
task_names=task_names, next_task_id=next_task_id, objective=objective
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
return prioritized_task_list
def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = vectorstore.similarity_search_with_score(query, k=k)
if not results:
return []
sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
return [str(item.metadata["task"]) for item in sorted_results]
def execute_task(
vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
) -> str:
"""Execute a task."""
context = _get_top_tasks(vectorstore, query=objective, k=k)
return execution_chain.run(objective=objective, context=context, task=task)
class BabyAGI(Chain, BaseModel):
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: TaskCreationChain = Field(...)
task_prioritization_chain: TaskPrioritizationChain = Field(...)
execution_chain: ExecutionChain = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict):
self.task_list.append(task)
def print_task_list(self):
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict):
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str):
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Run the agent."""
objective = inputs["objective"]
first_task = inputs.get("first_task", "Make a todo list")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = execute_task(
self.vectorstore, self.execution_chain, objective, task["task_name"]
)
this_task_id = int(task["task_id"])
self.print_task_result(result)
                # Step 3: Store the result in the vectorstore (FAISS in this setup, not Pinecone)
result_id = f"result_{task['task_id']}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = get_next_task(
self.task_creation_chain,
result,
task["task_name"],
[t["task_name"] for t in self.task_list],
objective,
)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(
prioritize_tasks(
self.task_prioritization_chain,
this_task_id,
list(self.task_list),
objective,
)
)
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print(
"\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
)
break
return {}
@classmethod
def from_llm(
cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = False, **kwargs
) -> "BabyAGI":
"""Initialize the BabyAGI Controller."""
task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
task_prioritization_chain = TaskPrioritizationChain.from_llm(llm, verbose=verbose)
llm_chain = LLMChain(llm=llm, prompt=zeroshot_prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True
)
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=agent_executor,
vectorstore=vectorstore,
**kwargs,
)
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = None
baby_agi = BabyAGI.from_llm(
llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)
baby_agi({"objective": 'Write a cohesive and articulated story about a man named Jack who conquers the world.'})
|
satpat2590/somelangchainfun
|
main.py
|
main.py
|
py
| 13,333 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30728237360
|
import fileinput
import sys
from collections import deque, defaultdict, Counter
from functools import lru_cache
from itertools import permutations, combinations, combinations_with_replacement, product
sys.setrecursionlimit(10000000)
dd = defaultdict(lambda: 0)
dx = [0, 0, -1, 1] # NSWE
dy = [-1, 1, 0, 0] # NSWE
p1 = 6
p2 = 10
def part1(p1, p2):
die = 1
score1 = score2 = 0
turn = True
rolled = 0
while (score1 < 1000 and score2 < 1000):
d1 = die
die = die % 100 + 1
d2 = die
die = die % 100 + 1
d3 = die
die = die % 100 + 1
rolled += 3
roll = d1+d2+d3
if turn:
p1 = (p1 - 1 + roll) % 10 + 1
score1 += p1
else:
p2 = (p2 - 1 + roll) % 10 + 1
score2 += p2
turn = not turn
return min(score1, score2) * rolled
@lru_cache(maxsize=None)
def old_dp(p1, p2, score1, score2, turn):
if score1 >= 21:
return Counter({"p1": 1})
elif score2 >= 21:
return Counter({"p2": 1})
s = Counter()
for i, j, k in product([1, 2, 3], repeat=3):
if turn:
z = (p1 - 1 + i+j+k) % 10 + 1
s += old_dp(z, p2, score1 + z, score2, not turn)
else:
z = (p2 - 1 + i+j+k) % 10 + 1
s += old_dp(p1, z, score1, score2+z, not turn)
return s
dices = []
for i, j, k in product([1, 2, 3], repeat=3):
dices.append(i+j+k)
dices = Counter(dices)
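# dp(p1, p2, score1, score2) is written from the perspective of the player
# about to move; the recursive call swaps the players, so the (b, a) flip on
# return converts "mover's wins" back into (p1 wins, p2 wins).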
@lru_cache(maxsize=None)
def dp(p1, p2, score1, score2):
if score1 >= 21:
return (1,0)
elif score2 >= 21:
return (0,1)
a = b = 0
for k,times in dices.items():
z = (p1 - 1 + k) % 10 + 1
x,y = dp(p2, z, score2, score1 + z)
a+=times * x
b+=times * y
return (b,a)
# print(part1(p1, p2), max(old_dp(p1, p2, 0, 0, True)))
print(part1(p1, p2), max(dp(p1, p2, 0, 0)))
|
mdaw323/alg
|
adventofcode2021/21.py
|
21.py
|
py
| 1,904 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72052703228
|
import sys, json
from urllib.request import urlopen
from collections import OrderedDict
list_host = 'http://localhost:5000'
list_url = list_host + '/api/3/action/organization_list'
get_url = list_host + '/api/3/action/organization_show'
contents = urlopen(list_url)
org_list = json.load(contents)['result']
for org_name in org_list:
org_url = get_url + "?id=" + org_name
print("=== Loading " +org_name + " from " + org_url)
org_content = urlopen(org_url)
org_obj = json.load(org_content)['result']
org = OrderedDict()
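    # Copy only the whitelisted fields, preserving their order in the output.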
for key in ('name', 'title', 'description', 'site', 'email', 'region', 'identifier'):
if key in org_obj and org_obj[key]:
org[key] = org_obj[key]
org_filename = "orgs/"+org_name+".json"
with open(org_filename,"w+") as f:
f.write(json.dumps(org, indent=4))
print("=== Saved in "+org_filename+"\n")
|
italia/public-opendata-sources
|
export_orgs.py
|
export_orgs.py
|
py
| 888 |
python
|
en
|
code
| 17 |
github-code
|
6
|
20538743319
|
# https://leetcode.com/problems/rotting-oranges/
"""
Time complexity:- O(N)
Space Complexity:- O(N)
"""
"""
Intuition:
The algorithm uses Breadth-First Search (BFS) to simulate the rotting process, starting from initially rotten oranges.
The queue (q) is used to keep track of the rotten oranges and their coordinates.
The process continues until either all fresh oranges are rotten or there are no more rotten oranges.
The time variable keeps track of the minutes passed during the rotting process.
If there are still fresh oranges after the simulation, it means some oranges cannot be rotten, and -1 is returned.
The algorithm follows a simple and intuitive approach of simulating the rotting process through BFS traversal.
"""
import collections
from typing import List
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
q = collections.deque() # Using deque for efficient pop and append operations
fresh = 0 # Counter for fresh oranges
time = 0 # Variable to track time (minutes)
# Iterate through the grid to identify fresh and rotten oranges
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == 1:
fresh += 1
if grid[r][c] == 2:
q.append((r, c)) # Add coordinates of rotten oranges to the queue
# Directions to check neighboring cells (up, down, left, right)
directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]
# BFS traversal to simulate rotting process
while fresh > 0 and q:
length = len(q)
for i in range(length):
r, c = q.popleft() # Pop the front of the queue
for dr, dc in directions:
row, col = r + dr, c + dc
# Check if the neighboring cell is in bounds and contains a fresh orange
if (
row in range(len(grid))
and col in range(len(grid[0]))
and grid[row][col] == 1
):
grid[row][col] = 2 # Mark the orange as rotten
q.append((row, col)) # Add the coordinates to the queue
fresh -= 1 # Decrease the count of fresh oranges
time += 1 # Increment time after processing each minute
# Return the time required if all fresh oranges are rotten, otherwise return -1
return time if fresh == 0 else -1
|
Amit258012/100daysofcode
|
Day92/rotten_oranges.py
|
rotten_oranges.py
|
py
| 2,532 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19582017966
|
import socket
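# Minimal raw-socket HTTP client: open a TCP connection, send a bare GET,
# and print the size of the first response chunk.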
host = "192.168.0.1"
port = 80
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((host,port))
buf = b'-' * 30
s.send(b'GET / HTTP/1.1\r\n\r\n')  # note the space: 'GET /HTTP/1.1' is malformed; strict servers also expect a Host header
resp = s.recv(2048)
print("Number of bytes",len(resp))
print(buf.decode())
s.close()
|
indrajithbandara/py-studies
|
client3.py
|
client3.py
|
py
| 255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2694410426
|
import os
import sys
import signal
import threading
import multiprocessing
import atexit
import time
from ctypes import c_bool
from .module import StateIO
from .config import Config
class Controller:
def __init__(self, config: Config) -> None:
args, self.cfg = config.load()
self.pidfile_path = '/tmp/controller.pids' # should be config ?
        # make sure self.on_shutdown is always called on exit
atexit.register(self.on_shutdown)
self.procmem = {"StateIO": multiprocessing.Array(StateIO, 1)}
self.threads = {}
self.processes = {}
# parse config and import modules classes
self.modules_classes = {}
self.args_import(args)
# initialize all modules
self.modules_instances = [None]*99
self.modules_init()
def _module_init(self, mtype):
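        """Instantiate one module from its parsed config entry and slot it by RunPriority."""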
mclass = self.modules_classes[mtype]["ModuleName"]
args = self.modules_classes[mtype]["Attributes"]
init_order = self.modules_classes[mtype]["RunPriority"]
in_list = self.modules_classes[mtype]["InputMem"]
out_list = self.modules_classes[mtype]["OutputMem"]
# initialize BaseModule class
module = mclass(args, self.procmem, in_list, out_list)
module.INIT_NAME = mtype
module.INIT_ORDER = init_order
module.SHUTDOWN = multiprocessing.Value(c_bool, False)
        # TODO tick_delta should be handled here, with an info msg when it is set
run_type = self.modules_classes[mtype]["RunType"]
if run_type == 0:
module.IS_MAIN = True
elif run_type == 1:
module.IS_THREAD = True
elif run_type == 2:
module.IS_PROCESS = True
else:
print("fatal error: incorect RunType config at:\n module: {0}".format(module.INIT_NAME))
sys.exit(1)
if self.modules_instances[init_order]:
_name = self.modules_instances[init_order].INIT_NAME
print("fatal error: equal RunPriority config at modules:\n"
"module{0}\nmodule: {1}".format(_name, module.INIT_NAME))
sys.exit(1)
self.modules_instances.pop(init_order)
self.modules_instances.insert(init_order, module)
print(" {0} initilized".format(module.INIT_NAME))
def modules_init(self):
for key in self.modules_classes:
            async_init = self.modules_classes[key]["AsyncInit"]
            if async_init:
thread = threading.Thread(
target=self._module_init,
args=([key])
)
thread.daemon = True
thread.start()
else:
self._module_init(key)
self.modules_instances = [ inst for inst in self.modules_instances if inst is not None ]
index = list(range(len(self.modules_instances)))
for instance, n in zip(self.modules_instances, index):
instance.INIT_ORDER = n
def start(self):
self.kill_processes() # FIXME make sure only one instance is running
# save main pid
with open(self.pidfile_path, "w") as pidfile:
pid0 = str(os.getpid())
pidfile.write("%s\n" % (pid0))
print(" controller started with PID(s):", pid0)
tmp = []
while not self.procmem["StateIO"][0].shutdown:
# TODO test if class_import works here (hot reload), should be async call when updating instances
# TODO add nice arg to processes
# FIXME on_start is out of order
for instance in self.modules_instances:
# print("tick:",instance.INIT_NAME)
shutdown = instance.SHUTDOWN.value
if not instance.IS_RUNNING:
if instance.IS_PROCESS and not shutdown:
instance.IS_RUNNING = True
proc = multiprocessing.Process(target=instance._tick)
proc.name = instance.INIT_NAME
self.processes[proc.name] = proc
proc.start()
# TODO should save here process pids
elif instance.IS_THREAD and not shutdown:
instance.IS_RUNNING = True
thread = threading.Thread(target=instance._tick)
thread.name = instance.INIT_NAME
thread.daemon = True
self.threads[thread.name] = thread
thread.start()
elif not instance.IS_MAIN and not shutdown:
instance.IS_RUNNING = True
instance._start()
if instance.IS_MAIN and not shutdown:
try:
instance.on_tick()
except Exception as e:
# TODO error reporing format
print(instance.INIT_NAME)
print(e)
if shutdown:
tmp.append(instance.INIT_ORDER)
            # FIXME process is still visible in top with 0 mem after it is shut down
tmp.reverse()
for n in tmp:
name = self.modules_instances[n].INIT_NAME
if self.modules_instances[n].IS_PROCESS:
self.processes[name].kill()
self.processes[name].join(timeout=0.001)
self.processes[name].close()
del self.processes[name]
elif self.modules_instances[n].IS_THREAD:
del self.threads[name]
elif self.modules_instances[n].IS_MAIN:
self.modules_instances[n]._stop()
self.modules_instances.remove(self.modules_instances[n])
print(" {0} is removed".format(name))
tmp.clear()
# t_instances = len(self.modules_instances) + 1
# t_threads = len(self.threads)
# t_processes = len(self.processes)
# t_main = t_instances - t_threads - t_processes
# print(" main process instence(s): {0}\n"
# " thread instance(s): {1}\n"
# " subprocess instance(s): {2}\n"
# " total running instances: {3}"\
# .format(t_main,
# t_threads,
# t_processes,
# t_instances
# ))
time.sleep(1)
def on_shutdown(self):
"""
Called each time application is exiting throught atexit
"""
# FIXME will error on joystick
# TODO make sure threads are exited correctly
# TODO wait for processes to exit, check for zombie processes
pass
def shutdown(self):
self.procmem["StateIO"][0].shutdown = True
def kill_processes(self):
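        """SIGINT any previously saved controller PIDs that are still running."""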
pid = None
if os.path.exists(self.pidfile_path):
with open(self.pidfile_path, "r") as pidfile:
pids = pidfile.readlines()
pidfile.close()
for _pid in pids[::-1]:
pid = _pid[:-1]
while True:
if os.path.exists("/proc/" + pid):
print("Attempting to shutdown existing controller:", pid)
# FIXME hcitool will hangd when sending SIGINT
os.kill(int(pid), signal.SIGINT)
continue
break
def class_import(self, mtype, arg, mname=None):
# TODO check if configs class(s) is already imported in config
try:
print(mtype, arg, mname)
class_cfg = self.cfg[mtype][mname][arg]
module_name = class_cfg["ModuleName"]
cls_name = mtype[:3] + ":" + arg[:3] + ":" + mname + ":" + module_name
except KeyError:
print("import error: unknown module class name:", arg)
sys.exit(1)
self.modules_classes[cls_name] = class_cfg
try:
if module_name != 'BaseModule':
folder_name = mtype[:-1]
module_path = 'modules.'+ module_name + '.' + folder_name
class_name = folder_name.capitalize()
module = __import__(module_path, fromlist=[class_name])
self.modules_classes[cls_name]["ModuleName"] = getattr(module, class_name)
else:
# used for testing
module_path = 'controller'
module = __import__(module_path, fromlist=['BaseModule'])
self.modules_classes[cls_name]["ModuleName"] = getattr(module, 'BaseModule')
except AttributeError:
print("import error: failed to import class:", cls_name, 'modules.'+ module_name + '.' + mtype[:-1])
sys.exit(1)
try:
for m in self.modules_classes[cls_name]['InputMem']:
if not m in self.procmem:
module = __import__('modules.structures', fromlist=[m])
mem = getattr(module, m)
self.procmem[m] = multiprocessing.Array(mem, 1)
print(" shared memory {0} intialized".format(m))
except AttributeError:
print("import error: failed to import input shared memory class:", m)
sys.exit(1)
try:
for m in self.modules_classes[cls_name]['OutputMem']:
if not m in self.procmem:
module = __import__('modules.structures', fromlist=[m])
mem = getattr(module, m)
# mem = getattr(self.imported, m)
self.procmem[m] = multiprocessing.Array(mem, 1)
print(" shared memory {0} intialized".format(m))
except AttributeError:
print("import error: failed to import output shared memory class:", m)
sys.exit(1)
def _parse_module_args(self, const):
if const != 'default':
args = const.split(":")
return args[0], args[1]
else:
return const, const
def args_import(self, args):
if len(sys.argv) == 1:
            # TODO add default args when no arguments are provided
print("default args are not implemented!")
sys.exit(1)
if args.stop:
self.kill_processes()
sys.exit(0)
# reset sys args to avoid interference with other modules
sys.argv = [sys.argv[0]]
if args.hardware_only:
self.class_import("interfaces", args.hardware_only, "hardware")
return
if args.display_only:
interface, controller = self._parse_module_args(args.display_only)
self.class_import("interfaces", interface, "display")
self.class_import("controllers", controller, "display")
return
if args.sound_only:
interface, controller = self._parse_module_args(args.sound_only)
self.class_import("interfaces", interface, "speaker")
self.class_import("controllers", controller, "speaker")
return
if not args.no_hardware:
self.class_import("interfaces", 'default', "hardware")
        # must be below hardware, to support the no-hardware flag
if args.actuators_only:
interface, controller = self._parse_module_args(args.actuators_only)
self.class_import("interfaces", interface, "actuators")
self.class_import("controllers", controller, "actuators")
return
if not args.actuators_only:
self.class_import("interfaces", 'default', "actuators")
self.class_import("controllers", 'default', "actuators")
if not args.no_display:
self.class_import("interfaces", 'default', "display")
self.class_import("controllers",'default', "display")
if not args.no_sound:
self.class_import("interfaces", 'default', "speaker")
self.class_import("controllers", 'default', "speaker")
if args.keyboard:
self.class_import("interfaces", args.keyboard, "keyboard")
if args.joystick:
self.class_import("interfaces", args.joystick, "joystick")
|
bitula/minipupper-dev
|
controller/controller.py
|
controller.py
|
py
| 12,689 |
python
|
en
|
code
| 2 |
github-code
|
6
|
35449426767
|
n = input().split()
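# Inferred task: print each word all of whose letters occur in exactly two
# of the input words, where both of those words contain exactly two vowels.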
def with_c(c):
temp = []
for i in n:
if c in i:
temp.append(i)
return temp
def vowel_count(pairs):
    # Check every word, not just the first: the original returned True after
    # inspecting one element because the else sat inside the loop.
    for i in pairs:
        i = i.lower()
        if i.count('a') + i.count('e') + i.count('i') + i.count('o') + i.count('u') != 2:
            return False
    return True
main = []
for word in n:
for letter in word:
pairs = with_c(letter)
if vowel_count(pairs) and len(pairs) == 2:
main.append(pairs)
else:
break
else:
print(word)
|
robinroy03/CompetitiveProgramming
|
VPROPEL POD/09-03-23/main.py
|
main.py
|
py
| 567 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16616067005
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.common.exceptions import NoSuchElementException
import logging
def has_booking_started(url: str) -> bool:
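    """Return True if the 'get-tickets' button is present on the booking page."""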
options = webdriver.ChromeOptions()
options.add_argument("--headless=new")
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_experimental_option('excludeSwitches', ['enable-logging'])
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)
driver.implicitly_wait(0.5)
driver.get(url)
try:
if driver.find_elements(By.XPATH, '//button[@data-name="get-tickets"]'):
logging.info("Ticket booking has started")
return True
else:
logging.info("Ticket booking hasn't started yet")
return False
except NoSuchElementException:
logging.info("Ticket booking hasn't started yet")
return False
    finally:
        driver.quit()  # quit() ends the session and stops chromedriver; close() only closes the window
|
CreatorSky/cineplex-notifier
|
utils/selenium_utils.py
|
selenium_utils.py
|
py
| 1,128 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14223858027
|
# Class 1
import machine, time
from machine import ADC
# file=open("data.csv","w") # creation and opening of a CSV file in Write mode
# # Type Program Logic Here
# file.write(str(value)+",") # Writing data in the opened file
# # file.flush() # Internal buffer is flushed (not necessary if close() function is used)
# file.close() # The file is closed
rtc = machine.RTC()
#rtc.datetime((2020, 1, 21, 2, 10, 32, 36, 0))
print(rtc.datetime())
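# GPIO26 is ADC0 on the Raspberry Pi Pico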
value_ad = ADC(26)
print("Starting...")
file = open("data/data.txt","w") # creation and opening of a CSV file in Write mode
led = machine.Pin(25, machine.Pin.OUT)
try:
    while True:
        n = value_ad.read_u16()
        print(n)
        file.write(str(n) + ",")  # Append the reading to the open file
        file.flush()  # Flush each sample so data survives a sudden power cut
        time.sleep(1)
finally:
    file.close()  # Reached only when the loop is interrupted; was unreachable before
|
giulianopalmisano/PDMyE
|
main.py
|
main.py
|
py
| 877 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14187604894
|
from kingadmin.admin_base import BaseKingAdmin
class AdminSite():
"""用于注册用的类"""
def __init__(self):
self.enabled_admins = {}
def register(self, model_class, admin_class = None):
"""注册admin表"""
app_name = model_class._meta.app_label
model_name = model_class._meta.model_name
        if not admin_class:  # avoid multiple models sharing one BaseKingAdmin instance
admin_class = BaseKingAdmin()
else:
admin_class = admin_class()
        admin_class.model = model_class  # attach the model class to the admin instance
if app_name not in self.enabled_admins:
self.enabled_admins[app_name] = {}
self.enabled_admins[app_name][model_name] = admin_class
site = AdminSite()
|
MurrayXiao/SchoolCRM
|
kingadmin/sites.py
|
sites.py
|
py
| 789 |
python
|
en
|
code
| 3 |
github-code
|
6
|
35613748044
|
from collections import OrderedDict
# from datetime import datetime
from django.conf import settings
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
# Create your models here.
class fhir_Consent(models.Model):
""" Store User:application consent in fhir format
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL)
application = models.ForeignKey(settings.OAUTH2_PROVIDER_APPLICATION_MODEL)
consent = JSONField(load_kwargs={'object_pairs_hook': OrderedDict})
created = models.DateTimeField(blank=True, null=True)
revoked = models.DateTimeField(blank=True, null=True)
valid_until = models.DateTimeField(blank=True, null=True)
key = models.TextField(max_length=250, blank=True, null=True)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created = timezone.now()
# Update the key field
self.key = self.user.username + ":" + self.application.name + "["
self.key += self.created.strftime('%Y-%m-%dT%H:%M.%S') + "]"
if self.valid_until:
# print("\nChecking valid_until"
# " still valid:%s\nType:%s" % (self.valid_until,
# type(self.valid_until)))
if self.valid_until <= timezone.now():
if not self.revoked:
self.revoked = self.valid_until
return super(fhir_Consent, self).save(*args, **kwargs)
def revoke_consent(self, confirm=False, *args, **kwargs):
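        """Stamp the consent as revoked, once, when explicitly confirmed."""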
if confirm is True:
if not self.revoked:
self.revoked = timezone.now()
return super(fhir_Consent, self).save(*args, **kwargs)
def status(self):
consent_status = None
if self.revoked:
consent_status = "REVOKED"
else:
consent_status = "VALID"
return consent_status
def granted(self):
if self.created and self.revoked:
valid = False
else:
valid = True
return valid
def __str__(self):
name = '%s %s (%s)' % (self.user.first_name,
self.user.last_name,
self.user.username)
return ("%s:%s" % (name, self.application.name))
def __unicode__(self):
name = '%s %s (%s)' % (self.user.first_name,
self.user.last_name,
self.user.username)
return ("%s:%s" % (name, self.application.name))
|
shihuaxing/hhs_oauth_server
|
apps/fhir/fhir_consent/models.py
|
models.py
|
py
| 2,578 |
python
|
en
|
code
| null |
github-code
|
6
|
36517071630
|
import time
def reverseTimSort(array):
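    # Despite the name, this is a simple quadratic exchange sort (ascending).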
for i in range(len(array)):
for j in range(i):
if array[j] > array[i]:
array[j], array[i] = array[i], array[j]
return array
def getBiggerValue(array):
biggerValue = 0
for item in array:
if item > biggerValue:
biggerValue = item
return biggerValue
def convertValueToCents(value):
return int(float(value) * 100)
def openFile(filename):
with open(filename, 'r') as f:
value = convertValueToCents(f.readline())
coins = []
for line in f:
coins = coins + line.split()
coins = [int(coin) for coin in coins]
return value, coins
def countCoins(coins):
count = 0
for coin in coins:
count += coin * coins[coin]
return count
def validate(coins, change, value):
return change == countCoins(coins) and change >= value
def getTime(func, filename):
value, coins = openFile(filename)
start = time.time()
change, changeCoins = func(value, coins)
end = time.time()
print("Value: ", value)
print("Change: ", change)
print("Coins: ", changeCoins)
print("Is valid: ", validate(changeCoins, change, value))
print("Tempo de execucao: ", end - start)
|
taylorbyks/paa-coins-change
|
utils.py
|
utils.py
|
py
| 1,273 |
python
|
en
|
code
| 0 |
github-code
|
6
|