Dataset schema (github-code dump; one row per source file):
  seq_id             string, length 7-11
  text               string, length 156-1.7M
  repo_name          string, length 7-125
  sub_path           string, length 4-132
  file_name          string, length 4-77
  file_ext           string, 6 classes
  file_size_in_byte  int64, 156-1.7M
  program_lang       string, 1 class
  lang               string, 38 classes
  doc_type           string, 1 class
  stars              int64, 0-24.2k (nullable)
  dataset            string, 1 class
  pt                 string, 1 class
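The records below follow this schema; each record is rendered as a "seq_id:" line, the raw file contents (the text column), and a one-line metadata row. As a minimal sketch of how such a dump could be read with the Hugging Face datasets library (the dataset id and split here are hypothetical placeholders, not something this dump confirms):

import itertools
from datasets import load_dataset

# Hypothetical dataset id; substitute the actual source of this dump.
ds = load_dataset("some-org/github-code-sample", split="train", streaming=True)
for row in itertools.islice(ds, 3):
    print(row["seq_id"], row["repo_name"], row["sub_path"], row["stars"])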
seq_id: 71133547387
from Logic.crud import add_cheltuiala, delete_cheltuiala
from Domain.cheltuiala import to_str
def new_menu():
    '''
    Menu for the new console
    :return:
    '''
    print('''
    Type commands separated by ";". Accepted commands: add, delete, showall, exit, help.
    Type "help" for details about the commands.
    ''')
def help():
    '''
    Prints descriptions of the accepted commands.
    :return:
    '''
    print('''
    help: show this menu
    add,<id>,<nr_ap>,<suma>,<data>,<tipul>: add an expense
    delete,<id>: delete an expense
    showall: show all expenses
    exit: quit the program
    ''')
def show_all(cheltuieli):
for cheltuiala in cheltuieli:
print(to_str(cheltuiala))
def run_comenzi(cheltuieli):
    while True:
        new_menu()  # new_menu() prints the menu itself; the original print(new_menu()) also printed "None"
        exit = False
        comanda = input("Enter commands separated by ';': ").split(';')
for i in range(len(comanda)):
action = comanda[i].split(',')
if action[0] == 'help':
help()
elif action[0] == 'add':
try:
cheltuieli = add_cheltuiala(cheltuieli, action[1], action[2], action[3], action[4], action[5], [], [])
                except IndexError as ie:
                    print(f"Error: {ie}")
elif action[0] == 'delete':
cheltuieli = delete_cheltuiala(cheltuieli, action[1], [], [])
elif action[0] == 'showall':
show_all(cheltuieli)
elif action[0] == 'exit':
exit = True
break
        if exit is True:
            print("Program stopped")
            break
repo_name: AP-MI-2021/lab-567-mirunaxb | sub_path: UI/command_line_console.py | file_name: command_line_console.py | file_ext: py | file_size_in_byte: 1,700 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 14593025601
from Atum.models.hpo import HPOManageService
if __name__ == '__main__':
hpo_service = HPOManageService()
config = dict()
config['search_space'] = {
'learning_rate': {'_type': 'uniform', '_value': [0.001, 0.005]},
'batch_size': {'_type': 'choice', '_value': [32, 64, 128]}
}
config['parallel_search'] = 8
config['gpu_number_per_task'] = 1
config['optimize_mode'] = 'maximize'
config['search_time'] = '1h'
config['task_command'] = "python /mnt/beegfs/ssd_pool/docker/user/hadoop-automl/wengkaiheng/code/infra-mt-cvzoo-classification/tools/automl_tools/train_hpo.py /mnt/beegfs/ssd_pool/docker/user/hadoop-automl/wengkaiheng/code/infra-mt-cvzoo-classification/configs/custom/Res18_cifar.py --hpo"
hpo_service.update_config(config)
hpo_service.start()
repo_name: TrellixVulnTeam/classification_LJ3O | sub_path: tools/automl_tools/run_hpo.py | file_name: run_hpo.py | file_ext: py | file_size_in_byte: 809 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 34660221121
print('Are you ready to play!')
start=input('Yes(or)No :').lower()
score=0
if start !='yes':
quit()
print("let's play :)")
ans=input("which is the smallest ocean in the world? ").lower()
if ans=='arctic':
print('correct!')
score +=1
else:
print('incorrect!')
ans=input("which is the oldest language in the world? ").lower()
if ans=='tamil':
print('correct!')
score +=1
else:
print('incorrect!')
ans=input("brain of computer is? ").lower()
if ans=='cpu':
print('correct!')
score +=1
else:
print('incorrect!')
ans=input("1024 kilobytes is equal to? ").lower()
if ans=='1 mb':
print('correct!')
score +=1
else:
print('incorrect!')
print('your score: '+str(score))
print('your percentage :'+str((score/4)*100)+'%')
repo_name: Itischandru/quiz_game | sub_path: quiz game.py | file_name: quiz game.py | file_ext: py | file_size_in_byte: 773 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 31501456333
from django.urls import path, include
from . import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path("", views.index, name='index'),
path("news/<int:id>/", views.see_full_news),
path("main_page/", views.main_page, name='main_page'),
path("dataselect/", views.dataselect, name='dataselect'),
path("station/", views.station, name='station'),
path("geofrom", views.geoform, name='geofrom'), path('captcha/', include('captcha.urls')),
path("req_form", views.requsets_list, name = 'req_form'),
path("structure", views.struct, name= "structure"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + [path('captcha/', include('captcha.urls')),]
repo_name: AlekseiMuryskin/SiteGS | sub_path: gsras/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 809 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 44900991873
import torch
import torch.nn as nn
from argparse import Namespace
from layers.Layers import DualAttenion, Transpose
class CARD(nn.Module):
def __init__(self, args: Namespace) -> None:
super(CARD, self).__init__()
self.patch_len = args.patch_len
self.stride = args.stride
self.embed_dim = args.embed_dim
self.task_name = args.task_name
patch_num = int((args.seq_len - self.patch_len)/self.stride + 1)
self.patch_num = patch_num
self.pos_embed = nn.Parameter(
torch.randn(patch_num, args.embed_dim)*1e-2)
self.total_token_number = self.patch_num + 1
args.total_token_number = self.total_token_number
        # embedding layer related
self.input_projection = nn.Linear(self.patch_len, args.embed_dim)
self.input_dropout = nn.Dropout(args.dropout)
self.cls = nn.Parameter(torch.randn(1, args.embed_dim)*1e-2)
        # mlp decoder
        # model_token_number is not defined anywhere in this snippet; the upstream
        # CARD code sets it to 0 (an assumption here), which also matches the
        # reshape in forward(), so we define that default to make the module run.
        self.model_token_number = 0
        self.out_proj = nn.Linear(
            (patch_num+1+self.model_token_number)*args.embed_dim, args.pred_len)
# dual attention encoder related
self.Attentions_over_token = nn.ModuleList(
[DualAttenion(args) for i in range(args.hiden_layer_num)])
self.Attentions_over_channel = nn.ModuleList(
[DualAttenion(args, over_channel=True) for i in range(args.hiden_layer_num)])
self.Attentions_mlp = nn.ModuleList(
[nn.Linear(args.embed_dim, args.embed_dim) for i in range(args.hiden_layer_num)])
self.Attentions_dropout = nn.ModuleList(
[nn.Dropout(args.dropout) for i in range(args.hiden_layer_num)])
self.Attentions_norm = nn.ModuleList([nn.Sequential(Transpose(1, 2),
nn.BatchNorm1d(args.embed_dim,
momentum=args.momentum),
Transpose(1, 2)) for i in range(args.hiden_layer_num)])
def forward(self, z: torch.Tensor):
b, c, s = z.shape
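        # Assumed layout, inferred from the unfold over the last axis:
        # z is (batch, channels, seq_len); each channel is split into
        # overlapping patches of length patch_len with the given stride.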
        # input normalization (z-score per channel over the sequence axis)
        z_mean = torch.mean(z, dim=-1, keepdim=True)
        z_std = torch.std(z, dim=-1, keepdim=True)
        z = (z - z_mean)/(z_std + 1e-4)
# tokenization
zcube = z.unfold(dimension=-1, size=self.patch_len, step=self.stride)
z_embed = self.input_dropout(
self.input_projection(zcube)) + self.pos_embed
cls_token = self.cls.repeat(z_embed.shape[0], z_embed.shape[1], 1, 1)
z_embed = torch.cat((cls_token, z_embed), dim=-2)
# dual attention encoder
inputs = z_embed
b, c, t, h = inputs.shape
        for a_2, a_1, mlp, drop, norm in zip(self.Attentions_over_token, self.Attentions_over_channel,
                                             self.Attentions_mlp, self.Attentions_dropout, self.Attentions_norm):
output_1 = a_1(inputs.permute(0, 2, 1, 3)).permute(0, 2, 1, 3)
output_2 = a_2(output_1)
outputs = drop(mlp(output_1+output_2))+inputs
outputs = norm(outputs.reshape(b*c, t, -1)).reshape(b, c, t, -1)
inputs = outputs
# mlp decoder
z_out = self.out_proj(outputs.reshape(b, c, -1))
        # denormalization (undo the per-channel z-score)
        z = z_out * (z_std+1e-4) + z_mean
return z
repo_name: Jarlene/AlgoRepo | sub_path: models/ts/card.py | file_name: card.py | file_ext: py | file_size_in_byte: 3,357 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 9202556126
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
def make_data_index(args, index):
    '''
    Split the (shuffled) reference indices into validation and test folds
    for the k-th of K folds; the remaining indices form the training set.
    :param args: parsed arguments providing K_fold and k_test
    :param index: array of reference indices to split
    :return: (valindex, testindex)
    '''
    # K is the number of folds for K-fold cross-validation
    # k is the index of the fold used for testing
    K = args.K_fold  # unused below because 10-fold cross-validation is assumed by default
    k = args.k_test
# print('The index is ', index, 'Length of Index', len(index))
# Here we assume to use 10-fold Cross-validation, i.e., K = 10
# Approximately 60% Training set, 20% Validation set, 20% Testing set
num_test_ref = int(len(index) * 0.2)
num_val_ref = int(len(index) * 0.2)
num_train_ref = len(index) - num_test_ref - num_val_ref
# Assume k = 1 : 10 for 10-fold Cross-validation
threshold = int(len(index) / num_test_ref)
if k < threshold:
testindex = index[(k - 1) * num_test_ref: k * num_test_ref]
valindex = index[k * num_val_ref: (k + 1) * num_val_ref]
elif k == threshold:
testindex = index[(k - 1) * num_test_ref: k * num_test_ref]
# Check if the index num of validation set is less than num_val_ref
valindex = index[k * num_val_ref: (k + 1) * num_val_ref]
if len(valindex) < num_val_ref:
valindex = valindex.tolist()
for i in range(0, num_val_ref - len(valindex)):
valindex.append(index[i])
elif k == threshold + 1:
testindex = index[k * num_test_ref: (k + 1) * num_test_ref]
if len(testindex) < num_test_ref:
testindex = testindex.tolist()
for i in range(0, num_test_ref - len(testindex)):
testindex.append(index[i])
k -= threshold
valindex = index[(k + 2) * num_val_ref: (k + 3) * num_val_ref]
else:
k -= threshold
testindex = index[k * num_test_ref: (k + 1) * num_test_ref]
if len(testindex) < num_test_ref:
testindex = testindex.tolist()
for i in range(0, num_test_ref - len(testindex)):
testindex.append(index[i + num_test_ref])
valindex = index[(k + 2) * num_val_ref: (k + 3) * num_val_ref]
if len(valindex) < num_val_ref:
valindex = valindex.tolist()
for i in range(0, num_val_ref - len(valindex)):
valindex.append(index[i])
return valindex, testindex
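# Worked example of the sizes above: with len(index) == 1000, num_test_ref and
# num_val_ref are both 200 and num_train_ref is 600, i.e. the assumed
# 60/20/20 train/validation/test split.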
def default_loader(path, channel=3):
"""
:param path: image path
:param channel: # image channel
:return: image
"""
if channel == 1:
return Image.open(path).convert('L')
else:
assert (channel == 3)
return Image.open(path).convert('RGB')
repo_name: SuperBruceJia/NLNet-IQA | sub_path: Cross Database Evaluations/lib/make_index.py | file_name: make_index.py | file_ext: py | file_size_in_byte: 2,658 | program_lang: python | lang: en | doc_type: code | stars: 8 | dataset: github-code | pt: 6
seq_id: 29969829561
"""
MongoDB Interaction - A simple example for future developments
Fabio Bove | [email protected]
"""
#!/usr/bin/env python
# coding: utf-8
# Imports
from pymongo import MongoClient
class MongoUtils:
def __init__(self, auth_param: str, collection_name: str, database_name: str, data: dict) -> None:
self.mongo_client = None
self.last_op_status = None
self.database = None
self.collection = None
self.database_list = None
self.collections_list = None
self.auth_param = auth_param
self.collection_name = collection_name
self.database_name = database_name
self.data = data
def get_last_op_status(self) -> str:
"""
get_last_op_status, this method returns a string containing the status of the last operation made by this class
param: None
return: last_op_status: A string containing the status of the last operation made by this class
"""
return self.last_op_status
def connect_to_cluster(self) -> None:
"""
connect_to_cluster, this method allow to instantiate a new cluster Connection using the pymongo lib
pram: None
return: None
"""
try:
self.mongo_client = MongoClient(self.auth_param)
self.last_op_status = "Successfully connected to Mongo Cluster"
except Exception as e:
self.last_op_status = f"Something went wrong during cluster connection: \n {e}"
self.mongo_client = None
def init_dabase(self, database_name:str) -> None:
"""
init_dabase method, creates (if don't exists yet) a new database with name <database_name>
param: database_name: A string with the name of the new database
return: Nothing
"""
try: # Get the list of databases for the current cluster
self.database_list = self.mongo_client.list_database_names()
self.last_op_status = f"Got the list of active databases: \n {self.database_list}"
except Exception as e:
self.last_op_status = f"Can't get the list of databases: \n {e}"
self.database_list = None
try:
if self.database_list is not None and database_name in self.database_list:
self.last_op_status = f"Database {database_name} already exists."
self.database = self.mongo_client.get_database(database_name)
else:
self.database = self.mongo_client[database_name]
self.last_op_status = f"Database <{database_name}> created successfully."
except Exception as e:
self.last_op_status = f"Something went wrong during database creation: \n {e}"
self.database = None
def init_collection(self, collection_name:str):
"""
init_collection method, initialize a collection if doesn't exists already otherwhise returns the existing one
param: collection_name: The name of the collection
return: Nothing
"""
try:
self.collections_list = self.database.list_collection_names()
except Exception as e:
self.last_op_status = f"Can't get the list of collection: \n {e}"
self.collection = None
self.collections_list = None
try:
if self.collections_list is not None and collection_name in self.collections_list:
self.last_op_status = f"Collection already exists."
self.collection = self.database.get_collection(collection_name)
else:
self.collection = self.database[collection_name]
self.last_op_status = f"Collection <{collection_name}> created successfully."
except Exception as e:
self.last_op_status = f"Something went wrong during collection creation: \n {e}"
self.collection = None
def init_documents(self, data:dict) -> None:
"""
init_documents method, inserts the documents into our collection taken from the given data
param: data: a dict containing all the data to load in the collection
return: Nothing
"""
try:
self.collection.insert_many(data) # [self.collection.insert_one(elem) for elem in data]
self.last_op_status = f"Documents loaded successfully."
except Exception as e:
self.last_op_status = f"Something went wrong during document insertion: \n {e}"
def clean_collection(self, collection_name: str) -> None:
"""
clean_collection method, remove all the documents of a collection
param: collection_name: A string containing the name of the collection.
return: Nothing
"""
if collection_name is not None: # Load the desired collection, if collection_name is empty use the last collection connected to the class
self.init_collection(collection_name)
if self.collection is not None:
if self.collection.count_documents({}) > 0: # Remove the old documents
self.collection.delete_many({})
self.last_op_status = f"Removed old files from the collection."
def init_cluster(self):
self.connect_to_cluster()
self.init_dabase(self.database_name)
self.init_collection(self.collection_name)
self.init_documents(self.data)
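# Minimal usage sketch (assumed placeholder values; the connection string, the
# names and the data below are illustrative, not part of the original module):
#   utils = MongoUtils("mongodb://localhost:27017", "demo_collection", "demo_db",
#                      [{"x": 1}, {"x": 2}])
#   utils.init_cluster()
#   print(utils.get_last_op_status())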
repo_name: fabiobove-dr/mongo-db-interaction-utils | sub_path: src/MongoUtils.py | file_name: MongoUtils.py | file_ext: py | file_size_in_byte: 5,532 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 71357251707
import socket
from _thread import *
import json
def global_logging(type_d, data):
if type_d == "v":
print("[*] VERBOSE :" + str(data))
elif type_d == "i":
print("[I] INFO :" + str(data))
elif type_d == 'd':
print("[D] DEBUG :" + str(data))
elif type_d == "a":
print("[A] AERLT :" + str(data))
elif type_d == "e":
print("[E] ERROR :" + str(data))
    else:
        print("unhandled log type")  # translated from Korean: "로깅 예외" (logging exception)
class Menu:
def __init__(self):
self.menu = """Welcome, It's Server.\n\n 1. Start up server\n 2. Exit
"""
self.choice = None
def show_menu(self):
print(self.menu)
def select_menu(self):
self.choice = input("> ")
def return_choice(self):
return self.choice
class Server:
def __init__(self):
self.SERVER_IP = '127.0.0.1'
self.SERVER_PORT = 8820
self.server_data_buffer_size = 1024
self.server_socket = None
def __del__(self):
# Closing communication
if self.server_socket is not None:
self.server_socket.close()
def start_server(self):
global_logging("v", "Start up Server")
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
global_logging("v", "BIND IP")
self.server_socket.bind((self.SERVER_IP, self.SERVER_PORT))
self.server_socket.listen()
global_logging("i", "Server State : listening")
def thread_communication_main(self, client_socket, addr):
global_logging("i", ("Connected by :" + str(addr[0]) + ":" + str(addr[1])))
while True:
try:
data = client_socket.recv(self.server_data_buffer_size)
if not data:
global_logging("i", ("Disconnected by " + str(addr[0]) + ":" + str(addr[1])))
break
global_logging("i", ("Received from " + str(addr[0]) + ":" + str(addr[1]) + " data:" + data.decode()))
client_socket.send(data)
except ConnectionResetError as e:
global_logging("i", ("Disconnected by " + str(addr[0]) + ":" + str(addr[1])))
break
client_socket.close()
def run_loop(self):
while True:
global_logging("v", "Server State: Waiting for client")
client_socket, addr = self.server_socket.accept()
start_new_thread(self.thread_communication_main, (client_socket, addr))
def received_data(self):
pass
def parsing_data(self):
pass
if __name__ == "__main__":
menu = Menu()
server = Server()
ch = None
while True:
menu.show_menu()
menu.select_menu()
ch = menu.return_choice()
if ch == "1":
server.start_server()
server.run_loop()
elif ch == "2":
global_logging("v", "exit program bye!")
break
else:
pass
repo_name: HyeonBell/Tool | sub_path: python/simple_socket_server/server.py | file_name: server.py | file_ext: py | file_size_in_byte: 3,051 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 25069811529
"""TC:O(n*m),SC:O(m or n)"""
class Solution(object):
def matrixReshape(self, mat, r, c):
"""
:type mat: List[List[int]]
:type r: int
:type c: int
:rtype: List[List[int]]
"""
result = [[0 for _ in range(c)]for _ in range(r)]
if (r*c)!=(len(mat)*len(mat[0])):
return mat
else:
for i in range(r*c):
result[i//c][i%c]=mat[i//len(mat[0])][i%len(mat[0])]
return result
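# Index mapping used above: reading mat in row-major order, flat index i lands at
# row i//c, col i%c of the reshaped matrix. E.g. mat=[[1,2],[3,4]] with r=1, c=4
# flattens to [1,2,3,4] and reshapes to [[1,2,3,4]].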
if __name__ == "__main__":
    mat = [[1,2],[3,4]]
    r = 1
    c = 4
    a = Solution()
    ans = a.matrixReshape(mat, r, c)
    print(ans)
repo_name: ankitarm/Leetcode | sub_path: Python/566.Reshapethematrix.py | file_name: 566.Reshapethematrix.py | file_ext: py | file_size_in_byte: 705 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 31263506941
from selenium import webdriver
from ordered_set import OrderedSet
import time
import os
import csv
os.system("cls")
# newline='' prevents the csv module from writing blank lines on Windows
f = open("99acres.csv", "w", encoding='utf-8', newline='')
csv_writer = csv.writer(f)
csv_writer.writerow(['Project', 'specification', 'area', 'Value'])
driver = webdriver.Chrome('D:/virtualenvs_muthu/selenium_twitter/chromedriver_win32/chromedriver.exe')
os.system("cls")
scroll_list=OrderedSet()
for x in range(1,3):
driver.get(f"https://www.99acres.com/property-in-hadapsar-pune-ffid-page-{x}")
try:
mutiple_properties=driver.find_elements_by_class_name('srpTuple__tupleDetails')
time.sleep(2)
for elem in (mutiple_properties):
scroll_list.add(elem.text)
    except Exception:  # narrowed from a bare except so Ctrl-C still interrupts
        continue
temp=list(scroll_list)
my_actual_list=[]
for x in temp:
xt=x.split("\n")
print(xt)
try:
if xt!=['']:
my_actual_list=[xt[2],xt[1],xt[4],xt[3]]
    except Exception:
        temp_i = temp.index(x)
os.system("cls")
print("previous:")
print(temp[temp_i-1])
print("error:")
print(xt)
print(my_actual_list)
csv_writer.writerow(my_actual_list)
my_actual_list.clear()
f.close()
repo_name: Muthu1612/Optimizing-real-estate-price-prediction-with-various-machine-learning-algorithms | sub_path: Data extraction/extract_data.py | file_name: extract_data.py | file_ext: py | file_size_in_byte: 1,450 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6
seq_id: 10917250777
from collections import namedtuple
from backend.DBEntry import DBEntry, to_db_obj_name
from backend.Ingredient import Ingredient
class Recipe(DBEntry):
"""A recipe of a dish. Consists of ingredients with optional amount (in optional units)."""
table_main = "recipes"
associations = [
("user_meals","recipe_id", False),
("recipe_contents","recipe_id", True)
]
def __init__(self, name, instructions="", contents=set(), db=None, id=None):
"""Constructor. Returns functional object.
:param name: Inherits from DBEntry.
        :param instructions: A string, instructions on how to cook the dish.
:param contents: A set of Content namedtuples - ingredients and their amounts
that comprise the dish.
:param db: Inherits from DBEntry.
:param id: Inherits from DBEntry.
"""
super().__init__(name=name, db=db, id=id)
self.instructions = instructions
self.contents = contents
@classmethod
def from_db(cls, db, id=None, name=None):
"""Search db for recipe entry by id or name (in that priority) and return constructed
object. Returns None if id is not found.
"""
recipe = super().from_db(db=db, id=id, name=name)
if not recipe:
return None
id = recipe.id
# Constructing contents set
needle = (id,)
db.c.execute(
'SELECT ingredient_id, amount, units FROM recipe_contents '
'WHERE recipe_id = ?',
needle
)
rows = db.c.fetchall()
contents = set()
for row in rows:
ingredient = Ingredient.from_db(db, row["ingredient_id"])
content = Content(ingredient, row["amount"], row["units"])
contents.add(content)
recipe.contents = contents
return recipe
def new_to_db(self):
"""Write a new recipe entry to the DB. Return id assigned by the DB."""
table_main = to_db_obj_name(self.table_main)
# Inserting values to the main table
recipe = (self.name, self.instructions)
self.db.c.execute(f'INSERT INTO "{table_main}" (name, instructions) VALUES (?, ?)',
recipe)
# Remembering id assigned by the DB
new_row_id = (self.db.c.lastrowid,)
self.db.c.execute(f'SELECT id FROM "{table_main}" WHERE rowid = ?', new_row_id)
row = self.db.c.fetchone()
id = row["id"]
# inserting contents to the associative table
contents = {(id, c.ingredient.id, c.amount, c.units) for c in self.contents}
self.db.c.executemany(
'INSERT INTO recipe_contents (recipe_id, ingredient_id, amount, units) '
' VALUES (?, ?, ?, ?)',
contents
)
return id
def edit_in_db(self):
"""Edit existing DB recipe to match current object state. Return number of
affected rows.
"""
table_main = to_db_obj_name(self.table_main)
rows_affected = 0
# Updating values to the main table
recipe = (self.name, self.instructions, self.id)
self.db.c.execute(f'UPDATE "{table_main}" SET name = ?, instructions = ? WHERE id = ?',
recipe)
rows_affected += self.db.c.rowcount
# Constructing sets of the recipe's old and new contents' ingredient ids
new_ingredient_ids = {c.ingredient.id for c in self.contents}
needle = (self.id,)
old_contents = self.db.c.execute('SELECT ingredient_id as id FROM recipe_contents WHERE '
'recipe_id = ?', needle).fetchall()
old_ingredient_ids = {c["id"] for c in old_contents}
# Removing contents missing in the new set
to_remove = {(self.id, i_id) for i_id in old_ingredient_ids - new_ingredient_ids}
self.db.c.executemany('DELETE FROM recipe_contents WHERE recipe_id = ? AND ingredient_id = ?',
to_remove)
rows_affected += self.db.c.rowcount
# Adding contents missing in the old set
new_contents = {c for c in self.contents
if c.ingredient.id in new_ingredient_ids - old_ingredient_ids}
to_add = {(self.id, c.ingredient.id, c.amount, c.units) for c in new_contents}
self.db.c.executemany(
'INSERT INTO recipe_contents (recipe_id, ingredient_id, amount, units) '
' VALUES (?, ?, ?, ?)',
to_add
)
rows_affected += self.db.c.rowcount
# Updating contents present in both the old and the new sets
updated_contents = self.contents - new_contents
to_update = {(c.amount, c.units, self.id, c.ingredient.id) for c in updated_contents}
self.db.c.executemany(
'UPDATE recipe_contents SET amount = ?, units = ? '
' WHERE recipe_id = ? AND ingredient_id = ?',
to_update
)
return rows_affected
@classmethod
def get_summary(cls, db, name_sort=False):
""""Return summary table for Recipe objects in DB as dictionary list.
id: recipe db_id.
name: recipe name.
instructions: instructions on how to prepare the dish.
contents: list of contents (ingredient name, amount, units).
dependents: number of other class entries referencing this id as a foreign key.
param name_sort: A boolean. If True, summary will be recursively sorted by
object name ascending.
"""
summary = []
# Get main table data
db.c.execute('SELECT id, name, instructions '
'FROM recipes '
'ORDER BY id ASC'
)
for db_row in db.c.fetchall():
row = {x: y for x, y in zip(db_row.keys(), db_row)}
summary.append(row)
# Get content lists
db.c.execute(
'SELECT recipe_contents.recipe_id, ingredients.name as ingredient, '
' recipe_contents.amount, recipe_contents.units '
'FROM recipe_contents '
'LEFT JOIN ingredients ON recipe_contents.ingredient_id = ingredients.id '
'ORDER BY recipe_id ASC'
)
db_rows = db.c.fetchall()
if db_rows:
it_summary = iter(summary)
s_row = next(it_summary)
for db_row in db_rows:
while not db_row["recipe_id"] == s_row["id"]:
# Ensure at least an empty 'cell' exists for this recipe before moving to next
try:
s_row["contents"]
except KeyError:
s_row["contents"] = []
s_row = next(it_summary)
content = {
"ingredient": db_row["ingredient"],
"amount" : db_row["amount"],
"units" : db_row["units"],
}
try:
s_row["contents"].append(content)
except KeyError:
s_row["contents"] = [content]
# Fill remaining rows with empty content lists
finished = False
while not finished:
try:
s_row = next(it_summary)
s_row["contents"] = []
except StopIteration:
finished = True
# Get dependents
db.c.execute(
'SELECT recipe_id, COUNT(user_id) as dependents FROM user_meals '
'GROUP BY recipe_id '
'ORDER BY recipe_id ASC'
)
db_rows = db.c.fetchall()
if db_rows:
it_summary = iter(summary)
s_row = next(it_summary)
for db_row in db_rows:
while not db_row["recipe_id"] == s_row["id"]:
# Set dependents = 0 for ingredients that don't exist in recipe_contents table
try:
s_row["dependents"]
except KeyError:
s_row["dependents"] = 0
s_row = next(it_summary)
s_row["dependents"] = db_row["dependents"]
# Fill remaining rows with dependents = 0
finished = False
while not finished:
try:
s_row = next(it_summary)
s_row["dependents"] = 0
except StopIteration:
finished = True
if name_sort:
summary.sort(key=lambda x: x["name"].lower())
for row in summary:
try:
row["contents"].sort(key=lambda x: x["ingredient"].lower())
except KeyError:
pass
return summary
def toJSONifiable(self):
dct = super().toJSONifiable()
contents_list = [item._asdict() for item in self.contents]
dct["contents"] = contents_list
return dct
# constructor validation from kindall's answer at
# https://stackoverflow.com/a/42146452
ContentTuple = namedtuple("ContentTuple", "ingredient amount units")
class Content(ContentTuple):
"""Represents quantity or amount of a specific ingredient in a dish"""
__slots__ = ()
    def __new__(cls, ingredient, amount, units=None):
        try:
            if not amount >= 0:
                raise ValueError("amount must be a non-negative number")
        except TypeError:
            raise RuntimeError("amount must be a non-negative number")
        return ContentTuple.__new__(cls, ingredient, amount, units)
    def _replace(self, **kwargs):
        try:
            if not kwargs["amount"] >= 0:
                raise ValueError("amount must be a non-negative number")
        except TypeError:
            # mirror __new__: a non-numeric amount becomes a RuntimeError
            # (the original caught ValueError here, which converted its own
            # freshly raised error and let TypeErrors escape)
            raise RuntimeError("amount must be a non-negative number")
        except KeyError:
            pass
        return super()._replace(**kwargs)
def toJSONifiable(self):
return self._asdict()
repo_name: Longneko/demo_food_app | sub_path: backend/Recipe.py | file_name: Recipe.py | file_ext: py | file_size_in_byte: 10,035 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 6951483307
# Imports restored so the snippet runs standalone
import numpy as np
import matplotlib.pyplot as plt
import cv2

def plot_sine_1D(amp, wavelength, phase):
    x = np.arange(-500, 501, 1)
    y = amp * np.sin((2 * np.pi * x / wavelength) + phase)  # amp was accepted but unused originally
    plt.plot(x, y)
    plt.show()
plot_sine_1D(1, 300, 0)
def plot_sine_2D(amp, wavelength, phase, angle, show=True):
    # The original hard-coded wavelength = 100 (ignoring the argument), returned
    # nothing, and took only four parameters, yet the calls below pass a fifth
    # argument and use the return value; this version honors the wavelength,
    # returns the grating, and only displays it when show=True.
    x = np.arange(-500, 501, 1)
    X, Y = np.meshgrid(x, x)
    sine_2D = amp * np.sin(
        2*np.pi*(X*np.cos(angle) + Y*np.sin(angle)) / wavelength
    )
    if show:
        plt.set_cmap("gray")
        plt.imshow(sine_2D)
    return sine_2D
plot_sine_2D(1, 200, 0, np.pi)
def compute_fft(f):
ft = np.fft.fft2(f)
ft = np.fft.fftshift(ft)
return ft
sin = plot_sine_2D(1,200,0,np.pi,False)
ft = compute_fft(sin)
plt.xlim([480,520])
plt.ylim([520,480])
plt.imshow(abs(ft))
sin = plot_sine_2D(1,200,0,np.pi/6,False)
ft = compute_fft(sin)
plt.xlim([480,520])
plt.ylim([520,480])
plt.imshow(abs(ft))
f,ax = plt.subplots(1,2,figsize=(15,20))
ax = ax.flatten()
im = cv2.imread('/kaggle/input/randomimages/pic2.jpeg',-1)
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ax[0].imshow(im,cmap='gray')
ax[1].imshow(20*np.log(abs(compute_fft(im))),cmap='gray')
# Plotting image and its blurred version
f,ax = plt.subplots(1,2,figsize=(15,20))
ax = ax.flatten()
im = cv2.imread('../input/randomimages/pic1.jpeg',-1)
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im_blur = cv2.GaussianBlur(im,(7,7), 5, 5)
ax[0].imshow(im,cmap='gray')
ax[1].imshow(im_blur,cmap='gray')
def gaussian_filter(kernel_size,img,sigma=1, muu=0):
x, y = np.meshgrid(np.linspace(-1, 1, kernel_size),
np.linspace(-1, 1, kernel_size))
dst = np.sqrt(x**2+y**2)
normal = 1/(((2*np.pi)**0.5)*sigma)
gauss = np.exp(-((dst-muu)**2 / (2.0 * sigma**2))) * normal
gauss = np.pad(gauss, [(0, img.shape[0] - gauss.shape[0]), (0, img.shape[1] - gauss.shape[1])], 'constant')
return gauss
def fft_deblur(img,kernel_size,kernel_sigma=5,factor='wiener',const=0.002):
gauss = gaussian_filter(kernel_size,img,kernel_sigma)
img_fft = np.fft.fft2(img)
gauss_fft = np.fft.fft2(gauss)
weiner_factor = 1 / (1+(const/np.abs(gauss_fft)**2))
if factor!='wiener':
weiner_factor = factor
recon = img_fft/gauss_fft
recon*=weiner_factor
recon = np.abs(np.fft.ifft2(recon))
return recon
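# Note on fft_deblur: dividing img_fft by gauss_fft alone is plain inverse
# filtering, which explodes wherever the kernel spectrum is small. The factor
# 1/(1 + const/|G|^2) = |G|^2/(|G|^2 + const) damps those frequencies, so the
# overall filter is the classic Wiener-style deconvolution, with const acting
# as a noise-to-signal estimate.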
recon = fft_deblur(im_blur,7,5,factor=1)
plt.subplots(figsize=(10,8))
plt.imshow(recon,cmap='gray')
noise = np.random.rand(100,100)
noise_fft = np.fft.fft2(noise)
noise_fft = np.fft.fftshift(noise_fft)
f,ax = plt.subplots(1,2,figsize=(15,20))
ax = ax.flatten()
ax[0].imshow(noise,cmap='gray')
ax[0].set_title('Original Image')
ax[1].imshow(20*np.log(abs(compute_fft(noise_fft))),cmap='gray')
ax[1].set_title('Fourier Transform')
gauss = gaussian_filter(7,im,5)
gauss_fft = np.fft.fft2(gauss)
gauss_fft = np.fft.fftshift(gauss_fft)
f,ax = plt.subplots(1,2,figsize=(15,20))
ax = ax.flatten()
ax[0].imshow(gauss,cmap='gray')
ax[0].set_title('Original Image')
ax[1].imshow(np.abs(gauss_fft),cmap='gray')
ax[1].set_title('Fourier Transform')
f,ax = plt.subplots(1,2,figsize=(15,20))
recon = fft_deblur(im_blur,7,5,factor='wiener')
ax[0].imshow(im_blur,cmap='gray')
ax[1].imshow(recon)
ax[0].set_title('Blurry Image')
ax[1].set_title('Image reconstruction')
plt.show()
f,ax = plt.subplots(1,2,figsize=(15,20))
recon = fft_deblur(im_blur,7,5,factor='wiener',const=0.5)
ax[0].imshow(im_blur,cmap='gray')
ax[1].imshow(recon)
ax[0].set_title('Blurry Image')
ax[1].set_title('Image reconstruction')
plt.show()
repo_name: rvats/PyRAVE | sub_path: UnBlurImage.py | file_name: UnBlurImage.py | file_ext: py | file_size_in_byte: 3,425 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 73876831548
from os import path
from .dictbuilder import DictBuilder
# Dictionary is an in-memory representation of the dictionary of terms for a given corpus.
# Internally, we represent the set of terms as a Python set, since that gives us O(1)
# membership checks, which makes constructing an index much faster.
class Dictionary:
# key = dictionary path, value = set of terms
dictionary = {}
class __Dictionary:
def __init__(self, ctx):
self.ctx = ctx
self.terms = set()
if not path.exists(ctx.dict_path()):
DictBuilder(ctx).build()
with open(self.ctx.dict_path(), "r") as terms:
for term in terms:
self.terms.add(term.rstrip()) # remove trailing newline
def add(self, term):
self.terms.add(term)
def contains(self, term):
return term in self.terms
def __init__(self, ctx):
self.ctx = ctx
if ctx.dict_path() not in Dictionary.dictionary:
Dictionary.dictionary[ctx.dict_path()] = Dictionary.__Dictionary(ctx)
repo_name: tsontario/minerva | sub_path: pkg/dictionary/dictionary.py | file_name: dictionary.py | file_ext: py | file_size_in_byte: 1,072 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
seq_id: 39991774220
import os
from glob import glob
from pathlib import Path
SOURCE_DIRECTORY = "CSVdatabases"
TARGET_DIRECTORY = "cleaned_databases"
CURRENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
DATABASES_FOLDER = f"{CURRENT_DIRECTORY}/{SOURCE_DIRECTORY}"
CLEANED_FOLDER = f"{CURRENT_DIRECTORY}/{TARGET_DIRECTORY}"
def clean_file_contents(contents: list[str]):
    # Keep everything except the first 4 lines and the last 2 lines
    return contents[4:-2]
def clean_all_databases():
database_files = glob(f"{DATABASES_FOLDER}/**/*.csv")
for raw_data_file in database_files:
target_directory = os.path.dirname(raw_data_file).replace(
SOURCE_DIRECTORY, TARGET_DIRECTORY)
target_file = raw_data_file.replace(SOURCE_DIRECTORY, TARGET_DIRECTORY)
if not os.path.exists(target_directory):
os.makedirs(target_directory)
with open(raw_data_file, 'r') as source:
with open(target_file, 'w') as destination:
destination.writelines(clean_file_contents(source.readlines()))
def remove_empty_databases():
cleaned_data_files = glob(f"{CLEANED_FOLDER}/**/*.csv")
for cleaned_data_file in cleaned_data_files:
delete = False
with open(cleaned_data_file, 'r') as f:
if len(f.readlines()) < 2:
delete = True
if delete:
Path(cleaned_data_file).unlink()
if __name__ == '__main__':
clean_all_databases()
remove_empty_databases()
repo_name: DekunZhang/UCL_IFRC_IXN_PUBLIC | sub_path: DesInventar/data_cleaner.py | file_name: data_cleaner.py | file_ext: py | file_size_in_byte: 1,417 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 44904287123
import torch
from experiment import Experiment, AutoDateSet, train, get_args
from model.Tree import Tree
from dataset.criteo import CriteoDataset
def get_model(args):
model = Tree(args)
return model
def get_dataset(args):
dataset = CriteoDataset(dataset_path=args.dataset_paths[0])
train_length = int(len(dataset) * 0.9)
valid_length = len(dataset) - train_length
train_dataset, valid_dataset = torch.utils.data.random_split(dataset, (train_length, valid_length))
test_dataset = CriteoDataset(dataset_path=args.dataset_paths[1])
return [train_dataset, valid_dataset, test_dataset], dataset.field_dims
def main():
    args = get_args()
    args.hidden = [1024, 512, 256, 128, 64]
    args.embed_dim = 16
    args.activation = 'relu'
    args.label = 'y'
    args.num_class = 2
    args.dataset_paths = ['data/criteo/train.txt', 'data/criteo/test.txt']
    datasets, field_dims = get_dataset(args)
print(field_dims)
args.field_dims = field_dims
model = get_model(args)
experiment = Experiment(model=model, args=args)
data = AutoDateSet(datasets, args.batch_size, args.batch_size, args.num_workers, args.pin_memory)
train(args, experiment, data)
if __name__ == "__main__":
main()
repo_name: Jarlene/Experiment | sub_path: tree_main.py | file_name: tree_main.py | file_ext: py | file_size_in_byte: 1,237 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 74416657149
# This program divides one number by another.
def main():
    # Read two numbers.
    num1 = int(input('Enter a number: '))
    num2 = int(input('Enter another number: '))
    # If num2 is nonzero, divide num1 by num2
    # and display the result.
    if num2 != 0:
        result = num1 / num2
        print(num1, 'divided by', num2, 'is', result)
    else:
        print('Cannot divide by zero.')
# Call the main() function.
main()
repo_name: JeanneBM/Python | sub_path: Owoce Programowania/R06/06. Division2.py | file_name: 06. Division2.py | file_ext: py | file_size_in_byte: 506 | program_lang: python | lang: pl | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 18598085905
from __future__ import annotations
from .base import BaseDAO
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from dto import PlayerDTO
class PlayerDAO(BaseDAO):
def create(self, data: PlayerDTO) -> None:
self._db_gateway.cursor.execute("INSERT INTO profiles (phone, username, description, age, height, weight) "
"VALUES (?, ?, ?, ?, ?, ?);",
(data.profile.phone, data.profile.username, data.profile.description,
data.profile.age, data.profile.height, data.profile.weight))
profile_id = self._db_gateway.cursor.lastrowid
self._db_gateway.cursor.execute("INSERT INTO players (email, name, surname, country_id, sport_type_id, "
"team_id, profile_id) VALUES (?, ?, ?, ?, ?, ?, ?);",
(data.email, data.name, data.surname, data.country_id, data.sport_type_id,
data.team_id, profile_id))
self._db_gateway.connection.commit()
def get_ids_list(self) -> list[int]:
result = self._db_gateway.cursor.execute("SELECT id FROM players;")
return result.fetchall()
def get_list(self) -> list[tuple]:
result = self._db_gateway.cursor.execute("SELECT players.id, players.name, players.surname, profiles.age, countries.name, teams.name "
"FROM players JOIN profiles ON players.profile_id = profiles.id JOIN countries ON "
"players.country_id = countries.id JOIN teams ON players.team_id = teams.id;")
return result.fetchall()
repo_name: pyteacher123/py35-onl | sub_path: sports_db_project/data_access/dao/player.py | file_name: player.py | file_ext: py | file_size_in_byte: 1,747 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
seq_id: 2298576928
#!/usr/bin/env python
import sys
import logging
import time
import argparse
import pyBigWig
import math
import numpy as np
from sklearn import preprocessing
from MSTS.version import __version__
from MSTS.Parser.SimpleGffParser import SimpleGffParser
from MSTS.Db.FeatureDB import FeatureDB
from MSTS.Graphics import Graphics
def getCoordinatesFreeFromOtherGenes(lFeatures,feature,start, pivot='start'):
"...."
startNew = start
endNew = start
for feat in lFeatures:
if feature.strand == 1:
if pivot == 'start':
if feat.end <= feature.start and feat.end > startNew:
startNew = feat.end
elif pivot == 'end':
if feat.start >= feature.end and feat.start < endNew:
endNew = feat.start
elif feature.strand == -1:
if pivot == 'start':
if feat.start >= feature.end and feat.start < startNew:
startNew = feat.start
elif pivot == 'end':
if feat.end <= feature.start and feat.end > endNew:
endNew = feat.end
if pivot == 'start':
return startNew
elif pivot == 'end':
return endNew
def getBasesOverlappingOtherGenes(lOverlappingFeatures,Start, End, regionStart, regionEnd):
"""...."""
lOtherFeatures = []
for feat in lOverlappingFeatures:
if feat.start <= Start and feat.end >= End:
pass
else:
lOtherFeatures.append(feat)
dBases = {}
for val in range(regionStart, regionEnd):
dBases[val] = 0
for feat in lOtherFeatures:
for base in range(feat.start,feat.end):
if base in dBases:
dBases[base] += 1
return [value for (key,value) in sorted(dBases.items())]
def gaussianSmoothing(data, windowWidth=3, stdev=20):
    """Smooth data with a discrete Gaussian filter of length 2*windowWidth*stdev+1."""
    filter = [None]*(2*windowWidth*stdev+1)
    sumt = 0.0
    for i in range(0, len(filter)):
        # center the Gaussian in the window; the original used a hard-coded
        # 3 * stdev offset, which is only correct when windowWidth == 3
        x = float(i - windowWidth * stdev)
        value = math.exp(-(x * x) / (2 * stdev * stdev))
        filter[i] = value
        sumt += value
for i in range(0,len(filter)):
filter[i] /=sumt
smoothed = [0]*len(data)
ltmp = [0.0]*(windowWidth*stdev)
data.extend(ltmp)
ltmp.extend(data)
data = ltmp
for i in range(0,len(smoothed)):
for j in range(0,len(filter)):
smoothed[i] += data[i + j] * filter[j]
smoothed[0:windowWidth*stdev] = [np.nan]*(windowWidth*stdev)
smoothed[-(windowWidth*stdev):] = [np.nan]*(windowWidth*stdev)
return smoothed
def readFeatureIds(lIdsFile):
"""Read feature Ids from file"""
lIds = []
try:
with open(lIdsFile, 'r') as f:
for line in f:
line = line.rstrip()
if line:
lIds.append(line)
f.close()
logging.info("{} features to analyze".format(len(lIds)))
except Exception as e:
logging.error("Can not read feature file: {}".format(lIdsFile))
sys.exit(1)
return lIds
def getMeanValue(bw):
"""Normalize with mean"""
lValues = []
for chrom in db.selectReferences():
logging.info('Requesting genes in sequence: {}'.format(chrom))
lFeatures = db.selectFeatureTypeFromReference(chrom, featType)
for feat in lFeatures:
values = bw.values(chrom,feat.start,feat.end)
lValues.extend(values)
return np.mean(lValues)
if __name__ == "__main__":
program = sys.argv[0]
version = __version__
description = 'todo, \
...'
    parser = argparse.ArgumentParser(prog=program, description=description)
parser.add_argument('--version', action='version', version='{} {}'.format(program,version))
parser.add_argument("bigWig", help="Input bigWig File", type=str)
parser.add_argument("gff3", help="Input genome annotation in gff3 format", type=str)
parser.add_argument("-wb","--windowBefore", help="window size to analyze before the feature, default=1000", type=int, default=1000)
parser.add_argument("-wa","--windowAfter", help="window size to analyez after the feature, default=1000", type=int, default=1000)
parser.add_argument("-ft","--featureType", help="feature type to analyze, default=gene", type=str, default='gene')
parser.add_argument("-p", "--pivot", help="feature bound to use, default=start, possible values=[start,end]",type=str,default='start')
parser.add_argument("--context", help="if set, defined features in context matter", action="store_true", default=False)
parser.add_argument("-o", "--out", help="name of output graph", type=str, default="graph.png")
parser.add_argument("-t", "--title", help="title text", type=str, default="title")
parser.add_argument("-x", "--xax", help="x axis text", type=str, default="window, bp")
parser.add_argument("-y", "--yax", help="y axis text", type=str, default="nb bases")
parser.add_argument("-z", "--zax", help="z axis text", type=str, default="signal coverage")
parser.add_argument("-d", "--sqliteDB", help="provide sqlite DB to avoid insertion, usefull for multi-analysis", type=str, default=None)
parser.add_argument("-n", "--noDeleteDB", help="Do not delete SQLite DB", action="store_true", default=False)
parser.add_argument("-s","--GaussianSmoothing", help="Perform Gaussian Smoothing on data, ", action="store_true", default=False)
parser.add_argument("-w","--windowWidth", help="window size for Gaussian smoothing, default=3", type=int, default=3)
parser.add_argument("-sd","--stdev", help="stdev for Gaussian smoothing, default=20", type=int, default=20)
parser.add_argument("--norm", help="Normalize signal value with the average signal of all features of the same type", action="store_true", default=False)
parser.add_argument("--flush",help="print phases on stdout to save in file, > phases.out", action="store_true", default=False)
parser.add_argument("-l","--lIds", help="txt file with ID list (one ID per line), limit phasogram to the features specified in the file. Features must be of the same type as featureType", type=str, default=None)
parser.add_argument("--heatmap", help="export coresponding heatmap", action="store_true", default=False)
parser.add_argument("--heatmapsmooth", help="Perform Gaussian smoothing on each feature (time consuming)", action="store_true", default=False)
parser.add_argument("--keepLIdOrder", help="export heatmap with respect to LIds order if supplied", action="store_true", default=False)
parser.add_argument("-v", "--verbosity", type=int, choices=[1,2,3],
help="increase output verbosity 1=error, 2=info, 3=debug")
args = parser.parse_args()
logLevel='ERROR'
if args.verbosity == 1:
logLevel = 'ERROR'
if args.verbosity == 2:
logLevel = 'INFO'
if args.verbosity == 3:
logLevel = 'DEBUG'
logging.getLogger().setLevel(logLevel)
featType = args.featureType
featTypeContext = featType
pivot = args.pivot
if pivot not in ['start','end']:
logging.error("Provided pivot: '{}' not allowed, choose 'start' or 'end'".format(pivot))
sys.exit(1)
context = args.context
if not args.sqliteDB:
logging.info("Parsing gff3 gene file")
parser = SimpleGffParser(args.gff3, logLevel)
lFeatures = []
for feat in parser.parse():
lFeatures.append(feat)
logging.info("{} features parsed".format(parser.nbFeatures))
logging.info("Inserting features in SQLite DB")
timestamp = int(time.time())
db = FeatureDB('sqlite-{}.db'.format(timestamp),False,logLevel)
db.insertlFeatures(lFeatures)
logging.info("Insertion done")
else:
logging.info("Using {} file as SQLite db".format(args.sqliteDB))
db = FeatureDB(args.sqliteDB,noCreate=True,logLevel=logLevel)
bw = pyBigWig.open(args.bigWig)
winBefore = args.windowBefore
winAfter = args.windowAfter
lPhases = [0]*(1+winBefore+winAfter)
lAllPhases = []
lPhasesNb = [0]*(1+winBefore+winAfter)
lOtherGenesNb = [0]*(1+winBefore+winAfter)
lIds = []
d = {}
if args.lIds:
lIds = readFeatureIds(args.lIds)
#lIds = ['Avrlm3_Lema_T200610.1', 'lm_SuperContig_0_v2_lmctg_0007_v2_egn4_orf_Lema_T000020.1']
for chrom in db.selectReferences():
logging.info('Requesting genes in sequence: {}'.format(chrom))
if len(lIds) > 0:
lFeatures = db.selectFeatureFromIdListAndType(chrom, lIds, featType)
else:
lFeatures = db.selectFeatureTypeFromReference(chrom, featType)
nbAnalyzedFeat = len(lAllPhases)
for ftIdx, feat in enumerate(lFeatures):
#print(feat.id)
if args.keepLIdOrder:
d[feat.id] = ftIdx+nbAnalyzedFeat
lAllPhases.append([0]*(1+winBefore+winAfter))
if feat.strand == 1:
if pivot == 'start':
start = max(1,feat.start-winBefore)
end = min(bw.chroms(chrom),feat.start+winAfter)
elif pivot == 'end':
start = max(1,feat.end-winBefore)
end = min(bw.chroms(chrom),feat.end+winAfter)
elif feat.strand == -1:
if pivot == 'start':
start = min(bw.chroms(chrom),feat.end+winBefore)
end = max(1,feat.end-winAfter)
elif pivot == 'end':
start = min(bw.chroms(chrom),feat.start+winBefore)
end = max(1,feat.start-winAfter)
else:
logging.error("Cannot perform analysis on feature witout strand")
sys.exit(1)
startNew = start
endNew = end
if pivot == 'start':
lOverlappingFeatures = db.selectFeatureTypeFromCoordinates(featTypeContext,chrom,min(start,end),max(start, end))
if context:
startNew = getCoordinatesFreeFromOtherGenes(lOverlappingFeatures,feat,start)
elif pivot == 'end':
lOverlappingFeatures = db.selectFeatureTypeFromCoordinates(featTypeContext,chrom,min(start,end),max(start, end))
if context:
endNew = getCoordinatesFreeFromOtherGenes(lOverlappingFeatures,feat,end,pivot='end')
index = 0
lOtherGenesBases = []
if feat.strand == 1:
if pivot == 'start':
lValues = bw.values(chrom,startNew-1,end)
decal = 0
if context:
lValues = bw.values(chrom,startNew-1,feat.end)
decal = (startNew-start)
for i in range(0+decal,min(len(lValues)+decal,winBefore+winAfter+1)):
lPhases[i] += lValues[i-decal]
lAllPhases[ftIdx+nbAnalyzedFeat][i] = lValues[i-decal]
lPhasesNb[i] += 1
index = i
for i in range(0,0+decal):
lAllPhases[ftIdx+nbAnalyzedFeat][i] = np.nan
for i in range(min(len(lValues)+decal,winBefore+winAfter+1), winBefore+winAfter+1):
lAllPhases[ftIdx+nbAnalyzedFeat][i] = np.nan
lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures,feat.start,feat.end,start,end)
elif pivot == 'end':
lValues = bw.values(chrom,start-1,endNew)
decal=0
if context:
lValues = bw.values(chrom,feat.start-1,endNew)
decal = max((winBefore-(feat.end-feat.start)),0)
for i in range(decal,min(decal+len(lValues),winBefore+winAfter+1)):
lPhases[i] += lValues[i-decal]
lAllPhases[ftIdx+nbAnalyzedFeat][i] = lValues[i-decal]
lPhasesNb[i] += 1
index = i
for i in range(0,decal):
lAllPhases[ftIdx+nbAnalyzedFeat][i] = np.nan
for i in range(min(len(lValues)+decal,winBefore+winAfter+1), winBefore+winAfter+1):
lAllPhases[ftIdx+nbAnalyzedFeat][i] = np.nan
lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures,feat.start,feat.end,start,end)
elif feat.strand == -1:
if pivot == 'start':
lValues = bw.values(chrom,end,startNew)
decal = 0
if context:
lValues = bw.values(chrom,feat.start-1,startNew)
decal = (startNew-start)
for i in range(-1+decal,max(-len(lValues)+decal-1,(-winAfter)+(-winBefore)+(-1)-1),-1):
lPhases[-i-1] += lValues[i-decal]
lAllPhases[ftIdx+nbAnalyzedFeat][-i-1] = lValues[i-decal]
lPhasesNb[-i-1] += 1
index = i
for i in range(-1,-1+decal, -1):
lAllPhases[ftIdx+nbAnalyzedFeat][-i-1] = np.nan
for i in range( max(-len(lValues)+decal-1,(-winAfter)+(-winBefore)+(-1)-1),(-winAfter)+(-winBefore)+(-1)-1 ,-1):
lAllPhases[ftIdx+nbAnalyzedFeat][-i-1] = np.nan
lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures,feat.start,feat.end,end,start)[::-1]
elif pivot == 'end':
lValues = bw.values(chrom,endNew-1,start)
decal = 0
if context:
lValues = bw.values(chrom,endNew-1,feat.end)
decal = max((winBefore-(feat.end-feat.start)),0)
for i in range(-1-decal,max(-len(lValues)-decal,(-winAfter)+(-winBefore)+(-1)-1), -1):
lPhases[-i-1] += lValues[i+decal]
lAllPhases[ftIdx+nbAnalyzedFeat][-i-1] = lValues[i-decal]
lPhasesNb[-i-1] += 1
index = i
for i in range(-1,-1+decal, -1):
lAllPhases[ftIdx+nbAnalyzedFeat][-i-1] = np.nan
for i in range( max(-len(lValues)+decal-1,(-winAfter)+(-winBefore)+(-1)-1),(-winAfter)+(-winBefore)+(-1)-1 ,-1):
lAllPhases[ftIdx+nbAnalyzedFeat][-i-1] = np.nan
lOtherGenesBases = getBasesOverlappingOtherGenes(lOverlappingFeatures,feat.start,feat.end,end,start)[::-1]
else:
pass
for i in range(0,len(lOtherGenesBases)):
lOtherGenesNb[i] += lOtherGenesBases[i]
# print(lPhases)
lAveragePhases = [0]*(1+winBefore+winAfter)
# print(len(d.keys()))
# print(len(lIds))
if args.keepLIdOrder:
lSortedAllPhases = []
for i in lIds:
#print(i)
lSortedAllPhases.append(lAllPhases[d[i]])
lAllPhases = lSortedAllPhases
# for idx,feature_phase in enumerate(lAllPhases):
# lSmoothedAllPhases.append(gaussianSmoothing(feature_phase, args.windowWidth, args.stdev))
# if not (idx+1)%1000:
# logging.info("{} feature values smoothed on {}".format(idx+1, len(lAllPhases)))
# heatvalues = np.array(lSmoothedAllPhases, dtype=np.float)
heatvalues = np.array(lAllPhases, dtype=np.float64)
lAllPhasesNorm = []
for u,s in enumerate(lAllPhases):
mi= np.nanmin(s)
ma = np.nanmax(s)
l = []
for x in s:
if np.isnan(np.array([x])):
l.append(np.nan)
else:
l.append((x-mi*1.0)/(ma-mi))
lAllPhasesNorm.append(l)
    heatvalues = np.array(lAllPhasesNorm, dtype=np.float64)  # np.float was removed from modern NumPy
lSmoothedAllPhases = []
if args.heatmapsmooth:
heatvaluesNoNaN = np.nan_to_num(heatvalues)
for idx,feature_phase in enumerate(heatvaluesNoNaN):
lSmoothedAllPhases.append(gaussianSmoothing(list(feature_phase), args.windowWidth, args.stdev))
if not (idx+1)%1000:
logging.info("{} feature values smoothed on {}".format(idx+1, len(lAllPhases)))
for i,s in enumerate(heatvalues):
for j,x in enumerate(s):
if np.isnan(x):
lSmoothedAllPhases[i][j] = np.nan
for a,b in enumerate(lPhases):
if lPhasesNb[a] != 0:
lAveragePhases[a] = lPhases[a]/lPhasesNb[a]
if args.norm:
mean = getMeanValue(bw)
lAveragePhases = [v/mean for v in lAveragePhases]
logging.info("Normalizing values with average coverage of {}: {}".format(args.featureType,mean))
if args.GaussianSmoothing:
logging.info("Smoothing data with Gaussian blur, window: {}, stdev: {}".format(args.windowWidth, args.stdev))
lAveragePhases = gaussianSmoothing(lAveragePhases, args.windowWidth, args.stdev)
lenFilter = 2*args.windowWidth*args.stdev+1
if args.noDeleteDB:
logging.info('SQLite db: {} not removed'.format(db.getDbFileName()))
else:
logging.info('SQLite db: {} removed'.format(db.getDbFileName()))
db.deleteDB()
logging.info("Drawing graph in {}".format(args.out))
if args.GaussianSmoothing:
Graphics.plotDistributionWithGeneHistogram([x for x in range(-winBefore,winAfter+1)],lAveragePhases[0:(winBefore+winAfter+1)],lPhasesNb[0:(winBefore+winAfter+1)],lOtherGenesNb[0:(winBefore+winAfter+1)],out=args.out, title=args.title, xax=args.xax, yax=args.yax, yax2=args.zax)
else:
Graphics.plotDistributionWithGeneHistogram([x for x in range(-winBefore,winAfter+1)],lAveragePhases[0:(winBefore+winAfter+1)],lPhasesNb[0:(winBefore+winAfter+1)],lOtherGenesNb[0:(winBefore+winAfter+1)],out=args.out, title=args.title, xax=args.xax, yax=args.yax, yax2=args.zax)
if args.heatmap:
if args.heatmapsmooth:
Graphics.plotHeatmap(lSmoothedAllPhases, out="heatmap-{}".format(args.out), title="Heatmap - {}".format(args.title), xax="position bp", yax="#features", mi=args.windowBefore, ma=args.windowAfter)
else:
Graphics.plotHeatmap(heatvalues, out="heatmap-{}".format(args.out), title="Heatmap - {}".format(args.title), xax="position bp", yax="#features", mi=args.windowBefore, ma=args.windowAfter)
if args.flush:
for x in range(0,winBefore+winAfter+1):
print("{}\t{}".format(x-winBefore,lAveragePhases[x]))
repo_name: nlapalu/MSTS | sub_path: bin/MSTS_feature_phasogram.py | file_name: MSTS_feature_phasogram.py | file_ext: py | file_size_in_byte: 18,845 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 6365552957
import threading
from flask import Flask, jsonify, Response
import asyncio
from camera import Camera
from websocketServer import VideoWs
from flask_cors import CORS
from config import ip, http_port
from queue import Queue
app = Flask(__name__)
CORS(app)
camera = Camera()
event = threading.Event()
queue = Queue()
@app.route('/open')
def open_camera():
open_ws_conn()
dic = {
'code': 1,
'msg': 'open camera success'
}
return jsonify(dic)
@app.route('/close')
def close_camera():
camera.close_camera()
return jsonify({
'code': 1
})
def open_ws_conn():
openWs()
def run():
    print('running')  # translated from Chinese: '执行' (execute)
    app.run(host=ip, port=http_port)
def main():
flask_thread = threading.Thread(target=run, name='flask thread')
flask_thread.start()
def openWs():
t = threading.Thread(target=openws1, name='ws thread', args=(1, event, queue))
t1 = threading.Thread(target=openVideo1, name='video thread', args=(1, event, queue))
t.start()
t1.start()
def openws1(args, event, queue):
    # The original called openws2(...).send(None), but openws2 returns None
    # (asyncio.run drives the coroutine itself), so .send(None) would raise
    # AttributeError once the call returned. A plain call is sufficient.
    openws2(args, event, queue)
def openws2(args, event, queue):
    asyncio.run(VideoWs.start(args, event, queue))
def openVideo1(args, event, queue):
    openVideo(args, event, queue)  # same fix as openws1
def openVideo(args, event, queue):
    asyncio.run(camera.open_camera(args, event, queue))
if __name__ == '__main__':
main()
repo_name: l5769389/py-vue-ws-camera | sub_path: router.py | file_name: router.py | file_ext: py | file_size_in_byte: 1,400 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 27215468512
# # Dependency for album cover download
# from urllib import request
#
# thumbnail_path = "https://i.ytimg.com/vi/uijHb5U1pD8/default.jpg"
# album_cover = request.urlretrieve(thumbnail_path, "albumcover.jpg")
import requests
f = open('image.jpg', 'wb')
f.write(requests.get('https://i.ytimg.com/vi/uijHb5U1pD8/default.jpg').content)
f.close()
repo_name: iostate/PythonProjects | sub_path: DownloadMP4/open_image.py | file_name: open_image.py | file_ext: py | file_size_in_byte: 343 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 23753938918
# -*- coding: utf-8 -*-
'''
Created on 2019-03-05
@author: Administrator
'''
def is_palindrome(n):
    str_n = str(n)
    half_length = len(str_n)//2  # integer division; the original / produced a float, which breaks range() on Python 3
    for i in range(half_length):
        if str_n[i] != str_n[len(str_n)-1-i]:
            return False
    return True
if __name__ == '__main__':
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
    if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:
        print('Test passed!')
    else:
        print('Test failed!')
repo_name: chptcleo/PythonPractice | sub_path: com/list/palindrome/palindrome_num.py | file_name: palindrome_num.py | file_ext: py | file_size_in_byte: 637 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 71897328507
import os
import time
import psutil
def run_excel():
    # Open the workbook, then poll once per second until Excel exits.
    # (The original wrapped this in a redundant outer `while True: ... break`
    # that only ever ran once; it is removed here without changing behavior.)
    os.system('start excel possible_duplicate_people.xlsx')
    while True:
        time.sleep(1)
        excel_running = False
        for process in psutil.process_iter(attrs=['pid', 'name']):
            if "EXCEL.EXE" in process.info['name']:
                excel_running = True
                break
        if not excel_running:
            break
    print("Excel file has been closed. Now, running additional code.")
repo_name: FrontendPony/XML-parser | sub_path: open_excel.py | file_name: open_excel.py | file_ext: py | file_size_in_byte: 550 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 72489782587
# This program computes hardcoded values with 4th-order Runge-Kutta for testing.
# It takes reference from the link below, but is modified to converge on a solution
# and uses two differentials instead of one.
# https://www.codesansar.com/numerical-methods/runge-kutta-fourth-order-rk4-python-program.htm
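# For the coupled system du/da = f(a,u,z) = z and dz/da = g(a,u,z), each RK4
# step of size h advances both variables with the standard stage weights,
# exactly as implemented below:
#   k1 = h*f(a, u, z)                   l1 = h*g(a, u, z)
#   k2 = h*f(a+h/2, u+k1/2, z+l1/2)     l2 = h*g(a+h/2, u+k1/2, z+l1/2)
#   k3 = h*f(a+h/2, u+k2/2, z+l2/2)     l3 = h*g(a+h/2, u+k2/2, z+l2/2)
#   k4 = h*f(a+h, u+k3, z+l3)           l4 = h*g(a+h, u+k3, z+l3)
#   u += (k1 + 2*k2 + 2*k3 + k4)/6      z += (l1 + 2*l2 + 2*l3 + l4)/6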
import math
def f(a,u,z):  # du/da = z
    return z
def g(a,u,z):  # dz/da (second derivative of u; the original comment mislabeled this du/da)
    return -u + (3/2)*pow(u,2)
def rk4(a0,u0,z0,an,n):
h = (an-a0)/n
for i in range(n):
k1 = h * (f(a0, u0, z0))
l1 = h * (g(a0, u0, z0))
k2 = h * (f((a0+h/2), (u0+k1/2), (z0+l1/2)))
l2 = h * (g((a0+h/2), (u0+k1/2), (z0+l1/2)))
k3 = h * (f((a0+h/2), (u0+k2/2), (z0+l2/2)))
l3 = h * (g((a0+h/2), (u0+k2/2), (z0+l2/2)))
k4 = h * (f((a0+h), (u0+k3), (z0+l3)))
l4 = h * (g((a0+h), (u0+k3), (z0+l3)))
k = (k1+2*k2+2*k3+k4)/6
l = (l1+2*l2+2*l3+l4)/6
un = u0 + k
print('%.4f\t%.4f\t%.4f\t%.4f\t(%.4f)'% (a0,u0,z0,un,1/un) )
print('-------------------------')
u0 = un
a0 = a0+h
z0 = z0 + l
print('\nAt a=%.4f, u=%.4f, z=%.4f' %(an,un,z0))
print('1 / un = %.4f' % (1/un))
def rk4_step(a,u,z,h):
k1 = h * (f(a, u, z))
l1 = h * (g(a, u, z))
k2 = h * (f((a+h/2), (u+k1/2), (z+l1/2)))
l2 = h * (g((a+h/2), (u+k1/2), (z+l1/2)))
k3 = h * (f((a+h/2), (u+k2/2), (z+l2/2)))
l3 = h * (g((a+h/2), (u+k2/2), (z+l2/2)))
k4 = h * (f((a+h), (u+k3), (z+l3)))
l4 = h * (g((a+h), (u+k3), (z+l3)))
k = (k1+2*k2+2*k3+k4)/6
l = (l1+2*l2+2*l3+l4)/6
return a+h,u+k,z+l
def rk4_r0(a0,u0,z0,u_max,da_target):
"""
Use 4th-order Rutta-Kunge with an adaptive stepsize to find
the u=1/r_0 for du/da = g(a, u, z).
"""
MIN_ITER = 30
MAX_ITER = 100
h = 0.5
al, ul = a0, u0
for i in range(MAX_ITER): #max iterations
a0,u0,z0 = rk4_step(a0,u0,z0,h)
print(f"{i: >2}: {a0:.3f} {u0:.5f} {z0:.5f}")
if u0>u_max:
return a0,-1,z0
if i>MIN_ITER and abs(al-a0)<da_target:
return a0,u0,z0
if ul>u0:
h = -h/2
al, ul = a0, u0
return a0,u0,z0
b = 3
print(f"Impact parameter (b) = {b}")
a0,u0,z0 =0,0,1/b
print(" ai ui zi")
an,un,zn = rk4_r0(a0,u0,z0,10,0.00000001)
print("done:")
print(f"{an:.10f}\t{un:.10f}\t{zn:.3f}")
repo_name: curz46/bholetrace | sub_path: rk4_r0.py | file_name: rk4_r0.py | file_ext: py | file_size_in_byte: 2,381 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6
seq_id: 11740151374
'''
Author: Ganesh Manal
Example code: Check if text is palindrome
'''
def is_palindrome(input_string):
'''
check if input text is palindrome
Input: text
Output: boolean - True if text is palindrome
'''
start_index, last_index = 0, len(input_string)-1
while start_index <= last_index:
if input_string[start_index].lower() != input_string[last_index].lower():
return False
start_index += 1
last_index -= 1
return True
if __name__ == "__main__":
text_inputs = [
"madam",
"racecar",
"level",
"mom",
"rotator",
"wow",
"No lemon, no melon"
]
for text in text_inputs:
RESULT = is_palindrome(text.replace(" ", ""))
print(f"\ninput string: {text} \nis palindrome: {RESULT}")
repo_name: GaneshManal/TestCodes | sub_path: python/training/module-01/03-check_palindrome.py | file_name: 03-check_palindrome.py | file_ext: py | file_size_in_byte: 823 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
seq_id: 12161841216
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 26 14:10:45 2021
@author: manssandsjo
"""
import matplotlib.animation as ani
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
lstm_lw = pd.read_pickle('./pickle_graphs/totValueLSTM_LW.pkl') #.to_numpy()
lstm_scm = pd.read_pickle('./pickle_graphs/totValueLSTM_SCM.pkl') #.to_numpy()
sma_scm = pd.read_pickle('./pickle_graphs/totValueSMA_SCM.pkl') #.to_numpy()
df = pd.DataFrame(columns=['SMA_SCM','LSTM_LW','LSTM_SCM'])
df['LSTM_LW'] = lstm_lw.iloc[:,0]
df['LSTM_SCM'] = lstm_scm.iloc[:,0]
df['SMA_SCM'] = sma_scm.iloc[:,0]
color = ['red', 'green', 'blue']
fig = plt.figure()
plt.subplots_adjust(bottom = 0.2, top = 0.9) #ensuring the dates (on the x-axis) fit in the screen
plt.ylabel('Total value of portfolio, SEK')
plt.xlabel('Year')
def buildmebarchart(i): # FuncAnimation passes the current frame index
plt.legend(df.columns)
p = plt.plot(df[:i].index, df[:i].values) #note it only returns the dataset, up to the point i
for j in range(0,3): # separate name so the frame index is not shadowed
p[j].set_color(color[j]) #set the colour of each curve
anim = ani.FuncAnimation(fig, buildmebarchart, interval = 100)
from matplotlib import rc
# equivalent to rcParams['animation.html'] = 'html5'
rc('animation', html='html5')
anim
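# Hedged addition (assumed filename): persist the animation instead of only
# rendering it inline; matplotlib's Pillow writer can produce a GIF.
# anim.save('portfolio.gif', writer='pillow', fps=10)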
|
MansSandsjo/StockBot
|
src/Data/testGraphs.py
|
testGraphs.py
|
py
| 1,267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10012294494
|
#!/usr/bin/python
# coding=utf-8
import paramiko
import xlrd
import time
import os
import handle_file
server_info_list = handle_file.read_excel_xlsx('documents/server_info.xlsx', 'Sheet1')
Host = server_info_list[0][0]
Port = server_info_list[0][1]
Username = server_info_list[0][2]
Password = server_info_list[0][3]
def ssh_exec_cmd():
'''SSH remote login: a Windows client connects to the Linux server and runs commands'''
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#connect to the Linux server
#ssh.connect('10.20.24.89', 22, 'root', 'Cfcs2@07380')
ssh.connect(Host, Port, Username, Password)
#run the relevant Linux commands
#kill -9 any kafka process, whether or not one is running
stdin, stdout, stderr = ssh.exec_command("ps -ef | grep kafka | grep -v grep | awk '{print $2}' | xargs kill -9")
#kill -9 any zookeeper process, whether or not one is running
stdin, stdout, stderr = ssh.exec_command("ps -ef | grep zookeeper | grep -v grep | awk '{print $2}' | xargs kill -9")
#kill -9 any redis process, whether or not one is running
stdin, stdout, stderr = ssh.exec_command("ps -ef | grep redis | grep -v grep | awk '{print $2}' | xargs kill -9")
#kill -9 any nginx process, whether or not one is running
stdin, stdout, stderr = ssh.exec_command("ps -ef | grep nginx | grep -v grep | awk '{print $2}' | xargs kill -9")
"""
解压zookeeper安装包,并启动zookeeper服务
"""
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf zookeeper " )
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && unzip -o zookeeper-3.4.12.zip -d /usr/local ")
time.sleep(3)
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv zookeeper-3.4.12 zookeeper && chmod -R 777 zookeeper ")
stdin, stdout, stderr = ssh.exec_command("source .bash_profile; sh /usr/local/zookeeper/bin/zkServer.sh start ")
time.sleep(3)
"""
解压kafka安装包,并修改kafka配置文件,然后启动kafka服务
"""
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf kafka ")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && unzip -o kafka_2.11-0.11.0.3.zip -d /usr/local")
time.sleep(3)
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv kafka_2.11-0.11.0.3 kafka && chmod -R 777 kafka")
#edit the kafka config file
# stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i 's#zookeeper.connect=.*#zookeeper.connect=10.20.158.33:2181#g' server.properties")
# stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i 's#listeners=.*#listeners=PLAINTEXT://10.20.158.33:9092#g' server.properties")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i " + "'" + "s#zookeeper.connect=.*#zookeeper.connect=" + Host + ":2181#g" + "'" + " server.properties")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/kafka/config; sed -i " + "'" + "s#listeners=.*#listeners=PLAINTEXT://" + Host + ":9092#g" + "'" + " server.properties")
#start the kafka service
stdin, stdout, stderr = ssh.exec_command("source .bash_profile; sh /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties")
time.sleep(3)
"""
解压redis安装包,并修改redis.conf配置文件,然后启动redis服务
"""
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf redis ")
print("删除redis文件夹")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && tar xzvf redis-4.0.14.tar.gz -C /usr/local" ,get_pty=True)
time.sleep(3)
print("成功解压redis包")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv redis-4.0.14 redis && chmod -R 777 redis")
print("redis文件夹赋予权限777")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && make > log_make.log", get_pty=True)
time.sleep(100)
print("make命令执行结束")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && make install PREFIX=/usr/local/redis > log_makeinstall.log", get_pty=True)
time.sleep(100)
print("make install命令执行结束")
# 修改redis.conf配置文件
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && sed -i " + "'" + "s#bind 127.0.0.1#bind " +'"' + Host +'"' + "#g' redis.conf", get_pty=True)
time.sleep(5)
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis && sed -i 's/# requirepass .*/requirepass taredis/g' redis.conf", get_pty=True)
time.sleep(5)
print("修改redis.conf文件成功")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/redis/bin ; ./redis-server ../redis.conf >> redis.log 2>&1 &")
time.sleep(5)
print("启动redis服务成功")
"""
解压nginx安装包
"""
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && rm -rf nginx")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && tar xzvf nginx-1.12.2.tar.gz -C /usr/local", get_pty=True)
time.sleep(3)
stdin, stdout, stderr = ssh.exec_command("cd /usr/local && mv nginx-1.12.2 nginx && chmod -R 777 nginx")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx; ./configure --prefix=/usr/local/nginx --conf-path=/usr/local/nginx/nginx.conf > log_configure.log", get_pty=True)
time.sleep(3)
err = stderr.readlines()
#out = stdout.readlines()
print("./configure命令输出结果打印开始:")
if (err):
print('error:')
print(err)
# else:
# print('out:')
# print(out)
print("./configure命令输出结果打印结束!")
print("ending")
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx; make > log_make.log", get_pty=True)
time.sleep(30)
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx; make install > log_make_install.log", get_pty=True)
time.sleep(30)
#remove any line containing the NGINX_HOME variable from /etc/profile
stdin, stdout, stderr = ssh.exec_command("sed -i '/export NGINX_HOME=/d' /etc/profile")
#remove uses of the $NGINX_HOME variable from /etc/profile, mainly the ;$NGINX_HOME/sbin string
stdin, stdout, stderr = ssh.exec_command("sed -i 's#;$NGINX_HOME/sbin##g' /etc/profile")
#add the NGINX_HOME variable to /etc/profile
stdin, stdout, stderr = ssh.exec_command("sed -i '/export PATH=/i\export NGINX_HOME=/usr/local/nginx' /etc/profile")
#append ;$NGINX_HOME/sbin to the end of the PATH entry
stdin, stdout, stderr = ssh.exec_command("sed -i 's#export PATH=.*#&;$NGINX_HOME/sbin#g' /etc/profile")
#upload a local nginx.conf template and edit the IP info inside it
###############################
sftp_upload_file()
###############################
#edit the nginx.conf config file
stdin, stdout, stderr = ssh.exec_command("cd /usr/local/nginx/conf; sed -i " + "'" + "s#server 10.20.24.89#server " + Host + "#g' nginx.conf", get_pty=True)
time.sleep(5)
stdin, stdout, stderr = ssh.exec_command("source /etc/profile; /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf", get_pty=True)
time.sleep(5)
err = stderr.readlines()
#out = stdout.readlines()
if (err):
print('error:')
print(err)
# else:
# print('out:')
# print(out)
print("ending")
ssh.close()
def sftp_upload_file():
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#ssh.connect(Host, Port, Username, Password)
#connect to the Linux server
ssh.connect(Host, Port, Username, Password)
transport = paramiko.Transport((Host, Port))
transport.banner_timeout = 10
transport.connect(username=Username, password=Password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put('./documents/nginx.conf', '/usr/local/nginx/conf/nginx.conf')
print("上传成功")
except Exception as e:
print(e)
transport.close()
if __name__ == '__main__':
ssh_exec_cmd()
|
July666/python_workspace_component
|
main.py
|
main.py
|
py
| 8,134 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41675461400
|
# Global mission
"""
1. Enter the base.
2. Sort the numbers in ascending order.
3. Count the -1 entries (items), then take down the monsters starting from the weakest.
4. If the power is not enough to kill a monster, consume one -1 item.
5. If the power is not enough and no -1 item is left to consume, the mission fails.
6. If you make it all the way through, the base is destroyed (spend all remaining items before leaving).
"""
import sys
input = sys.stdin.readline
N, M, power = map(int, input().split())
bases = [list(map(int, input().split())) for _ in range(N)]
for base in bases:
base.sort()
item_cnt = 0
start_i = 0
for i in range(len(base)):
if base[i] == -1:
item_cnt += 1
else:
start_i = i
break
i = start_i
while i < len(base):
if power >= base[i]:
power += base[i]
i += 1
else:
if item_cnt > 0:
power *= 2
item_cnt -= 1
else:
print(0)
exit(0)
for _ in range(item_cnt):
power *= 2
print(1)
|
jisupark123/Python-Coding-Test
|
contest/제2회 보라매컵 본선/30205.py
|
30205.py
|
py
| 1,124 |
python
|
ko
|
code
| 1 |
github-code
|
6
|
3668384154
|
#!/usr/local/bin/python3
import boto3
networkAclId = 'acl-26cc1243'
ignoreEntries = [100, 32767]
session = boto3.Session()
client = session.client('ec2')
acls = client.describe_network_acls()
def getMaxOfRuleNumbers(): # highest rule number among existing deny entries
result = [0]
for acl in acls['NetworkAcls']:
if acl['NetworkAclId'] == networkAclId:
for entries in acl['Entries']:
if entries['RuleNumber'] not in ignoreEntries:
if entries['RuleAction'] == 'deny':
result.append(entries['RuleNumber'])
return max(result)
def existEntry(cidrBlock): # True if a deny entry for this CIDR already exists
for acl in acls['NetworkAcls']:
if acl['NetworkAclId'] == networkAclId:
for entries in acl['Entries']:
if entries['RuleNumber'] not in ignoreEntries:
if entries['RuleAction'] == 'deny':
if cidrBlock == entries['CidrBlock']:
return True
return False
def createNetworkAclIngressEntry(ruleNumber, cidrBlock):
params = {}
params["NetworkAclId"] = networkAclId
params["RuleNumber"] = ruleNumber
params["Protocol"] = '-1'
params["CidrBlock"] = cidrBlock
params["Egress"] = False
params["RuleAction"] = "DENY"
client.create_network_acl_entry(**params)
def blockIp(ip): # add a deny entry for a single /32 address, if not already present
ip = ip + '/32'
if not existEntry(ip):
maxId = getMaxOfRuleNumbers()
maxId = maxId + 1
if maxId not in ignoreEntries:
createNetworkAclIngressEntry(maxId, ip)
print("BlockIP: %s" % ip)
blockIp('174.129.214.250')
|
debeando/MyScripts
|
aws/aws_network_acl_block_ip.py
|
aws_network_acl_block_ip.py
|
py
| 1,606 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71750331067
|
from flask import Flask, render_template, request, session, redirect, url_for
from flask_session import Session
import sqlite3
from ast import literal_eval
import datetime
app = Flask(__name__)
# Set the secret key to some random bytes. Keep this really secret!
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route('/')
def index():
if session.get("user_id") is None:
session["user_id"]=[]
#Get settings
conn = sqlite3.connect('my_database.db')
settings = conn.execute("select start,stop from settings order by rowid DESC limit 1").fetchone()
try: # the settings table may be empty on first run
start = literal_eval(settings[0])
stop = literal_eval(settings[1])
start = datetime.time(start[0],start[1],start[2])
stop = datetime.time(stop[0],stop[1],stop[2])
recent_settings = (str(start),str(stop))
conn.close()
print(recent_settings)
return render_template('index.html', username=session["user_id"], settings=recent_settings)
except:
return render_template('index.html', username=session["user_id"])
@app.route('/login', methods=["POST", "GET"])
def login():
if session.get("user_id") is None:
session["user_id"]=[]
if request.method == 'POST':
conn = sqlite3.connect('my_database.db')
username = request.form.get('username') # access the data inside
password = request.form.get('password')
if conn.execute("SELECT * FROM users WHERE user=?", (username,)).fetchone() is None:
return render_template("login.html", message="Username not found.", username=session["user_id"])
else: # Username exists, check pw
user = conn.execute("SELECT * FROM users where user=?", (username,)).fetchone()
print(user, user[0], user[1])
if user[1] == password:
session["user_id"] = user[0]
print(session["user_id"],'test')
return render_template("login.html", message=f"Logged in as {user[0]}.", username=session["user_id"])
else:
return render_template("login.html", message="Incorrect password, try again", username=session["user_id"])
else:
return render_template("login.html", username=session["user_id"])
@app.route('/update', methods=["POST", "GET"])
def update_settings():
if session.get("user_id") is None:
session["user_id"]=[]
#Get current settings
conn = sqlite3.connect('my_database.db')
message = ''
if request.method == 'POST':
start_time = request.form.get('start_time').split(':') # access the data inside
stop_time = request.form.get('stop_time').split(':')
print(start_time)
try:
start_time = [int(i) for i in start_time]
stop_time = [int(i) for i in stop_time]
conn.execute("insert into settings(start, stop) values (?,?)", (str(start_time), str(stop_time)))
conn.commit()
message="Updated successfully"
times = (start_time,stop_time)
except:
message = "Failed to split into datetime format, make sure to use format HH:MM:SS"
settings = conn.execute("select start,stop from settings order by rowid DESC limit 1").fetchone()
try:
start = literal_eval(settings[0])
stop = literal_eval(settings[1])
start = datetime.time(start[0],start[1],start[2])
stop = datetime.time(stop[0],stop[1],stop[2])
current_settings = (str(start),str(stop))
print(current_settings)
conn.close()
return render_template("update_settings.html", username=session["user_id"], settings=current_settings, message=message)
except:
return render_template("update_settings.html", username=session["user_id"], message=message)
@app.route('/logout')
def logout():
# remove the username from the session if it's there
session.pop('user_id', None)
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8000, debug=True)
|
mason-landry/lighttimer
|
lighttimer_app/app.py
|
app.py
|
py
| 4,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73307642428
|
import pandas as pd
import psycopg2 as ps
import datetime as dt
from datetime import datetime
from dateutil import tz
from config import POSTGRES_DBNAME, POSTGRES_PASSWORD, POSTGRES_USERNAME, POSTGRES_PORT, POSTGRES_ADDRESS, API_KEY
""" Functions for Flask App """
# dict with available exchange/trading pair for trade predictions
exchanges={'bitfinex': ['btc_usd', 'eth_usd', 'ltc_usd'],
'coinbase_pro': ['btc_usd', 'eth_usd', 'ltc_usd'],
'hitbtc': ['btc_usdt', 'eth_usdt', 'ltc_usdt']}
# Insert DB Credentials - Don't push to GH
credentials = {'POSTGRES_ADDRESS': POSTGRES_ADDRESS,
'POSTGRES_PORT': POSTGRES_PORT,
'POSTGRES_USERNAME': POSTGRES_USERNAME,
'POSTGRES_PASSWORD': POSTGRES_PASSWORD,
'POSTGRES_DBNAME': POSTGRES_DBNAME,
'API_KEY': API_KEY
}
# dictionary used to rename column values with correct time period
model_periods = {'bitfinex_ltc_usd': '1440T',
'bitfinex_btc_usd':'1200T',
'bitfinex_eth_usd': '1200T',
'hitbtc_ltc_usdt': '1440T',
'hitbtc_btc_usdt': '360T',
'hitbtc_eth_usdt': '1440T',
'coinbase_pro_btc_usd': '960T',
'coinbase_pro_eth_usd': '960T',
'coinbase_pro_ltc_usd': '960T'}
def create_conn(credentials):
""" Function that creates a connection with DB """
# creating connection
conn = ps.connect(host=credentials['POSTGRES_ADDRESS'],
database=credentials['POSTGRES_DBNAME'],
user=credentials['POSTGRES_USERNAME'],
password=credentials['POSTGRES_PASSWORD'],
port=credentials['POSTGRES_PORT'])
# creating cursor
cur = conn.cursor()
return conn, cur
def retrieve_one_trp(exchange, trading_pair, model_periods):
""" Retrieve the last trade recommender prediction from DB """
# to retrieve period of trp model
period = model_periods[exchange + '_' + trading_pair]
# create connection and cursor
conn, cur = create_conn(credentials)
# Change limit number to whatever amount of rows you want to retrieve
cur.execute("""SELECT * FROM prediction.trp
WHERE exchange = '{exchange}'
AND trading_pair = '{trading_pair}'
ORDER by p_time desc limit 1;""".format(trading_pair=trading_pair, exchange=exchange))
result = cur.fetchall()
# creates dataframe from results and rename columns
result = pd.DataFrame(result)
result = result.rename(
columns={0: 'p_time', 1: 'c_time', 2: 'exchange', 3: 'trading_pair', 4: 'prediction'})
# get the one value in p_time
pt = result['p_time'].values
# get UTC timezone
from_zone = tz.gettz('UTC')
# get PST timezone
to_zone = tz.gettz('US/Pacific')
utc = datetime.strptime(str(pt[0]), '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
pcf = utc.astimezone(to_zone)
# returning new timezone value to the df
result['p_time'] = str(pcf)[:-6]
# creating df to dictionary
result = result.to_dict()
# add the new period key and value to dictionary
result.update({'period': period})
# close connection
conn.close()
return result
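# Shape of the returned value (derived from the code above; shown as a hedged
# illustration): a column-keyed dict plus the model period, e.g.
# {'p_time': {0: '...'}, 'c_time': {0: ...}, 'exchange': {0: 'bitfinex'},
#  'trading_pair': {0: 'btc_usd'}, 'prediction': {0: ...}, 'period': '1200T'}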
# This is for retrieving arbitrage predictions from DB
def retrieve_one_arb(exchange_1, exchange_2, trading_pair):
''' Retrieve the last arbitrage prediction from DB '''
# create connection and cursor
conn, cur = create_conn(credentials)
try:
cur.execute("""SELECT * FROM prediction.arp
WHERE exchange_1 = '{exchange_2}'
AND exchange_2 = '{exchange_1}'
AND trading_pair = '{trading_pair}'
OR exchange_1 = '{exchange_1}'
AND exchange_2 = '{exchange_2}'
AND trading_pair = '{trading_pair}'
ORDER by p_time desc limit 1;""".format(trading_pair=trading_pair, exchange_2=exchange_2, exchange_1=exchange_1))
result = cur.fetchall()
# creates dataframe from results and rename columns
result = pd.DataFrame(result)
result = result.rename(
columns={0: 'p_time', 1: 'c_time', 2: 'exchange_1', 3: 'exchange_2', 4: 'trading_pair', 5: 'prediction'})
# get the one value in p_time
pt = result['p_time'].values
# get UTC time zone
from_zone = tz.gettz('UTC')
# get PST time zone
to_zone = tz.gettz('US/Pacific')
utc = datetime.strptime(str(pt[0]), '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
pcf = utc.astimezone(to_zone)
# returning new timezone value to the df
result['p_time'] = str(pcf)[:-6]
# creating df to dictionary
result = result.to_dict()
# closing connection
conn.close()
return result
except:
pass
def retrieve_tr_pred():
"""
Retrieves trade recommender predictions from DB and returns result in JSON format
"""
# create connection and cursor
conn, cur = create_conn(credentials)
# Gets the last 500 prediction rows from the trp table
cur.execute("""SELECT * FROM prediction.trp
ORDER by p_time desc limit 500;""")
result = cur.fetchall()
# creates dataframe from results and rename columns
result = pd.DataFrame(result)
result = result.rename(columns={0: 'p_time', 1: 'c_time', 2: 'exchange', 3: 'trading_pair', 4: 'prediction'})
# filter predictions to get one for each combination
result = result.drop_duplicates(subset=['exchange','trading_pair'])
# creating new column with exchange_trading_pair name combined
result['period'] = result['exchange'] +'_'+ result['trading_pair']
# use the values in period to rename them with the dict 'model_periods' values
result['period'] = result['period'].apply(lambda x: model_periods[x])
# drop unnecessary columns
result.drop(columns=['c_time'], inplace=True)
# Creating List of prediction time values
pt = result['p_time'].values
# getting UTC timezone
from_zone = tz.gettz('UTC')
# getting PST timezone
to_zone = tz.gettz('US/Pacific')
nt = []
# Looping thru 'p_time' values to change time to PST
for p in pt:
utc = datetime.strptime(str(p), '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
pcf = utc.astimezone(to_zone)
# append new PST time to nt list
nt.append(str(pcf)[:-6] + ' PST')
# Give new PST time value to 'p_time" column
result['p_time'] = nt
# Create json output
result = (result.groupby(['exchange', 'trading_pair'], as_index=True)
.apply(lambda x: x[['p_time', 'period', 'prediction']].to_dict('records')).to_dict())  # 'r' shorthand is deprecated
# close connection
conn.close()
return result
def retrieve_arb_pred():
"""
Retrieves arbitrage predictions from DB and returns result in JSON format
"""
# create connection and cursor
conn, cur = create_conn(credentials)
# Gets last 500 prediction results from arp table
cur.execute("""SELECT * FROM prediction.arp
ORDER by p_time desc limit 500;""")
result = cur.fetchall()
# creates dataframe from results and rename columns
result = pd.DataFrame(result)
result = result.rename(
columns={0: 'p_time', 1: 'c_time', 2: 'exchange_1', 3: 'exchange_2', 4: 'trading_pair', 5: 'prediction'})
# result = result.drop(columns='c_time')
result = result.drop_duplicates(subset=['exchange_1', 'exchange_2', 'trading_pair'])
# converts p_time column to datetime
result['datetime'] = pd.to_datetime(result['p_time'])
# build a 15-minute threshold and keep only results from the last 15 min
t = dt.datetime.now() - dt.timedelta(minutes=15)
result = result[result['datetime'] > t]
# drop unnecessary columns
result.drop(columns=['datetime', 'c_time'], inplace=True)
# creating a list of prediction time values
pt = result['p_time'].values
# Getting UTC timezone
from_zone = tz.gettz('UTC')
# Getting PST timezone
to_zone = tz.gettz('US/Pacific')
nt = []
# Looping thru 'p_time' values to change time to PST
for p in pt:
utc = datetime.strptime(str(p), '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
pcf = utc.astimezone(to_zone)
# appends new time to nt list
nt.append(str(pcf)[:-6] + ' PST')
# give new PST time value to 'p_time" column
result['p_time'] = nt
# Create json output
result = (result.groupby(['exchange_1', 'exchange_2', 'trading_pair'], as_index=True)
.apply(lambda x: x[['p_time', 'prediction']].to_dict('records')).to_dict())  # 'r' shorthand is deprecated
# close connection to DB
conn.close()
return result
|
dbreddyAI/cryptolyticapp
|
api_source_code/Flask_App/utils.py
|
utils.py
|
py
| 9,012 |
python
|
en
|
code
| 2 |
github-code
|
6
|
20955203674
|
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
# from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.support.ui import WebDriverWait
def Scrape(today):
# setup
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--mute-audio")
driver = webdriver.Chrome(options=chrome_options)
driver.get(f"https://nytcrosswordanswers.org/nyt-crossword-answers-{today}/")
print(driver.title)
time.sleep(3)
uls = driver.find_elements(By.TAG_NAME, "ul")
across = uls[1].find_elements(By.TAG_NAME, "li") # assumes the page's 2nd <ul> holds the across clues
down = uls[2].find_elements(By.TAG_NAME, "li") # and the 3rd holds the down clues
across_clues = []
across_answers = []
for group in across:
clue = group.find_element(By.TAG_NAME, "a").get_attribute('innerText')
answer = group.find_element(By.TAG_NAME, "span").get_attribute('innerText')
across_clues.append(clue)
across_answers.append(answer)
down_clues = []
down_answers = []
for group in down:
clue = group.find_element(By.TAG_NAME, "a").get_attribute('innerText')
answer = group.find_element(By.TAG_NAME, "span").get_attribute('innerText')
down_clues.append(clue)
down_answers.append(answer)
driver.quit()
return across_clues, across_answers, down_clues, down_answers
|
colin-lankau/Crossword-Practice-Tool
|
Scrape.py
|
Scrape.py
|
py
| 1,416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21700191213
|
# Reference : https://textgears.com/api/
import requests
text = input()
text = text.replace(' ','+')
url = "https://api.textgears.com/check.php?text=" + text + "&key=DEMO_KEY"
resp = requests.get(url)
resp = resp.json()
if resp['result']:
for err in resp['errors']:
print("Error at position : ",err['offset']+1)
print("Error : ",err['bad'])
print("Possible corrections are : ")
for corr in err['better']:
print(corr)
print()
else:
print(resp['description'])
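# Illustrative session (hypothetical input/output, based on the fields used above):
#   input:  He go to school
#   output: Error at position : 4 / Error : go / Possible corrections are : goes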
|
kapoor-rakshit/pyfiddle
|
grammar.py
|
grammar.py
|
py
| 475 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13638156261
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time
# Pin Definitions
output_pins = [17 , 22, 6, 19] # BCM pin
def main():
# Pin Setup:
# Board pin-numbering scheme
GPIO.setmode(GPIO.BCM)
# set pin as an output pin with optional initial state of HIGH
for p in output_pins:
GPIO.setup(p, GPIO.OUT, initial=GPIO.HIGH)
print("Starting demo now! Press CTRL+C to exit")
curr_value = GPIO.HIGH
try:
while True:
time.sleep(1)
# Toggle the output every second
for p in output_pins:
GPIO.output(p, curr_value)
print("Outputting {} to pin {}".format(curr_value, p))
curr_value ^= GPIO.HIGH
finally:
GPIO.cleanup()
if __name__ == '__main__':
main()
|
p513817/Custom_Coral_Teachable_Machine
|
project_teachable_v3/check_leds.py
|
check_leds.py
|
py
| 806 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3037264375
|
import os
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from sklearn.manifold import TSNE
from sklearn.manifold import Isomap
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from matplotlib import offsetbox
#directory = './avatars'
directory = '/home/kayla/pylearn/manifold/cupcakes'
pics = []
data = pd.DataFrame()
count = 0
for filename in os.listdir(directory):
f = os.path.join(directory, filename)
# checking if it is a file
if os.path.isfile(f):
# open the image as a PIL object
pics.append(Image.open(f))
pics[count] = pics[count].resize((100,100))
image = pd.Series(np.asarray(pics[count]).ravel())
data = pd.concat([data,image.to_frame().T],ignore_index=True)
count += 1
dims = np.asarray(pics[0]).shape
data = data.fillna(data.mean())
model = Isomap(n_components=10)
proj = model.fit_transform(data)
kmeans = KMeans(n_clusters=10)
kmeans.fit(proj)
clusters = kmeans.predict(proj)
data['cluster'] = clusters
#avg_data = data.groupby(by=['cluster']).sample().drop(['cluster'], \
# axis=1)
avg_data = data.groupby(by=['cluster']).median()
avg_image = []
for i in avg_data.index.astype('int'):
avg_image.append(avg_data.loc[i].to_numpy().reshape(dims).astype('int'))
fig, ax = plt.subplots(2,5,figsize=(8,3))
for axi, img in zip(ax.flat, avg_image):
axi.set(xticks=[],yticks=[])
axi.imshow(img,interpolation='nearest')
plt.show()
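# Hedged alternative sketch using the PCA import above instead of Isomap; a
# linear projection is faster and often adequate before k-means.
# proj_pca = PCA(n_components=10).fit_transform(data.drop(columns=['cluster']))
# clusters_pca = KMeans(n_clusters=10).fit_predict(proj_pca)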
|
estimatrixPipiatrix/decision-scientist
|
pythonCode/kmeans/kmeans_avatars.py
|
kmeans_avatars.py
|
py
| 1,534 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72528403069
|
import os, csv
from random import sample
import nltk as nlp
import pandas as pd
import matplotlib.pyplot as plt
sampleSize = 200
sampleTimes = 50
TTR = []
years =[]
# Iterate through word count/list file
with open('wordCountsNLTK.csv', 'r', encoding="ISO-8859-1") as csvFile:
reader = csv.reader(csvFile)
next(reader)
for row in reader:
tokens = nlp.word_tokenize(row[2])
TTRSampled =[]
print(row[0] + " " + row[1] + " " + "Population: " + str(len(tokens)))
if len(tokens) > sampleSize:
for i in range(sampleTimes):
tokensSample = sample(tokens,sampleSize)
types = nlp.Counter(tokensSample)
TTRSampled.append(len(types)/len(tokensSample)*100)
years.append(row[0])
TTR.append(sum(TTRSampled)/sampleTimes)
else:
pass
# Store values into PD dataframe and plot average word count values
dfTTR = pd.DataFrame({"Year": years, "TTR": TTR})
print(dfTTR)
dfTTR.groupby(["Year"]).mean().plot()
plt.xlabel('Year', fontsize=15)
plt.ylabel('TTR', fontsize=15)
plt.ylim([30, 90])
plt.title("Sampled Type Token Ratio per year")
plt.show()
|
stkeller/Replication-Thesis
|
Code/LexicalTTRSampling.py
|
LexicalTTRSampling.py
|
py
| 1,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23559591731
|
import numpy as np
import matplotlib.pyplot as plt
N = 1000 #number of draws
X = np.random.rand(N) #independent draws from the uniform law on [0,1]
#####################################
# Goal: compute the sequence of
# empirical means for n from 1 to N
#####################################
## Computing the empirical mean:
## with a loop
#####################################
sumEmp = X[0]
moyenneEmp = X[0] * np.ones(N)
for i in range(1,N):
sumEmp = sumEmp + X[i]
moyenneEmp[i] = sumEmp / (i+1)
######################################
### Computing the empirical mean:
### with numpy functions
######################################
integers1toN = np.arange(1,N+1) #an array containing the integers from 1 to N
moyenneEmp = np.cumsum(X) / integers1toN
######################################
## Plotting
######################################
plt.plot(integers1toN, moyenneEmp, color="b", label="Empirical mean")
plt.axhline(0.5, color="r", label="Expected value")
plt.legend(loc="best")
plt.show()
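######################################
## Added sketch (not in the original exercise): a 95% confidence band from the
## CLT. The variance of the uniform law on [0,1] is 1/12, so the band is
## 0.5 +/- 1.96*sqrt(1/12)/sqrt(n).
######################################
half_width = 1.96 * np.sqrt(1.0/12.0) / np.sqrt(integers1toN)
plt.plot(integers1toN, moyenneEmp, color="b", label="Empirical mean")
plt.axhline(0.5, color="r", label="Expected value")
plt.plot(integers1toN, 0.5 + half_width, "g--", label="95% CLT band")
plt.plot(integers1toN, 0.5 - half_width, "g--")
plt.legend(loc="best")
plt.show()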
|
FanJiang718/Courses-Exercises
|
MAP556/TP1/TP1 Python solutions-20181211/MAP556_PC1_Exo1_1_sol.py
|
MAP556_PC1_Exo1_1_sol.py
|
py
| 1,073 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
27281672101
|
import math
import time
import sys
import numpy as np
import operator
import random
import os
from kll import KLL
random.seed(30)
#def get_approx_res(value, i):
def get_approx_res(value, true_median, true_avg, true_tail):
#global all_median
#global all_avg
#global all_tail
#Using sketch to store digests
kll = KLL(sketch_size)
for v in value:
kll.update(v)
min_diff_50 = 1000
min_diff_99 = 1000
pint_median = 0
pint_tail = 0
for (ii, quantile) in kll.cdf():
diff = quantile - 0.5
if diff < 0:
diff = diff * -1
if diff<min_diff_50:
min_diff_50 = diff
pint_median = ii
diff = quantile - 0.99
if diff < 0:
diff = diff * -1
if diff<min_diff_99:
min_diff_99 = diff
pint_tail = ii
pint_avg=sum(value)/float(len(value))
#if all_median[i]!=0:
# error_median=(all_median[i]-pint_median)/float(all_median[i])*100
#if all_median[i]==0:
# error_median=0
#error_avg=(all_avg[i]-pint_avg)/float(all_avg[i])*100
#error_tail=(all_tail[i]-pint_tail)/float(all_tail[i])*100
if true_median != 0:
error_median = (true_median-pint_median)/float(true_median)*100
else:
error_median = 0
if true_avg != 0:
error_avg = (true_avg-pint_avg)/float(true_avg)*100
else:
error_avg = 0
if true_tail != 0:
error_tail = (true_tail-pint_tail)/float(true_tail)*100
else:
error_tail = 0
if error_median<0:
error_median=error_median*-1
if error_avg<0:
error_avg=error_avg*-1
if error_tail<0:
error_tail=error_tail*-1
return error_median, error_avg, error_tail
def get_final_res(packet_results_res):
res_map = {}
for packets in packets_range:
res_map[packets] = {}
for ap in all_approx:
res_map[packets][ap] = 0
for i in range(5):
for packets in packets_range:
for ap in all_approx:
if ap not in packet_results_res[i][packets]:
continue
res = packet_results_res[i][packets][ap]
res_map[packets][ap] += (round(sum(res)/float(len(res)),2)) # Average over independently repeated experiments
#res_map[packets][ap] += np.median(res) # Median over independently repeated experiments
for packets in packets_range:
for ap in all_approx:
res_map[packets][ap] = round(res_map[packets][ap]/5.0, 2) # Average relative error
return res_map
# Constant variables
dint_threshold = 1
#dint_threshold = 5
#dint_threshold = 10
sketch_size=100
#packets_range=list(range(100,1100,100)) # Use 5-hop Fat Tree Topology with one flow
packets_range=[100, 500, 1000, 5000, 10000]
all_approx=set() # approximation coefficients
approx_map={} # approximation coefficient -> bit width
for packets in packets_range:
for bits in [4,8]:
if bits==4:
ap=0.42
if bits==8:
ap=0.022
all_approx.add(ap)
approx_map[ap]=bits
# dev:packetnum:ap:approximate_values
approx={}
for i in range(5):
approx[i] = {}
for packets in packets_range:
approx[i][packets]={}
for ap in all_approx:
approx[i][packets][ap] = []
# dev:pktnum:ap:relative_errors
packet_results_avg={}
packet_results_median={}
packet_results_tail={}
for i in range(5):
packet_results_avg[i] = {}
packet_results_median[i] = {}
packet_results_tail[i] = {}
# dev:latencies
all_data = {}
for i in range(5):
all_data[i] = []
f=open("experiments/delays/processed_data","r")
for line in f:
digests=line.strip().split(" ")
assert(len(digests)>0 and len(digests)<=5)
iscontinue = False
for digest in digests:
if int(digest) < 0:
iscontinue = True
break
if iscontinue:
continue
for i in range(len(digests)):
all_data[i].append(int(digests[i]))
f.close()
all_median, all_avg, all_tail = [], [], []
for i in range(5):
all_data[i] = sorted(all_data[i])
all_median.append(np.median(all_data[i]))
all_avg.append(sum(all_data[i])/float(len(all_data[i])))
all_tail.append(np.percentile(all_data[i],99))
for packets in packets_range:
for i in range(5):
if packets not in packet_results_avg[i]:
packet_results_avg[i][packets]={}
if packets not in packet_results_median[i]:
packet_results_median[i][packets]={}
if packets not in packet_results_tail[i]:
packet_results_tail[i][packets]={}
truth, pint, dint_prev1, dint_prev2 = {}, {}, {}, {}
for i in range(5):
truth[i] = []
#pint[i] = []
dint_prev1[i], dint_prev2[i] = 0, 0
f=open("experiments/delays/processed_data","r")
for line in f:
digests=line.strip().split(" ")
assert(len(digests)>0 and len(digests)<=5)
iscontinue = False
for digest in digests:
if int(digest) < 0:
iscontinue = True
break
if iscontinue:
continue
for i in range(len(digests)):
digest = int(digests[i])
truth[i].append(digest)
if (random.randint(1, 2) == 1) or (i==len(digests)-1): # simulate PINT's global-hash sampling with a fair coin flip
# PINT
#pint[i].append(digest)
for ap in all_approx: # Value approximation
if digest==0:
approx[i][packets][ap].append(0)
continue
range_1=int(math.log(digest, (1+ap)**2))
range_2=int(math.log(digest, (1+ap)**2)+0.5)
approx_value_1=(1+ap)**(2*range_1)
approx_value_2=(1+ap)**(2*range_2)
diff_1=digest-approx_value_1
if diff_1<0:
diff_1=-1*diff_1
diff_2=digest-approx_value_2
if diff_2<0:
diff_2=-1*diff_2
if diff_1<=diff_2:
approx[i][packets][ap].append(int(approx_value_1))
if diff_1>diff_2:
approx[i][packets][ap].append(int(approx_value_2))
#if len(pint[i])==packets: # E.g., Collect every 100 packets for dev i
if len(truth[0])==packets: # E.g., Collect every 100 INT events
for i in range(len(digests)):
true_median=np.median(truth[i])
true_avg=sum(truth[i])/float(len(truth[i]))
true_tail=np.percentile(truth[i],99)
for ap in all_approx:
value=sorted(approx[i][packets][ap])
if len(value)<=1:
continue
##error_median, error_avg, error_tail = get_approx_res(value, i)
error_median, error_avg, error_tail = get_approx_res(value, true_median, true_avg, true_tail)
#error_median, error_avg, error_tail = get_approx_res(value, all_median[i], all_avg[i], all_tail[i])
if ap not in packet_results_avg[i][packets]:
packet_results_avg[i][packets][ap]=[]
packet_results_avg[i][packets][ap].append(error_avg)
if ap not in packet_results_median[i][packets]:
packet_results_median[i][packets][ap]=[]
packet_results_median[i][packets][ap].append(error_median)
if ap not in packet_results_tail[i][packets]:
packet_results_tail[i][packets][ap]=[]
packet_results_tail[i][packets][ap].append(error_tail)
approx[i][packets]={}
for ap in all_approx:
approx[i][packets][ap]=[]
#pint[i]=[]
truth[i] = []
f.close()
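# Standalone sketch of the value approximation used in the loop above (my
# restatement for clarity): a digest is rounded to the nearest power of
# (1+ap)^2, which bounds the relative error by roughly ap.
def pint_quantize(digest, ap):
    if digest == 0:
        return 0
    lo = int(math.log(digest, (1+ap)**2))        # floor exponent
    hi = int(math.log(digest, (1+ap)**2) + 0.5)  # rounded exponent
    v_lo, v_hi = (1+ap)**(2*lo), (1+ap)**(2*hi)
    return int(v_lo) if abs(digest - v_lo) <= abs(digest - v_hi) else int(v_hi)
# e.g. pint_quantize(1000, 0.022) stays within about 2.2% of 1000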
avg_map = get_final_res(packet_results_avg)
os.system("mkdir -p final_results/delays_PINT/")
fw=open("final_results/delays_PINT/avg_delay","w")
fw.write("# of packets,PINT4,value,PINT8,value\n")
for k,v in avg_map.items():
packets = k
write_string=str(packets)
for ap,res in v.items():
write_string=write_string+","+"PINT"+str(approx_map[ap])+","+str(res)
fw.write(write_string+"\n")
fw.close()
median_map = get_final_res(packet_results_median)
fw=open("final_results/delays_PINT/median_delay","w")
fw.write("# of packets,PINT4,value,PINT8,value\n")
for k,v in median_map.items():
packets=k
write_string=str(packets)
for ap,res in v.items():
write_string=write_string+","+"PINT"+str(approx_map[ap])+","+str(res)
fw.write(write_string+"\n")
fw.close()
tail_map = get_final_res(packet_results_tail)
#packet_results_tail=sorted(packet_results_tail.items(),key=operator.itemgetter(0))
fw=open("final_results/delays_PINT/tail_delay","w")
fw.write("# of packets,PINT4,value,PINT8,value\n")
for k,v in tail_map.items():
packets=k
write_string=str(packets)
for ap,res in v.items():
write_string=write_string+","+"PINT"+str(approx_map[ap])+","+str(res)
fw.write(write_string+"\n")
fw.close()
|
deltaint-project/deltaint
|
Mininet-DINT/generate_delay_results_PINT.py
|
generate_delay_results_PINT.py
|
py
| 9,232 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16233043242
|
from flask import Flask, url_for, request, json, session
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
import random
from passlib.hash import sha256_crypt
app = Flask(__name__)
app.secret_key = "alon"
app.config.from_object(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///site.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
CORS(app)
db = SQLAlchemy(app)
def generate_pin(digits):
pin = ""
for k in range(digits):
pin += str(random.randint(0, 9))
return pin
class User(db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100), unique=True, nullable=False)
password = db.Column(db.String(100), nullable=False)
quizzes_done = db.Column(db.Integer, default=0)
correct_answers = db.Column(db.Integer, default=0)
# relationship with Quiz
quizzes = db.relationship("Quiz", backref="author", lazy=True, cascade="all, delete-orphan")
class Quiz(db.Model):
__tablename__ = "quiz"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), default="MyQuiz")  # f"Quiz{id}" here would capture the Column object, not the row id
pin = db.Column(db.String(8), unique=True)
published = db.Column(db.Boolean, default=False, nullable=False)
# connection with user
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
# relationship with ChoiceQuestion
choice_questions = db.relationship("ChoiceQuestion", backref="author", lazy=True, cascade="all, delete-orphan")
# relationship with Status
statuses = db.relationship("Status", backref="author", lazy=True, cascade="all, delete-orphan")
def __repr__(self):
return f"{self.name}, pin={self.pin}"
def get_json(self, with_answers=False):
my_json = {"name": self.name, "pin": self.pin, "published": self.published, "choice_questions": []}
for choice_question in self.choice_questions:
my_json["choice_questions"].append(choice_question.get_json(with_answers))
return my_json
def get_statuses_json(self):
statuses_json = []
for status in self.statuses:
statuses_json.append(status.get_json())
return statuses_json
class ChoiceQuestion(db.Model):
__tablename__ = "choice_question"
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
question = db.Column(db.String(120))
# connection to quiz
quiz_id = db.Column(db.Integer, db.ForeignKey("quiz.id"), nullable=False)
# relationship with choice
choices = db.relationship("Choice", backref="author", lazy=True, cascade="all, delete-orphan")
def __repr__(self):
string = f"Q{self.number}"
string += f"\nquestion: {self.question}"
for choice in self.choices:
string += "\n" + choice.__repr__()
return string
def get_json(self, with_answers=False):
my_json = {"number": self.number, "question": self.question, "choices": []}
for choice in self.choices:
my_json["choices"].append(choice.get_json(with_answers))
return my_json
class Choice(db.Model):
__tablename__ = "choice"
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.String(120))
correct = db.Column(db.Boolean)
# connection to question
choice_question_id = db.Column(db.Integer, db.ForeignKey("choice_question.id"), nullable=False)
def __repr__(self):
return f"text={self.text}, correct={self.correct}"
def get_json(self, with_answers=False):
if not with_answers:
return {"text": self.text}
else:
return {"text": self.text, "correct": self.correct}
class Status(db.Model):
__tablename__ = "status"
id = db.Column(db.Integer, primary_key=True)
grade = db.Column(db.Integer)
amount_played = db.Column(db.Integer, default=1)
user_id = db.Column(db.Integer, nullable=False)
# connection to quiz
quiz_id = db.Column(db.Integer, db.ForeignKey("quiz.id"), nullable=False)
def get_json(self):
user = User.query.filter_by(id=self.user_id).first()
return {"username": user.username, "grade": self.grade, "amount": self.amount_played}
# create a new user. Return true if created, false otherwise
@app.route("/user/signup", methods=["POST"])
def sign_up():
response = request.get_json()
username = response["username"]
password = response["password"]
hashed_password = sha256_crypt.hash(password)  # hash() is passlib's current API; encrypt() is deprecated
user = User.query.filter_by(username=username).first()
if user is not None:
return {"created": "false"}
new_user = User(username=username, password=hashed_password)
db.session.add(new_user)
db.session.commit()
return {"created": "true"}
# try to login. Return user id if able to login
@app.route("/user/login", methods=["GET"])
def login():
data = json.loads(request.args.get("data"))
username = data["username"]
password = data["password"]
user = User.query.filter_by(username=username).first()
if user is None or not sha256_crypt.verify(password, user.password):
return {"user_id": "None"}
return {"user_id": user.id}
# get user id. Returns stats of user with this id
@app.route("/home/userinfo", methods=["GET"])
def user_info():
user_id = json.loads(request.args.get("data"))
user = User.query.filter_by(id=user_id).first()
if user is None:
return {"found": "false"}
return_data = {"username": user.username,
"quizzes_done": user.quizzes_done,
"correct_answers": user.correct_answers,
"quizzes_made": len(user.quizzes),
"found": "true"
}
return return_data
# create a new quiz for user with given id. Returns the pin.
@app.route("/create/newQuiz", methods=["GET"])
def new_quiz():
user_id = json.loads(request.args.get("data"))
pin = generate_pin(8)
while Quiz.query.filter_by(pin=pin).first() is not None:
pin = generate_pin(8)
quiz = Quiz(name="MyQuiz", pin=pin, user_id=user_id)
db.session.add(quiz)
db.session.commit()
return {"pin": pin}
# get current state of quiz questions and update quiz accordingly for user.
@app.route("/create/postQuestions", methods=["POST"])
def post_questions():
response = request.get_json()["quiz"]
user_id = request.get_json()["user_id"]
pin = response["pin"]
quiz = Quiz.query.filter_by(pin=pin, user_id=user_id).first()
if quiz is None or quiz.published:
return {"posted": "false"}
questions = response["questions"]
quiz.name = response["name"]
quiz.choice_questions = []
for question in questions:
if question["type"] == "ChoiceQuestion":
number = question["number"]
question_text = question["question"]
question_db = ChoiceQuestion(number=number, question=question_text, quiz_id=quiz.id)
db.session.add(question_db)
db.session.commit()
for choice in question["choices"]:
text = choice["text"]
correct = choice["correct"]
choice_db = Choice(text=text, correct=correct, choice_question_id=question_db.id)
db.session.add(choice_db)
db.session.commit()
return {"posted": "true"}
# publish quiz of certain pin, allowing others to play it.
@app.route("/create/publishQuiz", methods=["POST"])
def publish_quiz():
pin = request.get_json()["pin"]
user_id = request.get_json()["user_id"]
quiz = Quiz.query.filter_by(pin=pin, user_id=user_id).first()
if quiz is None:
return {"published": "false"}
quiz.published = True
db.session.commit()
return {"published": "true"}
# get pin of quiz and return whether a quiz with that pin exists and was published
@app.route("/enterPin/quizExists", methods=["GET"])
def quiz_exists():
data = json.loads(request.args.get("data"))
pin = data["pin"]
quiz = Quiz.query.filter_by(pin=pin).first()
if (quiz is None) or (not quiz.published):
return {"exists": "false", "pin": pin}
return {"exists": "true", "pin": pin}
# get pin and return a published quiz with that pin
@app.route("/play/getQuiz", methods=["GET"])
def get_quiz():
data = json.loads(request.args.get("data"))
pin = data["pin"]
quiz = Quiz.query.filter_by(pin=pin).first()
if (quiz is None) or (not quiz.published):
return {"exists": "false", "pin": pin}
return quiz.get_json()
# gets the quiz pin, user id and the player's answers. Returns the number of questions answered correctly.
# also updates the user's stats accordingly
@app.route("/play/correctAnswers", methods=["POST"])
def correct_answers():
response = request.get_json()["quiz"]
user_id = request.get_json()["user_id"]
pin = response["pin"]
quiz = Quiz.query.filter_by(pin=pin).first()
if (quiz is None) or (not quiz.published):
return {"error": "cannot play quiz"}
correct = 0
# go over each question sent
for question in response["questions"]:
if question["type"] == "ChoiceQuestion":
is_correct = True
# find matching question in quiz
number = question["number"]
question_text = question["question"]
quiz_question = ChoiceQuestion.query.filter_by(quiz_id=quiz.id, number=number, question=question_text).first()
# go over each choice in question sent
for choice in question["choices"]:
# find matching choice in question
text = choice["text"]
question_choice = Choice.query.filter_by(text=text, choice_question_id=quiz_question.id).first()
if choice["correct"] != question_choice.correct:
is_correct = False
break
if is_correct:
correct += 1
user = User.query.filter_by(id=user_id).first()
grade = (correct * 100) / len(quiz.choice_questions)
# update user stats
user.correct_answers += correct
user.quizzes_done += 1
# create new status
status = Status.query.filter_by(user_id=user_id, quiz_id=quiz.id).first()
if status is None:
new_status = Status(grade=grade, user_id=user_id, quiz_id=quiz.id)
db.session.add(new_status)
else:
status.amount_played += 1
status.grade = max(grade, status.grade)
db.session.commit()
return {"correctAnswers": correct}
# gets quiz pin and returns all user statuses for that quiz
@app.route("/leaderboard/getStatuses", methods=["GET"])
def get_statuses():
data = json.loads(request.args.get("data"))
pin = data["pin"]
quiz = Quiz.query.filter_by(published=True, pin=pin).first()
if quiz is None:
return {"found": "false"}
return {"found": "true", "statuses": quiz.get_statuses_json()}
# get a list of all quizzes this user has created but not published
@app.route("/edit/getUserQuizzes", methods=["GET"])
def get_user_quizzes():
data = json.loads(request.args.get("data"))
user = User.query.filter_by(id=data).first()
json_to_return = []
for quiz in user.quizzes:
if not quiz.published:
json_to_return.append(quiz.get_json())
return {"quizzes": json_to_return}
# get quiz with answers for user to edit
@app.route("/create/getQuizWithAnswers", methods=["GET"])
def get_quiz_with_answers():
data = json.loads(request.args.get("data"))
user_id = data["user_id"]
pin = data["pin"]
quiz = Quiz.query.filter_by(user_id=user_id, pin=pin, published=False).first()
return {"quiz": quiz.get_json(True)}
# delete quiz of certain pin created by user
@app.route("/edit/deleteQuiz", methods=["GET"])
def delete_quiz():
data = json.loads(request.args.get("data"))
user_id = data["user_id"]
pin = data["pin"]
quiz = Quiz.query.filter_by(user_id=user_id, pin=pin, published=False).first()
if quiz is not None:
db.session.delete(quiz)
db.session.commit()
return {"deleted": "true", "pin": pin}
return {"deleted": "false", "pin": pin}
if __name__ == '__main__':
db.create_all() # create tables before starting the server; app.run() blocks
app.run()
|
nivschuman/QuizServer
|
main.py
|
main.py
|
py
| 12,314 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22449340075
|
"""
Реализация программу взаимодействия виджетов друг с другом:
Форма для приложения (ui/d_eventfilter_settings.ui)
Программа должна обладать следующим функционалом:
1. Добавить для dial возможность установки значений кнопками клавиатуры(+ и -),
выводить новые значения в консоль
2. Соединить между собой QDial, QSlider, QLCDNumber
(изменение значения в одном, изменяет значения в других)
3. Для QLCDNumber сделать отображение в различных системах счисления (oct, hex, bin, dec),
изменять формат отображаемого значения в зависимости от выбранного в comboBox параметра.
4. Сохранять значение выбранного в comboBox режима отображения
и значение LCDNumber в QSettings, при перезапуске программы выводить
в него соответствующие значения
"""
from PySide6 import QtWidgets, QtCore, QtGui
from PySide6.QtCore import Qt
class Window(QtWidgets.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.initUi()
self.initSignal()
self.settings = QtCore.QSettings('Data')
self.loadData()
def initUi(self):
self.dial = QtWidgets.QDial()
self.dial.setRange(0, 100)
self.dial.installEventFilter(self)
self.comboBox = QtWidgets.QComboBox()
self.comboBox.addItem("dec")
self.comboBox.addItem("hex")
self.comboBox.addItem("oct")
self.comboBox.addItem("bin")
self.LCDNumber = QtWidgets.QLCDNumber()
self.slider = QtWidgets.QSlider()
self.slider.setOrientation(Qt.Orientation.Horizontal)
layout1 = QtWidgets.QVBoxLayout()
layout2 = QtWidgets.QHBoxLayout()
layout3 = QtWidgets.QVBoxLayout()
layout1.addWidget(self.comboBox)
layout1.addWidget(self.LCDNumber)
layout2.addWidget(self.dial)
layout2.addLayout(layout1)
layout3.addLayout(layout2)
layout3.addWidget(self.slider)
self.setLayout(layout3)
def initSignal(self):
self.slider.valueChanged.connect(self.dial.setValue)
self.slider.valueChanged.connect(self.LCDNumber.display)
self.dial.valueChanged.connect(self.slider.setValue)
self.dial.valueChanged.connect(self.LCDNumber.display)
self.comboBox.currentTextChanged.connect(self.updateLcd)
def keyPressEvent(self, event: QtGui.QKeyEvent):
if event.key() == QtCore.Qt.Key_Plus:
self.dial.setValue(self.dial.value() + 1)
print(self.dial.value()) # echo the new value to the console, per requirement 1
elif event.key() == QtCore.Qt.Key_Minus:
self.dial.setValue(self.dial.value() - 1)
print(self.dial.value())
else:
super().keyPressEvent(event)
def updateLcd(self):
if self.comboBox.currentText() == "dec":
self.LCDNumber.setDecMode()
elif self.comboBox.currentText() == "bin":
self.LCDNumber.setBinMode()
elif self.comboBox.currentText() == "oct":
self.LCDNumber.setOctMode()
elif self.comboBox.currentText() == "hex":
self.LCDNumber.setHexMode()
def loadData(self):
self.LCDNumber.display(self.settings.value("Value", ""))
self.comboBox.setCurrentText(self.settings.value("Text", ""))
def closeEvent(self, event: QtGui.QCloseEvent):
self.settings.setValue("Value", self.LCDNumber.intValue())
self.settings.setValue("Text", self.comboBox.currentText())
if __name__ == "__main__":
app = QtWidgets.QApplication()
window = Window()
window.show()
app.exec()
|
julsmi/DevPyQt
|
scripts/Практика 2/b_Самостоятельная + домашняя работа/d_eventfilter_settings.py
|
d_eventfilter_settings.py
|
py
| 3,953 |
python
|
ru
|
code
| null |
github-code
|
6
|
18211764194
|
import argparse
import os
import unittest
from bin.get_alleles_from_srst2_mlst import get_mismatch_and_depth, get_new_and_existing_alleles, write_alleles_file, get_arguments
class TestProcessResults(unittest.TestCase):
TEST_OUTPUT_PREFIX = 'test'
TEST_FILE = 'tests/test_data/input/test__mlst__Streptococcus_agalactiae_MLST_alleles__results.txt'
TEST_OUT1 = 'tests/test_data/output/test_mlst_alleles.txt'
TEST_OUT2 = 'tests/test_data/output/test_mlst_alleles2.txt'
def test_get_mismatch_and_depth(self):
actual = get_mismatch_and_depth(self.TEST_FILE)
self.assertEqual(actual, ('adhP_1/1snp', 173.614142857, 'ST-1'))
def test_get_new_and_existing_alleles(self):
get_new_and_existing_alleles(('adhP_1/1snp', 173.614142857, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)  # writes the alleles file read back below
f = open(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """Alleles found\nadhP_1\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt')
def test_get_new_and_existing_alleles_low_depth(self):
get_new_and_existing_alleles(('adhP_1/1snp', 29.99, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """test: No new MLST alleles were found with sufficient read depth above 30.\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt')
def test_get_new_and_existing_alleles_multi_alleles(self):
get_new_and_existing_alleles(('adhP_1/1snp;pheS_1/1snp', 173.614142857, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """Alleles found\nadhP_1\npheS_1\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_new_mlst_alleles.txt')
def test_get_new_and_existing_alleles_no_mismatches(self):
get_new_and_existing_alleles(('0', 173.614142857, 'ST-1'), 30, self.TEST_OUTPUT_PREFIX)
f = open(self.TEST_OUTPUT_PREFIX + '_existing_mlst_alleles.txt', "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """ID\tST\ntest\tST-1\n""")
os.remove(self.TEST_OUTPUT_PREFIX + '_existing_mlst_alleles.txt')
def test_alleles_file(self):
write_alleles_file(['Alleles found', 'adhP_1', 'pheS_1'], self.TEST_OUT1)
f = open(self.TEST_OUT1, "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """Alleles found\nadhP_1\npheS_1\n""")
def test_alleles_file_without_alleles(self):
write_alleles_file(['No new MLST alleles were found.'], self.TEST_OUT2)
f = open(self.TEST_OUT2, "r")
actual = "".join(f.readlines())
self.assertEqual(actual, """No new MLST alleles were found.\n""")
def test_arguments(self):
actual = get_arguments().parse_args(
['--mlst_results_file', 'mlst_file', '--min_read_depth', '30',
'--output_prefix', 'out'])
self.assertEqual(actual,
argparse.Namespace(mlst='mlst_file', min_depth=30, output='out'))
def test_arguments_short_options(self):
actual = get_arguments().parse_args(
['-m', 'mlst_file', '-d', '30', '-o', 'out'])
self.assertEqual(actual,
argparse.Namespace(mlst='mlst_file', min_depth=30, output='out'))
|
sanger-bentley-group/GBS-Typer-sanger-nf
|
tests/get_alleles_from_srst2_mlst_test.py
|
get_alleles_from_srst2_mlst_test.py
|
py
| 3,532 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72931175228
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#Read the dataset and pull out what is needed
df= pd.read_csv('DATASET_MobileRobotNav.csv', sep=';')
#Show the dataset description (mean, row count, min, max, etc.)
print(df.describe())
#print(df)
#Split the data into training and test sets
from sklearn.model_selection import train_test_split
#labels = np.array(df['Out_Vel_Linear(m/s)','Out_Vel_Angula(rad/s)'])
features= df.drop(columns=['Out_Vel_Linear(m/s)','Out_Vel_Angula(rad/s)'], axis = 1)
#print(features)
labels= df['Out_Vel_Linear(m/s)']
#y2= df['Out_Vel_Angula(rad/s)']
features_list = list(features.columns)
print(features_list)
features_train, features_test, labels_train, labels_test= train_test_split(features,labels,test_size= 0.3)
print('Training Features Shape:', features_train.shape)
print('Training Labels Shape:', labels_train.shape)
print('Testing Features Shape:', features_train.shape)
print('Testing Labels Shape:', labels_test.shape)
from sklearn.neural_network import MLPRegressor
mlp = MLPRegressor(hidden_layer_sizes=(8,8,8), activation='tanh', solver='adam', max_iter=500, alpha = 0.001)
mlp.fit(features_train,labels_train)
#Run predictions on the test set held out earlier
predictions= mlp.predict(features_test)
print("a" ,predictions, "B:" ,labels_test)
#Compute absolute errors (switch to squared error later?)
errors = abs(predictions - labels_test)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'm/s.')  # units are m/s, not degrees
#Compute the percentage error per sample
mape = 100 * (errors / labels_test)
#Show the accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
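#Hedged improvement sketch (my addition): MLPs are sensitive to feature scale,
#so standardizing the inputs usually helps convergence.
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
scaled_mlp = make_pipeline(StandardScaler(),
                           MLPRegressor(hidden_layer_sizes=(8,8,8), activation='tanh',
                                        solver='adam', max_iter=500, alpha=0.001))
scaled_mlp.fit(features_train, labels_train)
print('Scaled-pipeline score (R^2):', scaled_mlp.score(features_test, labels_test))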
|
jpavargasf/ML_RandomForest_ANN
|
Pedro/neural_network.py
|
neural_network.py
|
py
| 1,716 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31824180951
|
import tarfile
def extract(tar_path, target_path):
try:
tar = tarfile.open(tar_path, "r:gz")
filenames = tar.getnames()
for filename in filenames:
tar.extract(filename, target_path)
tar.close()
except Exception as e:
print('extract error %s' %e)
extract('/tmp/tarball.tar.gz', '/tmp/x')
|
Jerry-Luo/geek-python
|
stdlib/UnCompressFile.py
|
UnCompressFile.py
|
py
| 351 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41550685644
|
import os, sys, serial, time, traceback
from . codes import CMDTYPE, LEDTYPE, SPIChipsets, BufferChipsets
from . devices import Devices
from . import io
from .. channel_order import ChannelOrder
from .. driver_base import DriverBase
from ... util import exception, log, util
from ... drivers.return_codes import (
RETURN_CODES, print_error, raise_error, BiblioSerialError)
class Serial(DriverBase):
"""Main driver for Serial based LED strips and devices like the AllPixel
Provides the same parameters of
:py:class:`bibliopixel.drivers.driver_base.DriverBase` as
well as those below:
:param ledtype: LED protocol type. One of
:py:func:`bibliopixel.drivers.ledtype.LEDTYPE`
:param str dev: Serial device address/path. If left empty, first device
found will be used.
:param int spi_speed: SPI datarate for applicable LED types, in MHz
:param int restart_timeout: Seconds to wait between reconfigure reboot
and reconnection attempt
:param int device_id: Device ID to connect to.
:param str hardwareID: A valid USB VID:PID pair such as "1D50:60AB"
:param int baudrate: Baud rate to connect to serial device
"""
def __init__(self, ledtype=None, num=0, dev='',
c_order='RGB', spi_speed=2,
gamma=None, restart_timeout=3,
device_id=None, hardwareID="1D50:60AB",
baudrate=921600, **kwds):
if ledtype is None:
raise ValueError('Must provide ledtype value!')
if num == 0:
raise ValueError('Must provide num value >0!')
super().__init__(num, c_order=c_order, gamma=gamma, **kwds)
self.devices = Devices(hardwareID, baudrate)
if not (1 <= spi_speed <= 24 and ledtype in SPIChipsets):
spi_speed = 1
self._spi_speed = spi_speed
self._com = None
self._ledtype = ledtype
self._bufPad = 0
self.dev = dev
self.device_version = 0
self.device_id = device_id
self._sync_packet = util.generate_header(CMDTYPE.SYNC, 0)
if self.device_id is not None and not (0 <= self.device_id <= 255):
raise ValueError("device_id must be between 0 and 255")
resp = self._connect()
if resp == RETURN_CODES.REBOOT: # reboot needed
log.info(REBOOT_MESSAGE)
self._close()
time.sleep(restart_timeout)
resp = self._connect()
if resp != RETURN_CODES.SUCCESS:
raise_error(resp)
else:
log.info("Reconfigure success!")
elif resp != RETURN_CODES.SUCCESS:
raise_error(resp)
        if self._ledtype in SPIChipsets:
log.info("Using SPI Speed: %sMHz", self._spi_speed)
def cleanup(self):
if self._com:
log.info("Closing connection to: %s", self.dev)
exception.report(self._close)
self._com = None
def _connect(self):
try:
if not self.dev:
self.devices.find_serial_devices()
idv = self.devices.get_device(self.device_id)
self.device_id, self.dev, self.device_version = idv
try:
self._com = serial.Serial(
self.dev, baudrate=self.devices.baudrate, timeout=5)
except serial.SerialException:
ports = self.devices.devices.values()
error = "Invalid port specified. No COM ports available."
if ports:
error = ("Invalid port specified. Try using one of: \n" +
"\n".join(ports))
log.info(error)
raise BiblioSerialError(error)
packet = util.generate_header(CMDTYPE.SETUP_DATA, 4)
packet.append(self._ledtype) # set strip type
byteCount = self.bufByteCount()
if self._ledtype in BufferChipsets:
if self._ledtype == LEDTYPE.APA102 and self.device_version >= 2:
pass
else:
self._bufPad = BufferChipsets[
self._ledtype](self.numLEDs) * 3
byteCount += self._bufPad
packet.append(byteCount & 0xFF) # set 1st byte of byteCount
packet.append(byteCount >> 8) # set 2nd byte of byteCount
packet.append(self._spi_speed)
self._write(packet)
code = self._read()
if code is None:
self.devices.error()
return code
except serial.SerialException as e:
error = ("Unable to connect to the device. Please check that "
"it is connected and the correct port is selected.")
log.error(traceback.format_exc())
log.error(error)
raise e
def set_device_brightness(self, brightness):
packet = util.generate_header(CMDTYPE.BRIGHTNESS, 1)
        packet.append(brightness)
self._write(packet)
code = self._read()
if code == RETURN_CODES.SUCCESS:
return True
print_error(code)
def _send_packet(self):
if not self._com:
return
self._write(self._packet)
code = self._read()
if code is None:
self.devices.error(fail=False)
elif code != RETURN_CODES.SUCCESS:
print_error(code)
else:
self._flushInput()
return True
def _compute_packet(self):
count = self.bufByteCount() + self._bufPad
self._packet = util.generate_header(CMDTYPE.PIXEL_DATA, count)
self._render()
self._packet.extend(self._buf)
self._packet.extend([0] * self._bufPad)
def _send_sync(self):
self._write(self._sync_packet)
def _read(self):
return io.read_byte(self._com)
def _close(self):
try:
return self._com and self._com.close()
except Exception:
log.exception('Serial exception in close')
finally:
self._com = None
def _write(self, packet):
try:
return self._com and self._com.write(packet)
except Exception:
log.exception('Serial exception in write')
def _flushInput(self):
try:
return self._com and self._com.flushInput()
except Exception:
log.exception('Serial exception in flushInput')
class TeensySmartMatrix(Serial):
"""Variant of :py:class:`Serial` for use with the Teensy and
SmartMatrix library. The following provides compatible firmware:
https://github.com/ManiacalLabs/BiblioPixelSmartMatrix
All parameters are the same as with :py:class:`Serial`, except the
default hardwareID is changed to match the Teensy.
The main difference is that SmartMatrix requires a sync command to keep
multiple instances of this driver running smoothly.
"""
def __init__(self, width, height, dev="", device_id=None,
hardwareID="16C0:0483", **kwds):
super().__init__(ledtype=LEDTYPE.GENERIC, num=width * height,
device_id=device_id, hardwareID=hardwareID, **kwds)
self.sync = self._send_sync
REBOOT_MESSAGE = """Reconfigure and reboot needed!
Waiting for controller to restart..."""
from ... util import deprecated
if deprecated.allowed(): # pragma: no cover
DriverSerial = Serial
DriverTeensySmartMatrix = TeensySmartMatrix
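# Hypothetical usage sketch (the LEDTYPE value, pixel count and SPI speed
# below are illustrative assumptions, not values taken from this module):
#
#     driver = Serial(ledtype=LEDTYPE.APA102, num=64, spi_speed=4)
#     ...
#     driver.cleanup()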
|
ManiacalLabs/BiblioPixel
|
bibliopixel/drivers/serial/driver.py
|
driver.py
|
py
| 7,526 |
python
|
en
|
code
| 263 |
github-code
|
6
|
70292952188
|
# define the function
def hi(lang):
# function body
if lang=="en":
print("Hello!!!")
elif lang=="ro":
print("Salut!!!")
elif lang=="ru":
print("Привет!!!")
else:
print(lang,":SORRY, WE DON'T KNOW THIS LANGUAGE")
def bye(lang):
if lang=="en":
print("Good bye!!!")
elif lang=="ro":
print("O zi buna!!!")
elif lang=="ru":
print("Досвидания!!!")
else:
print(lang,":THIS LANGUAGE IS UNKNOWN FOR US. WE ARE SORRY!!!")
# call the function
hi("ru")
hi("ro")
hi("en")
hi("fr")
bye("ru")
bye("ro")
bye("en")
bye("fr")
|
Axelum-tech/functions
|
function_ex1.py
|
function_ex1.py
|
py
| 670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29358752973
|
from math import sqrt
from Qt import QtCore, QtGui, QtWidgets, Qt
from ts2 import utils
from ts2.routing import position
from ts2.scenery import lineitem, enditem
from ts2.scenery.signals import signalaspect, signalitem
translate = QtWidgets.qApp.translate
class TrainStatus(QtCore.QObject):
"""Holds the enum describing the status of a
:class:`~ts2.trains.train.Train`"""
INACTIVE = 0
"""Not yet entered on the scene"""
RUNNING = 10
"""Running with a positive speed"""
STOPPED = 20
"""Scheduled stop, e.g. at a station"""
WAITING = 30
"""Unscheduled stop, e.g. at a red signal"""
OUT = 40
"""Exited the area"""
END_OF_SERVICE = 50
"""Ended its service and no new service assigned"""
@classmethod
def text(cls, status):
"""
:return: Text corresponding to each status to display in the application
:rtype: str
"""
if status == cls.INACTIVE:
return translate("TrainStatus", "Inactive")
elif status == cls.RUNNING:
return translate("TrainStatus", "Running")
elif status == cls.STOPPED:
return translate("TrainStatus", "Stopped at station")
elif status == cls.WAITING:
return translate("TrainStatus", "Waiting at red signal")
elif status == cls.OUT:
return translate("TrainStatus", "Exited the area")
elif status == cls.END_OF_SERVICE:
return translate("TrainStatus", "End of service")
else:
return ""
class TrainListModel(QtCore.QAbstractTableModel):
"""Model for displaying trains as a list during the game.
"""
def __init__(self, simulation):
"""Constructor for the TrainListModel class"""
super().__init__()
self.simulation = simulation
def rowCount(self, parent=QtCore.QModelIndex(), *args):
"""Returns the number of rows of the model, corresponding to the
number of trains of the simulation"""
return len(self.simulation.trains)
def columnCount(self, parent=QtCore.QModelIndex(), *args):
"""Returns the number of columns of the model
@TODO ?? wtf
"""
return 8
def data(self, index, role=Qt.DisplayRole):
"""Returns the data at the given index"""
train = self.simulation.trains[index.row()]
if train.nextPlaceIndex is not None:
line = train.currentService.lines[train.nextPlaceIndex]
else:
line = None
if role == Qt.DisplayRole:
if index.column() == 0:
return train.serviceCode
elif index.column() == 1:
return TrainStatus.text(train.status)
elif index.column() == 2 and train.currentService:
return train.currentService.entryPlaceName
elif index.column() == 3 and train.currentService:
return train.currentService.exitPlaceName
elif index.column() == 4 and line is not None:
return line.place.placeName
elif index.column() == 5 and line is not None:
return line.trackCode
elif index.column() == 6 and line is not None:
if line.mustStop:
return line.scheduledArrivalTime.toString("hh:mm:ss")
else:
return self.tr("Non-stop")
elif index.column() == 7 and line is not None:
return line.scheduledDepartureTime.toString("hh:mm:ss")
else:
return ""
elif role == Qt.ForegroundRole:
if train.status == TrainStatus.RUNNING:
return QtGui.QBrush(Qt.darkGreen)
elif train.status == TrainStatus.STOPPED:
return QtGui.QBrush(Qt.darkBlue)
elif train.status == TrainStatus.WAITING:
return QtGui.QBrush(Qt.red)
else:
return QtGui.QBrush(Qt.darkGray)
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
"""Returns the column headers to display"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
if column == 0:
return self.tr("Code")
elif column == 1:
return self.tr("Status")
elif column == 2:
return self.tr("Entry Point")
elif column == 3:
return self.tr("Exit Point")
elif column == 4:
return self.tr("Next place")
elif column == 5:
return self.tr("Track")
elif column == 6:
return self.tr("Arrival time")
elif column == 7:
return self.tr("Departure time")
else:
return ""
return None
def flags(self, index):
"""Returns the flags of the model"""
return Qt.ItemIsSelectable | Qt.ItemIsEnabled
@QtCore.pyqtSlot(str)
def update(self, trainId):
"""Emits the dataChanged signal for the train defined by trainId."""
row = int(trainId)
self.dataChanged.emit(self.index(row, 0), self.index(row, 7))
class TrainsModel(QtCore.QAbstractTableModel):
"""Model for displaying trains as a list in the editor
"""
def __init__(self, editor):
"""Constructor for the TrainsModel class"""
super().__init__()
self._editor = editor
def rowCount(self, parent=None, *args):
"""Returns the number of rows of the model, corresponding to the
number of trains of the editor"""
return len(self._editor.trains)
def columnCount(self, parent=None, *args):
"""Returns the number of columns of the model"""
return 7
def data(self, index, role=Qt.DisplayRole):
"""Returns the data at the given index"""
if role == Qt.DisplayRole or role == Qt.EditRole:
train = self._editor.trains[index.row()]
if index.column() == 0:
return index.row()
elif index.column() == 1:
return train.serviceCode
elif index.column() == 2:
return train.trainTypeCode
elif index.column() == 3:
return train.appearTimeStr
elif index.column() == 4:
return train.trainHeadStr
elif index.column() == 5:
return train.initialSpeed
elif index.column() == 6:
return train.initialDelayStr
else:
return ""
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
"""Returns the column headers to display"""
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
if column == 0:
return self.tr("id")
elif column == 1:
return self.tr("Service code")
elif column == 2:
return self.tr("Train type")
elif column == 3:
return self.tr("Entry time")
elif column == 4:
return self.tr("Entry position")
elif column == 5:
return self.tr("Entry speed")
elif column == 6:
return self.tr("Initial Delay")
else:
return ""
return None
def setData(self, index, value, role=None):
"""Updates data when modified in the view"""
if role == Qt.EditRole:
if index.column() == 1:
self._editor.trains[index.row()].serviceCode = value
elif index.column() == 2:
self._editor.trains[index.row()].trainTypeCode = value
elif index.column() == 3:
self._editor.trains[index.row()].appearTimeStr = value
elif index.column() == 4:
self._editor.trains[index.row()].trainHeadStr = value
elif index.column() == 5:
self._editor.trains[index.row()].initialSpeed = value
elif index.column() == 6:
self._editor.trains[index.row()].initialDelayStr = value
else:
return False
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index):
"""Returns the flags of the model"""
flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
if index.column() != 0:
flags |= Qt.ItemIsEditable
return flags
@property
def simulation(self):
"""Returns the simulation this model is attached to."""
return self._editor
class TrainInfoModel(QtCore.QAbstractTableModel):
"""Model for displaying a single service information in a view
"""
def __init__(self, simulation):
"""Constructor for the TrainInfoModel class"""
super().__init__()
self.simulation = simulation
self._train = None
def rowCount(self, parent=None, *args, **kwargs):
"""Returns the number of rows in the model"""
if self._train is not None:
return 12
else:
return 0
def columnCount(self, parent=None, *args, **kwargs):
"""Returns the number of columns of the model"""
if self._train is not None:
return 2
else:
return 0
def data(self, index, role=Qt.DisplayRole):
"""Returns the data at the given index"""
if self._train is not None:
nextPlaceIndex = self._train.nextPlaceIndex
if nextPlaceIndex is not None:
line = self._train.currentService.lines[nextPlaceIndex]
else:
line = None
if role == Qt.DisplayRole:
if index.column() == 0:
if index.row() == 0:
return self.tr("Service Code:")
elif index.row() == 1:
return self.tr("Status")
elif index.row() == 2:
return self.tr("Speed:")
elif index.row() == 3:
return self.tr("Train Type:")
elif index.row() == 4:
return ""
elif index.row() == 5:
return self.tr("Entry point:")
elif index.row() == 6:
return self.tr("Exit point:")
elif index.row() == 7:
return ""
elif index.row() == 8:
return self.tr("Next:")
elif index.row() == 9:
return self.tr("Track:")
elif index.row() == 10:
return self.tr("Arrival time:")
elif index.row() == 11:
return self.tr("Departure time:")
elif index.column() == 1:
if index.row() == 0:
return self._train.serviceCode
elif index.row() == 1:
return TrainStatus.text(self._train.status)
elif index.row() == 2:
return self.tr("%3.0d km/h") % \
(float(self._train.speed) * 3.6)
elif index.row() == 3:
return self._train.trainType.description
elif index.row() == 4:
return ""
elif index.row() == 5:
return self._train.currentService.entryPlaceName
elif index.row() == 6:
return self._train.currentService.exitPlaceName
elif index.row() == 7:
return ""
elif index.row() == 8 and line is not None:
return line.place.placeName
elif index.row() == 9 and line is not None:
return line.trackCode
elif index.row() == 10 and line is not None:
if line.mustStop:
return \
line.scheduledArrivalTime.toString("hh:mm:ss")
else:
return self.tr("Non-stop")
elif index.row() == 11 and line is not None:
return \
line.scheduledDepartureTime.toString("hh:mm:ss")
else:
return ""
elif role == Qt.ForegroundRole:
if index.row() == 1 and index.column() == 1:
if self._train.status == TrainStatus.RUNNING:
return QtGui.QBrush(Qt.darkGreen)
elif self._train.status == TrainStatus.STOPPED:
return QtGui.QBrush(Qt.darkBlue)
elif self._train.status == TrainStatus.WAITING:
return QtGui.QBrush(Qt.red)
else:
return QtGui.QBrush(Qt.darkGray)
return QtGui.QBrush()
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
"""Returns the headers for this model"""
if self._train is not None \
and orientation == Qt.Horizontal \
and role == Qt.DisplayRole:
if column == 0:
return self.tr("Key")
elif column == 1:
return self.tr("Value")
else:
return ""
return None
def flags(self, index):
"""Returns the flags of the model"""
return Qt.ItemIsEnabled
@property
def train(self):
"""Returns the train instance associated with this model"""
return self._train
@QtCore.pyqtSlot(str)
def setTrainByTrainId(self, trainId):
"""Sets the train instance associated with this model from its
trainId"""
self.beginResetModel()
self._train = self.simulation.trains[int(trainId)]
self.endResetModel()
@QtCore.pyqtSlot()
def update(self):
"""Emits the dataChanged signal for the lines that may change."""
self.dataChanged.emit(self.index(1, 1), self.index(2, 1))
self.dataChanged.emit(self.index(8, 1), self.index(11, 1))
@QtCore.pyqtSlot()
def updateSpeed(self):
"""Emits the dataChanged signal for the speed only."""
self.dataChanged.emit(self.index(2, 1), self.index(2, 1))
class Train(QtCore.QObject):
"""A ``Train`` is a stock running on a track at a certain speed and to which
is assigned a :class:`~ts2.trains.service.Service` .
"""
def __init__(self, parameters):
"""
        :param dict parameters:
"""
super().__init__()
self._parameters = parameters
self.simulation = None
self._serviceCode = parameters["serviceCode"]
self._trainType = None
self._speed = parameters.get('speed')
self._initialSpeed = parameters.get("initialSpeed", 0.0)
self._accel = 0
self._trainHead = parameters["trainHead"]
self._status = parameters.get("status", TrainStatus.INACTIVE)
self._lastSignal = None
self._signalActions = [(0, 999)]
self._applicableActionIndex = 0
self._actionTime = 0
self._nextPlaceIndex = None
self._stoppedTime = 0
if "stoppedTime" in parameters:
self._stoppedTime = parameters["stoppedTime"]
self._minimumStopTime = 0
self._initialDelayProba = \
utils.DurationProba(parameters["initialDelay"])
self._initialDelay = 0
self._appearTime = QtCore.QTime.fromString(parameters["appearTime"])
self._shunting = False
# FIXME Throw back all these actions to MainWindow
self.assignAction = QtWidgets.QAction(self.tr("Reassign service..."),
self)
self.assignAction.triggered.connect(self.reassignService)
self.resetServiceAction = QtWidgets.QAction(self.tr("Reset service"),
self)
self.resetServiceAction.triggered.connect(self.resetService)
self.reverseAction = QtWidgets.QAction(self.tr("Reverse"), self)
self.reverseAction.triggered.connect(self.reverse)
self.splitAction = QtWidgets.QAction(self.tr("Split train"), self)
self.splitAction.triggered.connect(self.splitTrainPopUp)
self.proceedAction = QtWidgets.QAction(self.tr("Proceed with caution"), self)
self.proceedAction.triggered.connect(self.proceedWithCaution)
def initialize(self, simulation):
"""Initialize the train once everything else is loaded.
:param simulation: The simulation on which to initialize this Train
:type simulation: simulation.Simulation"""
if not self._parameters:
raise Exception("Internal error: Train already initialized !")
params = self._parameters
self.simulation = simulation
self._trainType = simulation.trainTypes[params["trainTypeCode"]]
self.trainHead.initialize(simulation)
if self.simulation.context == utils.Context.GAME:
self.trainStatusChanged.connect(simulation.trainStatusChanged)
self.reassignServiceRequested.connect(
simulation.simulationWindow.openReassignServiceWindow
)
self.splitTrainRequested.connect(
simulation.simulationWindow.openSplitTrainWindow
)
self._parameters = None
def updateData(self, msg):
self.nextPlaceIndex = msg["nextPlaceIndex"]
self.serviceCode = msg["serviceCode"]
self._speed = msg["speed"]
self._status = msg["status"]
self._trainType = self.simulation.trainTypes[msg["trainTypeCode"]]
self._trainHead = position.Position(parameters=msg["trainHead"])
self._trainHead.initialize(self.simulation)
self.trainStatusChanged.emit(self.trainId)
def for_json(self):
"""Dumps this train to JSON."""
if self.simulation.context != utils.Context.GAME or \
self.status == TrainStatus.INACTIVE:
speed = self.initialSpeed
appearTime = self.appearTimeStr
initialDelay = self.initialDelay
elif self.status == TrainStatus.OUT:
speed = 0
appearTime = "00:00:00"
initialDelay = 0
else:
speed = self.speed
appearTime = self.simulation.currentTime.toString("hh:mm:ss")
initialDelay = 0
return {
"__type__": "Train",
"trainId": self.trainId,
"serviceCode": self.serviceCode,
"trainTypeCode": self.trainTypeCode,
"status": self.status,
"speed": speed,
"initialSpeed": self.initialSpeed,
"trainHead": self.trainHead,
"appearTime": appearTime,
"initialDelay": initialDelay,
"nextPlaceIndex": self.nextPlaceIndex,
"stoppedTime": self.stoppedTime
}
trainStoppedAtStation = QtCore.pyqtSignal(str)
trainDepartedFromStation = QtCore.pyqtSignal(str)
trainStatusChanged = QtCore.pyqtSignal(str)
trainExitedArea = QtCore.pyqtSignal(str)
reassignServiceRequested = QtCore.pyqtSignal(str)
splitTrainRequested = QtCore.pyqtSignal(str)
# ## Properties ######################################################
@property
def trainId(self):
"""Returns the train Id which is index of this train inside the train
list of the simulation."""
try:
trainId = str(self.simulation.trains.index(self))
except ValueError:
trainId = ""
return trainId
@property
def initialDelay(self):
"""
:return: the number of seconds of delay that this train had when it
was activated.
:rtype: int
"""
return self._initialDelay
@property
def minimumStopTime(self):
"""
:return: the minimum stopping time for next station
:rtype: int
"""
return self._minimumStopTime
@property
def stoppedTime(self):
"""
        :return: the number of seconds that this train is stopped at the
current station.
:rtype: int
"""
return self._stoppedTime
@property
def serviceCode(self):
"""
:return: the service code of this train
:rtype: str
"""
return self._serviceCode
@serviceCode.setter
def serviceCode(self, serviceCode):
"""Changes the train current service code to serviceCode"""
if serviceCode not in self.simulation.services:
raise Exception(self.tr("No service with code %s") % serviceCode)
self._serviceCode = serviceCode
@property
def status(self):
"""
:return: the status of the train
:rtype: :class:`~ts2.trains.train.TrainStatus`
"""
return self._status
@property
def currentService(self):
"""Returns the Service object assigned to this train"""
if self._serviceCode is not None:
return self.simulation.service(self._serviceCode)
@property
def nextPlaceIndex(self):
"""Returns the index of the next place, that is the index of the
ServiceLine of the current service pointing to the next place the
train is scheduled to.
:rtype : int"""
return self._nextPlaceIndex
@nextPlaceIndex.setter
def nextPlaceIndex(self, index):
"""Setter function for the nextPlaceIndex property."""
if index is None or \
index < 0 or \
index >= len(self.currentService.lines):
self._nextPlaceIndex = None
else:
self._nextPlaceIndex = index
@property
def trainType(self):
"""
:return: The TrainType of this Train
:rtype: :class:`~ts2.trains.traintype.TrainType`
"""
return self._trainType
@property
def trainTypeCode(self):
"""Returns the code of the train type"""
"""
:return: The code of this trains type
:rtype: str
"""
return self._trainType.code
@trainTypeCode.setter
def trainTypeCode(self, value):
"""Setter function for the trainTypeCode property"""
if self.simulation.context == utils.Context.EDITOR_TRAINS:
try:
self._trainType = self.simulation.trainTypes[value]
except KeyError:
pass
@property
def speed(self):
"""Returns the current speed of the Train."""
return self._speed
@property
def signalActions(self):
"""Returns the list of actions asked by the last seen signal. List of
(target, speed) tuples."""
return self._signalActions
@property
def applicableActionIndex(self):
"""Returns the applicable action in the action list."""
return self._applicableActionIndex
@property
def actionTime(self):
"""
:return: the time at which the current action has been achieved or 0.
:rtype: ``QTime`` or 0
"""
return self._actionTime
@property
def initialSpeed(self):
"""Returns the initial speed of the train, i.e. the speed it has when
it appears on the scene"""
return self._initialSpeed
@initialSpeed.setter
def initialSpeed(self, value):
"""Setter function for the initialSpeed property"""
if self.simulation.context == utils.Context.EDITOR_TRAINS:
if value is None or value == "":
value = "0.0"
self._initialSpeed = float(value)
@property
def trainHead(self):
"""
:return: the Position of the head of this train.
:rtype: :class:`~ts2.routing.position.Position`
"""
return self._trainHead
@trainHead.setter
def trainHead(self, value):
"""Setter function for the trainHead property"""
if self.simulation.context == utils.Context.EDITOR_TRAINS:
self._trainHead = value
def _getTrainHeadStr(self):
"""
:return: the Position of the head of this train.
:rtype: `str`
"""
return str(self._trainHead)
def _setTrainHeadStr(self, value):
"""Setter function for the trainHeadStr property."""
if self.simulation.context == utils.Context.EDITOR_TRAINS:
values = eval(value.strip('()'))
if len(values) != 3:
return
            tiId, ptiId, posOnTI = values
trackItem = self.simulation.trackItem(str(tiId))
previousTI = self.simulation.trackItem(str(ptiId))
self.trainHead = position.Position(trackItem, previousTI, posOnTI)
trainHeadStr = property(_getTrainHeadStr, _setTrainHeadStr)
@property
def lastSignal(self):
"""Returns the last signal that the driver has seen, which may be the
one just in front."""
return self._lastSignal
@property
def initialDelayStr(self):
"""Returns the initialDelay probability function as a string."""
return str(self._initialDelayProba)
@initialDelayStr.setter
def initialDelayStr(self, value):
"""Setter function for the initialDelayStr property."""
if self.simulation.context == utils.Context.EDITOR_TRAINS:
self._initialDelayProba = utils.DurationProba(value)
@property
def appearTimeStr(self):
"""Returns the time at which this train appears on the scene as a
String."""
return self._appearTime.toString("HH:mm:ss")
@appearTimeStr.setter
def appearTimeStr(self, value):
"""Setter function for the appearTime property"""
if self.simulation.context == utils.Context.EDITOR_TRAINS:
self._appearTime = QtCore.QTime.fromString(value)
@property
def shunting(self):
"""
:return: True if the train is shunting False otherwise
"""
return self._shunting
# ## Methods ########################################################
def isOut(self):
"""
:return: True if the train exited the area
:rtype: bool
"""
return \
self._trainHead.isOut() and \
self._trainHead.positionOnTI() > self._trainType.length()
def isActive(self):
"""
:return: ``True`` if the train is in the area and its current service
is not finished
:rtype: bool"""
return \
self._status != TrainStatus.INACTIVE and \
self._status != TrainStatus.OUT and \
self._status != TrainStatus.END_OF_SERVICE
def isOnScenery(self):
"""
:return: True if the train is on the scenery
:rtype: bool
"""
return \
self._status != TrainStatus.INACTIVE and \
self._status != TrainStatus.OUT
def showTrainActionsMenu(self, widget, pos):
"""Pops-up the train actions menu on the given QWidget"""
contextMenu = QtWidgets.QMenu(widget)
contextMenu.addAction(self.proceedAction)
contextMenu.addAction(self.reverseAction)
contextMenu.addAction(self.assignAction)
contextMenu.addAction(self.resetServiceAction)
# contextMenu.addAction(self.splitAction)
contextMenu.exec_(pos)
@QtCore.pyqtSlot()
def reverse(self):
"""Reverses the train direction."""
self.simulation.simulationWindow.webSocket.sendRequest("train", "reverse", {'id': int(self.trainId)})
@QtCore.pyqtSlot()
def reassignService(self):
""" Pops up a dialog for the user to choose the new service and
reassign it to this train, if the service is not already assigned
to another train"""
self.reassignServiceRequested.emit(self.trainId)
@QtCore.pyqtSlot()
def resetService(self):
"""Resets the service, i.e. sets the pointer to the first station."""
if QtWidgets.QMessageBox.question(
self.simulation.simulationWindow,
self.tr("Reset a service"),
self.tr("Are you sure you really "
"want to reset service %s?"
% self.serviceCode),
QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
) == QtWidgets.QMessageBox.Ok:
self.simulation.simulationWindow.webSocket.sendRequest("train", "resetService", {'id': int(self.trainId)})
@QtCore.pyqtSlot()
def proceedWithCaution(self):
self.simulation.simulationWindow.webSocket.sendRequest("train", "proceed", {'id': int(self.trainId)})
@QtCore.pyqtSlot()
def splitTrainPopUp(self):
"""Pops up a dialog for the user to choose where to split the train and
then split it."""
reason = None
if len(self.trainType.elements) < 2:
reason = self.tr("This train cannot be split")
if self.speed != 0:
reason = self.tr("This train is not stopped")
if reason:
QtWidgets.QMessageBox.warning(
self.simulation.simulationWindow,
self.tr("Unable to split train"),
reason,
QtWidgets.QMessageBox.Ok
)
else:
self.splitTrainRequested.emit(self.trainId)
def splitTrain(self, splitIndex):
"""Splits this train at the given index.
:param splitIndex: The index at which to split the train. 1 is between
the first and second element, 2 between the second and third, etc.
counting from the train head."""
headElements = self.trainType.elements[:splitIndex]
tailElements = self.trainType.elements[splitIndex:]
headTrainType = None
tailTrainType = None
for trainType in self.simulation.trainTypes.values():
if headElements == trainType.elements or \
(len(headElements) == 1 and
headElements[0] == trainType):
headTrainType = trainType
if tailElements == trainType.elements or \
(len(tailElements) == 1 and
tailElements[0] == trainType):
tailTrainType = trainType
if headTrainType and tailTrainType:
break
# Check if there exists a new train type for the head and tail trains
if not headTrainType or not tailTrainType:
QtWidgets.QMessageBox.warning(
self.simulation.simulationWindow,
self.tr("Unable to split train"),
self.tr("This train cannot be split"),
QtWidgets.QMessageBox.Ok
)
return
# Change our own train type to the head type
self._trainType = headTrainType
# Create a new train for the tail
parameters = {
"__type__": "Train",
"serviceCode": None,
"trainTypeCode": tailTrainType.code,
"status": TrainStatus.INACTIVE,
"speed": 0.0,
"initialSpeed": 0.0,
"trainHead": self.trainHead - headTrainType.length - 1.0,
"appearTime": self.simulation.currentTime.toString(),
"initialDelay": 0,
"nextPlaceIndex": None,
"stoppedTime": 1.0
}
newTrain = Train(parameters)
self.simulation.addTrain(newTrain)
newTrain.initialize(self.simulation)
newTrain.reassignService()
|
ts2/ts2
|
ts2/trains/train.py
|
train.py
|
py
| 31,937 |
python
|
en
|
code
| 43 |
github-code
|
6
|
36414916188
|
# Solution1
class Solution:
def isRobotBounded(self, instructions: str) -> bool:
directions = [1, 1, -1, -1]
cur_direction = 0
cur_position = [0, 0]
for instruction in instructions:
if instruction == "G":
cur_position[(cur_direction % 2 + 1)%2] += directions[cur_direction]
elif instruction == "L":
cur_direction -= 1
if cur_direction < 0:
cur_direction = 3
elif instruction == "R":
cur_direction = (cur_direction + 1) % 4
cur_position = tuple(cur_position)
return cur_position == (0, 0) or cur_direction != 0
# Solution2
class Solution:
def isRobotBounded(self, instructions: str) -> bool:
horizontal_dir = 0
vertical_dir = 1
cur_position = [0, 0]
for instruction in instructions:
if instruction == "G":
cur_position[0] += horizontal_dir
cur_position[1] += vertical_dir
elif instruction == "L":
horizontal_dir, vertical_dir = -1*vertical_dir, horizontal_dir
else:
horizontal_dir, vertical_dir = vertical_dir, -1*horizontal_dir
cur_position = tuple(cur_position)
return cur_position == (0, 0) or (horizontal_dir, vertical_dir) != (0, 1)
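# Minimal sanity checks, with expected results from the problem statement;
# note these run against the second Solution class, which shadows the first:
s = Solution()
assert s.isRobotBounded("GGLLGG")   # returns to the origin -> bounded
assert not s.isRobotBounded("GG")   # walks north forever -> unbounded
assert s.isRobotBounded("GL")       # direction keeps changing -> bounded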
|
eyosiasbitsu/Competitive-programming-A2SV
|
A2SV - Squid Game/Robot Bounded In Circle/robot-bounded-in-circle.py
|
robot-bounded-in-circle.py
|
py
| 1,458 |
python
|
en
|
code
| 3 |
github-code
|
6
|
31886163933
|
import random
class ValueHighError(Exception):
def __init__(self, msg, value):
self.msg = msg
self.value = value
class ValueLowError(Exception):
def __init__(self, msg, value):
self.msg = msg
self.value = value
def test_input_value(value: str, desired_range: tuple):
try:
int(value)
except ValueError as e:
raise Exception(random_error_msg(value)) from e
if int(value) < desired_range[0]:
raise ValueLowError(
f'Error: {value} is not in presented options! Should be between {desired_range[0]} and {desired_range[1]}',
value)
if int(value) > desired_range[1]:
        raise ValueHighError(
f'Error: {value} is not in presented options! The number should be between {desired_range[0]} and {desired_range[1]}',
value)
def random_error_msg(value):
msg = [
f"Error: This '{value}' should be number not a string!",
f"Error: Why you wouldn't understand '{value}' should be number not a string!",
f"Error: Dude are you fucking stupid? Why you wouldn't understand '{value}' should be number not a string!",
]
return random.choice(msg)
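if __name__ == '__main__':
    # A minimal demo of the validator (assumes this file is run as a script):
    try:
        test_input_value("7", (1, 5))
    except ValueHighError as err:
        print(err.msg, '->', err.value)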
|
NikolaVasilev/WeaponFactoryOOP
|
validations.py
|
validations.py
|
py
| 1,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18100446664
|
"""
https://leetcode.com/problems/jump-game-iv/
1345. Jump Game IV
Given an array of integers arr, you are initially positioned at the first index of the array.
In one step you can jump from index i to index:
i + 1 where: i + 1 < arr.length.
i - 1 where: i - 1 >= 0.
j where: arr[i] == arr[j] and i != j.
Return the minimum number of steps to reach the last index of the array.
Notice that you can not jump outside of the array at any time.
"""
from typing import List, Deque, DefaultDict, Tuple
from collections import deque, defaultdict
from unittest import TestCase, main
class Solution:
def minJumps(self, arr: List[int]) -> int:
# Edge case
if len(arr) == 1:
return 0
steps = 0
visited = [True] + [False] * (len(arr) - 1)
q: Deque[int] = deque()
q.append(0)
hm: DefaultDict[int, List[int]] = defaultdict(list)
# Create hash map
for i in range(len(arr)):
hm[arr[i]].append(i)
while q:
for _ in range(len(q)):
i = q.popleft()
# If i is the last index, return steps
if i == len(arr) - 1:
return steps
# Search i - 1
if 0 <= i - 1 and not visited[i - 1]:
visited[i - 1] = True
q.append(i - 1)
# Search i + 1
if i + 1 < len(arr) and not visited[i + 1]:
visited[i + 1] = True
q.append(i + 1)
# Search hm[arr[i]]
while hm[arr[i]]:
j = hm[arr[i]].pop()
if not visited[j]:
visited[j] = True
q.append(j)
steps += 1
raise ValueError("Could't find the answer")
class Test(TestCase):
cases: List[Tuple[List[int], int]] = [
([7, 7, 2, 1, 7, 7, 7, 3, 4, 1], 3),
]
def test_solution(self):
solution = Solution()
for arr, expected in self.cases:
self.assertEqual(solution.minJumps(arr), expected)
if __name__ == "__main__":
main()
|
hirotake111/leetcode_diary
|
leetcode/1345/solution.py
|
solution.py
|
py
| 2,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74180016507
|
import json
poptab = [ x.split(', ') for x in open('country-list.csv').read().split('\n')]
captab = [ x.split(', ') for x in open('country-capitals.csv').read().split('\n')]
flgtab = [ x.split(', ') for x in open('country-flags.csv').read().split('\n')]
curtab = [ x.split(', ') for x in open('country-currency.csv').read().split('\n')]
popmap = dict([[x[1], { 'population': x[2], 'area': x[3], 'density': x[4]}] for x in poptab if len(x) > 1])
capmap = dict([[x[0], x[1:]] for x in captab if len(x) > 1])
curmap = dict([[x[0], { 'currency': x[1], 'code': x[2]}] for x in curtab if len(x) > 1])
flgmap = dict([[x[1], x[0]] for x in flgtab if len(x) > 1])
countries = [x[1] for x in poptab if len(x) > 1]
res = [{
'serial': i+1,
'name': countries[i],
'capitals': capmap[countries[i]],
'currency': curmap[countries[i]]['currency'],
'currency_code': curmap[countries[i]]['code'],
'population': popmap[countries[i]]['population'],
'area': popmap[countries[i]]['area'],
'population_density': popmap[countries[i]]['density'],
'flag': flgmap[countries[i]]
} for i in range(len(countries))]
print(json.dumps(res, indent=4))
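# Note: the hand-rolled split(', ') above breaks on quoted fields containing
# commas; the csv module handles that case (a sketch, same file layout assumed):
# import csv
# with open('country-list.csv') as f:
#     poptab = list(csv.reader(f, skipinitialspace=True))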
|
GnsP/countries
|
assemble.py
|
assemble.py
|
py
| 1,156 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34570396569
|
from django import forms
from .models import Post, Comment, Category2
from treebeard.forms import movenodeform_factory, MoveNodeForm
# from markdownx.fields import MarkdownxFormField
class CategoryForm(MoveNodeForm):
class Meta:
model = Category2
exclude = ('sib_order', 'parent')
class PostCreateUpdateForm(forms.ModelForm):
class Meta:
model = Post
fields = ('body','title', 'category2', 'is_published' )
labels = {
'category2': 'دسته بندی',
'is_published': ' پست منتشر شود؟',
}
widgets = {
'body': forms.Textarea(attrs={'id':'markdown_input' , 'class':'form-control shadow-lg'}),
'category2': forms.Select(attrs={'class':'form-control shadow-lg'}),
'title': forms.TextInput(attrs={'class':'form-control shadow-lg'}),
# 'is_published': forms.BooleanField(),
# 'title': forms.TextInput(attrs={'class':'form-control shadow-lg'}),
}
class CommentCreateForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body',)
widgets = {
'body': forms.Textarea(attrs={'class':'form-control'})
}
class CommentReplyForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body',)
class PostSearchForm(forms.Form):
search = forms.CharField()
|
MohammadGoodarzi/hamkav_core
|
HamkavBlog/forms.py
|
forms.py
|
py
| 1,271 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71992716667
|
# -*- coding: utf-8 -*-
# python3
from loguru import logger
import time
import socket
import re
import psycopg2
from psycopg2 import Error
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import time
logger.add("/var/log/cdr_collector/cdr_collector_{time:DD-MM-YYYY}.log",
format="{time:DD-MM-YYYY at HH:mm:ss} {level} {message}", rotation="20 MB", compression="zip")
serv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto=0)
serv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv_sock.bind(('', 9000))
serv_sock.listen(10)
def recieve_msg(client_sock):
while True:
len_msg = 102
len_msg_socket = len_msg * 9000
valid_msg = []
msg = client_sock.recv(len_msg_socket) #.decode("utf-8") # I got the error when I put server.recv
if len(msg) == len_msg:
logger.debug(f"Received {len(msg)} byte")
global start_time
start_time = time.time()
msg = msg.decode("utf-8")
valid_msg.append(msg)
msg = valid_msg
return msg
elif len(msg) > len_msg:
logger.debug(f"Received {len(msg)} byte")
i = r"b'\d{9}"
ls = [msg[i:i+len_msg] for i in range(0,len(msg),len_msg)]
for i in ls:
if len(i) == len_msg:
i = i.decode("utf-8")
valid_msg.append(i)
            msg = valid_msg
            return msg
elif len(msg) == 0:
logger.info(f"Received {len(msg)} byte client DISCONNECTED!!!")
            timing = time.time() - start_time
            logger.info(f"Processing time = {timing}")
            return False
else:
logger.info(f"ACM is CONNECTED, time {msg.decode('utf-8')}")
def start(serv_sock):
serv_sock.listen(1)
while True:
logger.info(f"waiting connection, socket is OPEN!!!")
client_sock, client_addr = serv_sock.accept()
logger.info(f"Connected to socket by, {client_addr}")
while True:
msg = recieve_msg(client_sock)
if msg == False:
client_sock.close()
break
else:
put_to_db(msg)
logger.info(f"Socket connection by {client_addr} is CLOSED!!!")
client_sock.close()
def put_to_db(msg):
try:
        # Connect to the existing database
connection = psycopg2.connect(user="user",
                                      # password specified when installing PostgreSQL
password="password",
host="127.0.0.1",
port="5432")
connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        # Cursor for performing database operations
cursor = connection.cursor()
#sql_insert_query = f"SELECT cdr_unformatted_func_py({tst})"
sql_insert_query = f"SELECT cdr_unformatted_func_py(VARIADIC ARRAY{msg!r})" # !r для того чтобы строка передавалась с ковычками.
cursor.execute(sql_insert_query)
connection.commit()
except (Exception, Error) as error:
logger.error(f"Error at work PostgreSQL, {error}")
finally:
if connection:
cursor.close()
connection.close()
logger.debug(f"Data length={len(msg)} has been successfully written to the database")
start(serv_sock)
|
jgnom/avaya_cdr_collector
|
main.py
|
main.py
|
py
| 3,660 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35384638116
|
from setuptools import setup
with open('README.md', 'r') as f:
readme = f.read()
if __name__ == '__main__':
setup(
name='youtube-dl-service',
version='0.0.2',
author='Dmitriy Pleshevskiy',
author_email='[email protected]',
description='Using youtube-dl as service in python code',
long_description=readme,
long_description_content_type='text/markdown',
package_data={'': ['LICENSE', 'README.md']},
include_package_data=True,
license='MIT',
packages=['youtube_dl_service'],
install_requires=[
'youtube-dl==2020.12.29',
]
)
|
pleshevskiy/youtube-dl-service
|
setup.py
|
setup.py
|
py
| 655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32149738267
|
# Upload BOJ Gold-3 Topological Sorting & Graph No. 2252 (줄세우기, "Line Up")
# https://velog.io/@kimdukbae/%EC%9C%84%EC%83%81-%EC%A0%95%EB%A0%AC-Topological-Sorting
# Reference link
from collections import deque
N, M = map(int,input().split())
graph = [ [] for _ in range(N+1)]
indegree = [0]*(N+1)
result = []
for i in range(M):
A, B = map(int,input().split())
graph[A].append(B)
indegree[B] += 1
def topology_sort():
q = deque()
for i in range(1,N+1):
if not indegree[i]:
q.append(i)
while q:
num = q.popleft()
result.append(num)
for value in graph[num]:
indegree[value] -= 1
if not indegree[value]:
q.append(value)
topology_sort()
print(*result)
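# Example: for the input lines "4 3", "1 2", "2 3", "1 4" this prints
# "1 2 4 3" (one valid ordering; ties follow queue insertion order).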
|
HS980924/Algorithm
|
src/14.그래프/B#2252_줄세우기.py
|
B#2252_줄세우기.py
|
py
| 808 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30338343377
|
key = [[0,0,0],[1,0,0],[0,1,1]]
lock = [[1,1,1],[1,1,0],[1,0,1]]
def rotate(arr, n):
    # rotate each (row, col) coordinate 90 degrees clockwise in an n x n grid
    for i in range(len(arr)):
        tmp = arr[i][0]
        arr[i][0] = arr[i][1]
        arr[i][1] = n - 1 - tmp
lock_zero = []
for i, n in enumerate(key):
for j, num in enumerate(n):
if num == 1:
key_one.append([i,j])
for i, n in enumerate(lock):
for j, num in enumerate(n):
if num == 0:
lock_zero.append([i,j])
r_cnt = 0
while r_cnt < 4:
    rotate(key_one, len(key))  # try the next 90-degree rotation of the key
    r_cnt += 1
    # slide the rotated key over the lock and test every offset
    for i in range(-(len(key) - 1), len(lock)):
        for j in range(-(len(key) - 1), len(lock)):
            z_cnt = len(lock_zero)
            fits = True
            for row, col in key_one:
                row += i
                col += j
                if (0 <= row < len(lock)) and (0 <= col < len(lock)):
                    if [row, col] in lock_zero:
                        z_cnt -= 1
                    else:
                        fits = False  # a key tooth hit a lock tooth
                        break
            if fits and z_cnt == 0:
                print(True)
                exit(0)
print(False)
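# A micro-optimization sketch: `[row, col] in lock_zero` above is a linear
# scan; a set of tuples would make each membership test O(1):
# lock_zero_set = {tuple(p) for p in lock_zero}
# ... if (row, col) in lock_zero_set: ...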
|
minju7346/CordingTest
|
programmers/60059.py
|
60059.py
|
py
| 1,226 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16319784906
|
"""ZIP - Compactando / Descompactando arquivos"""
from zipfile import ZipFile
import os
""" Este módulo fornece ferramentas para:
criar, ler, escrever, adicionar, e listar um arquivo ZIP
-https://docs.python.org/pt-br/3/library/zipfile.html
"""
way = (r'C:\Users\Igor\Desktop\Estudos\Programação-em-Python'
r'\Mundo-invertido\Udemy\4-Módulos-Python')
# Create a zip file and put files into it
with ZipFile('file.zip', 'w') as zip:
for file in os.listdir(way):
full_path = os.path.join(way, file)
zip.write(full_path, file)
with ZipFile('file.zip', 'r') as zip:  # List the files inside the zip
for file in zip.namelist():
print(file)
with ZipFile('file.zip', 'r') as zip:  # Extract the files from the zip
    zip.extractall(f'{way}/unzipped')  # extracted
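# Reading a single member without extracting it (a sketch; the member name
# 'example.txt' is an illustrative assumption):
# with ZipFile('file.zip', 'r') as zip:
#     data = zip.read('example.txt')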
|
igorfreits/Studies-Python
|
Udemy/4-Módulos-Python/aula 87 - ZIP.py
|
aula 87 - ZIP.py
|
py
| 814 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
33526996747
|
'''
Test script for the pretrained VGG model; it uses the same pretrained weights as CNN_DPL
'''
import torch as t
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import data
from torchvision import transforms, datasets
import model
import warnings
warnings.filterwarnings('ignore')
batch_size = 128
test_dataset = datasets.ImageFolder(root='/data2/ci2p_user_data/kwei/Caltech101/test',
transform=data.transform)
test_loader = DataLoader(dataset=test_dataset,
shuffle=True,
batch_size=batch_size,
)
device = t.device("cuda:1" if t.cuda.is_available() else "cpu")
net = model.VGG_Test()
net = net.to(device)
def test():
net.eval()
correct = 0
total = 0
with t.no_grad():
for data in test_loader:
images, labels = data
images = images.to(device)
labels = labels.to(device)
outputs = net(images)
_, pred = t.max(outputs.data, dim=1)
total += labels.size(0)
correct += (pred == labels).sum().item()
    print('Accuracy: {}%'.format(100 * correct / total))
if __name__ == '__main__':
test()
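# A top-5 accuracy variant of the check above would look like this (sketch):
# _, top5 = t.topk(outputs.data, k=5, dim=1)
# correct += (top5 == labels.view(-1, 1)).any(dim=1).sum().item()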
|
huifeidetiaotu96/CNN_DPL
|
test_VGG.py
|
test_VGG.py
|
py
| 1,266 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21359432660
|
"""Clean Code in Python - Chapter 5: Decorators
Composition over inheritance, tests for examples 1 & 2
"""
import unittest
from composition_1 import Customer as Customer1
from composition_2 import Customer as Customer2
class BaseTestMixin:
def test_resolver_finds_attributes(self):
with self.subTest(test_class=self._CLASS_TO_TEST):
customer = self._CLASS_TO_TEST(1, "foo", "address")
self.assertEqual(customer.resolve_customer_id, 1)
self.assertEqual(customer.resolve_name, "foo")
self.assertEqual(customer.resolve_address, "address")
self.assertEqual(customer.customer_id, 1)
def test_resolver_attribute_error(self):
with self.subTest(test_class=self._CLASS_TO_TEST):
customer = self._CLASS_TO_TEST(1, "foo", "address")
self.assertEqual(customer.name, "foo")
with self.assertRaises(AttributeError):
customer.resolve_foo
class TestInheritance(BaseTestMixin, unittest.TestCase):
_CLASS_TO_TEST = Customer1
class TestDecorator(BaseTestMixin, unittest.TestCase):
_CLASS_TO_TEST = Customer2
if __name__ == "__main__":
unittest.main()
|
rmariano/Clean-code-in-Python
|
book/src/ch05/tests/test_composition.py
|
test_composition.py
|
py
| 1,193 |
python
|
en
|
code
| 145 |
github-code
|
6
|
3337691584
|
'''
reres has trouble replacing local files; serve the file over HTTP on a local port instead
'''
from flask import Flask
app = Flask(__name__)
@app.route("/udc.js")
def udc():
fill=open('/Users/xiaodu/Desktop/udc2.js','r',encoding='utf-8')
js_dt=fill.read()
return js_dt
if __name__ == '__main__':
app.run()
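# Flask can also serve the file directly (a sketch using flask.send_file):
# from flask import send_file
# return send_file('/Users/xiaodu/Desktop/udc2.js', mimetype='text/javascript')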
|
zml1996/learn_record
|
myflask/file2link.py
|
file2link.py
|
py
| 307 |
python
|
en
|
code
| 2 |
github-code
|
6
|
17510524283
|
'''
2. use BFS to find shortest length
a. establish neighbors: go through alphabet, change 1 letter, if in wordList,
add neighbor to dict for this word
b. BFS -- add that nb word to queue
c. stop BFS when endWord is found.
d. return length
3. use DFS to find routes with shortest length
optimization detail: - i --> nbs(i) ==> consider neighbors that are distance one further away from start
'''
class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
def bfs():
todo = set(wordList)
todo.add(beginWord)
allwords = set(wordList)
count = 0
level = deque([beginWord])
todo.remove(beginWord)
length = None
while level:
for _ in range(len(level)):
word = level.popleft()
distance[word] = count
if word == endWord:
length = count + 1
for letidx in range(len(word)):
for alphidx in range(ord('a'), ord('z') + 1):
ch = chr(alphidx)
otherword = word[:letidx] + ch + word[letidx + 1:]
if otherword in allwords - set([word]):
nblookup[word].add(otherword)
nblookup[otherword].add(word)
if otherword in todo:
level.append(otherword)
todo.remove(otherword)
count += 1
return length
def dfs(word, path):
nonlocal length
if length == 1:
if word == endWord:
result.append(path + [endWord])
return
todo.remove(word)
path.append(word)
length -= 1
for otherword in nblookup[word]:
if otherword in todo and distance[otherword] == distance[word] + 1:
dfs(otherword, path)
length += 1
path.pop()
todo.add(word)
from pprint import pprint
result = []
nblookup = defaultdict(set)
distance = {}
length = bfs()
if length is None:
return []
todo = set(wordList)
todo.add(beginWord)
dfs(beginWord, [])
return result
|
soji-omiwade/cs
|
dsa/before_rubrik/word_ladder_ii.py
|
word_ladder_ii.py
|
py
| 2,654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42931952334
|
import re
class Solution:
    def trailingZeros(self, num):
        # count factors of 5 in num!: each contributes one trailing zero
        ans, i, div = 0, 5, num // 5
        while div > 0:
            ans += div
            i *= 5
            div = num // i
        return ans
obj = Solution()
print(obj.trailingZeros(25))
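# Sanity check: 100! ends in 24 zeros (100//5 + 100//25 = 20 + 4):
assert obj.trailingZeros(100) == 24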
|
shwetakumari14/Practice-Problems
|
Pythons Solutions/Trailing Zeros in Factorial.py
|
Trailing Zeros in Factorial.py
|
py
| 265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21864334971
|
"""9-3: Users
1. Make a class called User. Create two attributes called first_name and last_name,
and then 2. create several other attributes that are typically stored in a user profile.
3. Make a method called describe_user() that prints a summary of the user’s information.
4. Make another method called greet_user() that prints a personalized greeting to the user.
5. Create several instances representing different users, and call both methods for each user."""
class User(): # step 1.
"""Represent a simple user profile."""
def __init__(self, first_name, last_name, username, email, location, age: int):
"""Initialize the user."""
self.first_name = first_name.title() # Step 2. (Line12 to Line 17)
self.last_name = last_name.title()
self.username = username
self.email = email
self.location = location.title()
self.age = age
def describe_user(self): # Step 3.
"""Display a summary of the user's information."""
print("Profile as follows:")
print(f"\n {self.first_name} {self.last_name}")
print(f" Username: {self.username}")
print(f" Email: {self.email}")
print(f" Location: {self.location}")
print(f" Age:{self.age}")
def greet_user(self): # Step 4.
"""Display a personalized greeting to the user."""
print("\nWelcome back, " + self.username + "!")
albert = User('albert', 'joes', 'a_joes', '[email protected]', 'brooklyn', 18)
albert.describe_user()
albert.greet_user()
bonnie = User('bonnie', 'greenman', 'bgreen', '[email protected]', '', 30)
bonnie.describe_user()
bonnie.greet_user()
|
kawing13328/Basics
|
My Homework/Ex_9-3.py
|
Ex_9-3.py
|
py
| 1,656 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39861129033
|
"""Imports xml statements from privatbank, optained via p24-cli.
See https://github.com/dimboknv/p24-cli
The xml format is identical to that of p24 merchant api.
"""
import datetime
from xml.etree import ElementTree as ET
import beangulp
import dateutil.parser
from beancount.core import data, flags
from beancount.core.number import D
from uabean.importers.mixins import IdentifyMixin
class Importer(IdentifyMixin, beangulp.Importer):
FLAG = flags.FLAG_OKAY
matchers = [
("content", "<statements status="),
("mime", "application/xml"),
]
unknown_account = "Assets:Unknown"
def __init__(
self,
card_to_account_map,
*args,
fee_account="Expenses:Fees:Privatbank",
**kwargs
):
self.card_to_account_map = card_to_account_map
self.fee_account = fee_account
super().__init__(*args, **kwargs)
def date_from_elem(self, elem):
return dateutil.parser.parse(" ".join(elem.attrib["trandate"].split(" ")[:2]))
def account(self, _):
return "privatbank"
def extract(self, filename, existing_entries=None):
entries = []
tree = ET.parse(filename)
root = tree.getroot()
assert root.tag == "statements"
max_date = None
max_elem = None
for elem in root:
meta = data.new_metadata(filename, 0)
entries.append(self.entry_from_elem(meta, elem))
date = self.date_from_elem(elem)
if max_date is None or date > max_date:
max_date = date
max_elem = elem
if max_elem is not None:
rest_num, rest_currency = max_elem.attrib["rest"].split(" ", 1)
amount = data.Amount(D(rest_num), rest_currency)
entries.append(
data.Balance(
data.new_metadata(filename, 0),
max_date.date() + datetime.timedelta(days=1),
self.card_to_account_map[max_elem.attrib["card"]],
amount,
None,
None,
)
)
return entries
def entry_from_elem(self, meta, elem):
dt = self.date_from_elem(elem)
meta["time"] = dt.strftime("%H:%M:%S")
account = self.card_to_account_map.get(
elem.attrib["card"], self.unknown_account
)
num, currency = elem.attrib["amount"].split(" ", 1)
num = D(num)
card_num, card_currency = elem.attrib["cardamount"].split(" ", 1)
card_num = D(card_num)
postings = [
data.Posting(
account, data.Amount(card_num, card_currency), None, None, None, None
)
]
if currency != card_currency:
meta["converted"] = elem.attrib["amount"]
elif abs(card_num) != num:
fee_amount = data.Amount(abs(card_num) - num, currency)
postings.append(
data.Posting(self.fee_account, fee_amount, None, None, None, None)
)
return data.Transaction(
meta,
dt.date(),
self.FLAG,
None,
elem.attrib["description"],
data.EMPTY_SET,
data.EMPTY_SET,
postings,
)
def get_test_importer():
return Importer(
{
"1234": "Assets:Privatbank:Universal",
"5678": "Assets:Privatbank:Social",
}
)
if __name__ == "__main__":
from beangulp.testing import main
main(get_test_importer())
|
OSadovy/uabean
|
src/uabean/importers/privatbank_xml.py
|
privatbank_xml.py
|
py
| 3,577 |
python
|
en
|
code
| 18 |
github-code
|
6
|
71567935547
|
class Library:
def __init__(self):
self.user_records = []
self.books_available = {}
self.rented_books = {}
def get_book(self, author: str, book_name: str, days_to_return: int, user):
if author in self.books_available and book_name in self.books_available[author]:
user.books.append(book_name)
self.books_available[author].remove(book_name)
if user.username not in self.rented_books:
self.rented_books[user.username] = {}
self.rented_books[user.username][book_name] = days_to_return
return f"{book_name} successfully rented for the next {days_to_return} days!"
for user_books in self.rented_books.values():
if book_name in user_books:
return f'The book "{book_name}" is already rented and will be available in ' \
f'{user_books[book_name]} days!'
def return_book(self, author: str, book_name: str, user):
if book_name not in user.books:
return f"{user.username} doesn't have this book in his/her records!"
user.books.remove(book_name)
self.books_available[author].append(book_name)
self.rented_books[user.username].pop(book_name)
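# Hypothetical usage sketch (assumes a `user` object exposing `username` and
# a `books` list, matching how the methods above use it):
# lib = Library()
# lib.books_available = {"Tolkien": ["The Hobbit"]}
# print(lib.get_book("Tolkien", "The Hobbit", 14, user))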
|
lorindi/SoftUni-Software-Engineering
|
Python-OOP/Classes and Objects - Exercise/08_library/project/library.py
|
library.py
|
py
| 1,255 |
python
|
en
|
code
| 3 |
github-code
|
6
|
7035699251
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 21 13:24:47 2018
@author: kausic
"""
import os
import cv2
import numpy as np
from tqdm import tqdm
root="/home/kausic/Desktop/My_research/dataset/sunrgbd/SUNRGBD"
save_location="/home/kausic/ASU_MS/SML/project/sunrgbd_images/"
data_file=open(save_location+"data.txt",'w')
count=0
for dirn,subn,fileList in tqdm(os.walk(root,True),desc="Files recorded"):
if('annotation' in subn):
#print (subn)
(_,__,files)=os.walk(dirn +'/image/').__next__()
image_path=dirn+'/image/'+files[0]
(_,__,files)=os.walk(dirn +'/depth/').__next__()
depth_path=dirn+'/depth/'+files[0]
scene_file=open(dirn+'/scene.txt')
scene=scene_file.read()
scene_file.close()
rgb_img=cv2.imread(image_path)
depth_img=cv2.imread(depth_path)
if rgb_img is None or depth_img is None:
continue
final_string="img_{0:05d} ".format(count)
img_name="rgb_img_{0:05d}.jpg".format(count)
depth_name="depth_img_{0:05d}.jpg".format(count)
final_string+=scene
data_file.write(final_string+'\n')
cv2.imwrite(save_location+img_name,rgb_img)
cv2.imwrite(save_location+depth_name,depth_img)
count+=1
data_file.close()
|
Bharathgc/Evaluating-Fusion-points-for-multi-stream-networks-handling-cross-modal-data
|
data_extractor_script.py
|
data_extractor_script.py
|
py
| 1,306 |
python
|
en
|
code
| 11 |
github-code
|
6
|
24176685864
|
import paho.mqtt.client as mqtt
from Adafruit_IO import *
import time
broker_address = "192.168.82.100"
broker_port = 1883
topic_gas = "sensor/gas"
topic_flame = "sensor/flame"
topic_control = "control"
ADAFRUIT_IO_USERNAME = "chovy96de"
ADAFRUIT_IO_KEY = "aio_DbIt49hzcNtwelmritEnVKmugtTL"
aio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
def on_message(client, userdata, msg):
if msg.topic == topic_gas:
gas_reading = int(msg.payload)
print("Gas Sensor Reading:", gas_reading)
# Check if gas reading exceeds 50
if gas_reading > 50:
aio.send_data('gas-reading', gas_reading)
# Send message to 2nd Arduino to turn on LED
client.publish(topic_control, "LED_ON")
else:
aio.send_data('gas-reading', gas_reading)
# Send message to 2nd Arduino to turn off LED
client.publish(topic_control, "LED_OFF")
elif msg.topic == topic_flame:
flame_reading = int(msg.payload)
print("Flame Sensor Reading:", flame_reading)
# Check if flame reading exceeds 50
if flame_reading > 50:
aio.send_data('flame-reading', flame_reading)
# Send message to 2nd Arduino to turn on buzzer
client.publish(topic_control, "BUZZER_ON")
else:
aio.send_data('flame-reading', flame_reading)
# Send message to 2nd Arduino to turn off buzzer
client.publish(topic_control, "BUZZER_OFF")
client = mqtt.Client()
client.on_message = on_message
client.connect(broker_address, broker_port)
client.subscribe(topic_gas)
client.subscribe(topic_flame)
client.loop_start()
# Function to send command to the 2nd Arduino
def send_command_to_arduino(command):
client.publish(topic_control, command)
# Example usage: Send command to turn on the LED
send_command_to_arduino("LED_ON")
# Example usage: Send command to turn on the buzzer
send_command_to_arduino("BUZZER_ON")
# Delay between each data transmission
transmission_interval = 5 # 5 seconds
transmission_count = 0
transmission_limit = 12 # 12 transmissions within 1 minute
while True:
client.loop()
# Send data to Adafruit IO
gas = 100 # Replace with your actual gas sensor reading
flame = 200 # Replace with your actual flame sensor reading
aio.send_data('gas-reading', gas)
aio.send_data('flame-reading', flame)
transmission_count += 1
if transmission_count >= transmission_limit:
# Pause for 1 minute after reaching the transmission limit
time.sleep(60)
transmission_count = 0
else:
time.sleep(transmission_interval)
|
Shihab-007/Advanced-Embedded-System-Hardware-Enginnering
|
Advanced Embedded System/Codes/adafruit.py
|
adafruit.py
|
py
| 2,679 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31454964996
|
from typing import Any
from django.db.models import (
Model,
CharField,
ForeignKey,
BooleanField,
ManyToManyField,
UniqueConstraint,
DateTimeField,
CASCADE,
)
from abstracts.models import AbstractDateTime
from subjectss.models import Topic
from subjectss.models import Student
class QuizType(AbstractDateTime):
SUBJECT_QUIZ_TYPE = 1
TOPIC_QUIZ_TYPE = 2
CLASS_QUIZ_TYPE = 3
QUIZ_NAME_LIMIT = 100
    name: CharField = CharField(
        max_length=QUIZ_NAME_LIMIT,
        unique=True,
        db_index=True,
        verbose_name="Quiz type name"
    )
    class Meta:
        verbose_name: str = "Quiz type"
        verbose_name_plural: str = "Quiz types"
        ordering: tuple[str] = ("-datetime_updated",)
    def __str__(self) -> str:
        return self.name
class Question(AbstractDateTime):
TEST_NAME_LIMIT = 240
    name: CharField = CharField(
        max_length=TEST_NAME_LIMIT,
        unique=True,
        db_index=True,
        verbose_name="Name"
    )
    attached_subject_class: Topic = ForeignKey(
        to=Topic,
        on_delete=CASCADE,
        related_name="questions",
        verbose_name="Topic the question belongs to"
    )
    class Meta:
        verbose_name_plural: str = "Questions"
        verbose_name: str = "Question"
        ordering: tuple[str] = ("-datetime_updated",)
    def __str__(self) -> str:
        return self.name
class Answer(AbstractDateTime):
ANSWER_NAME_LIMIT = 250
    name: CharField = CharField(
        max_length=ANSWER_NAME_LIMIT,
        verbose_name="Answer"
    )
    question: Question = ForeignKey(
        to=Question,
        on_delete=CASCADE,
        related_name="answers",
        verbose_name="Question this answer belongs to"
    )
    is_correct: BooleanField = BooleanField(
        default=False,
        verbose_name="Correct answer?"
    )
    class Meta:
        verbose_name_plural: str = "Answers"
        verbose_name: str = "Answer"
        ordering: tuple[str] = ("-datetime_updated",)
        constraints: tuple[Any] = (
            UniqueConstraint(
                fields=['name', 'question'],
                name="unique_answer_name_question"
            ),
        )
    def __str__(self) -> str:
        return self.name
class Quiz(Model):
QUIZ_MAX_NAME = 250
    name: CharField = CharField(
        max_length=QUIZ_MAX_NAME,
        verbose_name="Quiz name"
    )
    student: Student = ForeignKey(
        to=Student,
        on_delete=CASCADE,
        related_name="subject_quizes",
        verbose_name="Registered student"
    )
    quiz_type: QuizType = ForeignKey(
        to=QuizType,
        on_delete=CASCADE,
        related_name="quizes",
        verbose_name="Quiz type"
    )
    questions: ManyToManyField = ManyToManyField(
        to=Question,
        through="QuizQuestionAnswer",
        through_fields=["quiz", "question"],
        verbose_name="Quiz questions"
    )
    datetime_created: DateTimeField = DateTimeField(
        verbose_name="creation date and time",
        auto_now_add=True
    )
    attached_questions: ManyToManyField = ManyToManyField(
        to=Question,
        blank=True,
        related_name="quizess",
        verbose_name="Attached quiz questions (read-only)"
    )
    class Meta:
        verbose_name: str = "Quiz"
        verbose_name_plural: str = "Quizzes"
    def __str__(self) -> str:
        return f"Student: '{self.student}' Quiz type: '{self.quiz_type}'"
class QuizQuestionAnswer(Model):
    quiz: Quiz = ForeignKey(
        to=Quiz,
        on_delete=CASCADE,
        related_name="quiz_questions",
        verbose_name="Quiz"
    )
    question: Question = ForeignKey(
        to=Question,
        on_delete=CASCADE,
        related_name="quiz_questions",
        verbose_name="Question"
    )
    user_answer: Answer = ForeignKey(
        to=Answer,
        on_delete=CASCADE,
        related_name="user_answer",
        verbose_name="User's answer"
    )
    class Meta:
        verbose_name: str = "Answer to a quiz question"
        verbose_name_plural: str = "Answers to quiz questions"
        constraints: tuple[Any] = (
            UniqueConstraint(
                fields=['quiz', 'question'],
                name="unique_quiz_question"
            ),
        )
    def __str__(self) -> str:
        return f"{self.quiz} {self.question} {self.user_answer}"
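def record_answer(quiz: Quiz, question: Question, answer: Answer) -> QuizQuestionAnswer:
    """Usage sketch (illustrative, not part of the original module).

    Because Quiz.questions declares through="QuizQuestionAnswer", answers are
    recorded by creating the through-model row directly rather than calling
    quiz.questions.add().
    """
    return QuizQuestionAnswer.objects.create(
        quiz=quiz, question=question, user_answer=answer
    )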
|
NNaraB/education_site_back
|
apps/tests/models.py
|
models.py
|
py
| 4,723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9699684266
|
import tkinter
from initController import *
from tkinter import Menu
from tkinter.ttk import *
from bridge import MouseMotionToController
from controller.canvasController import CanvasController
from controller.modeController import CompositionObjectController
top = tkinter.Tk()
buttonsContainer = tkinter.Frame(top)
buttonsContainer.pack(side = tkinter.LEFT)
def create_window():
window = tkinter.Toplevel(top)
window.geometry('{}x{}'.format(200 , 100))
def close():
window.destroy()
def getText(event):
CanvasController.canvasContainer.itemconfig(MouseMotionToController.singleClickedObj[-2].text, text = e.get())
button1 = tkinter.Button(window, text="Close", command=close, width = 50)
button2 = tkinter.Button(window, text="OK", command=close, width = 50)
button1.pack(side = tkinter.BOTTOM)
button2.pack(side = tkinter.BOTTOM)
e = tkinter.Entry(window)
e.bind("<Return>", getText)
e.pack()
def createNewGroup():
CompositionObjectController.createNewGroup()
def killAGroup():
CompositionObjectController.killAGroup()
menubar = Menu(top)
editMenu = Menu(menubar, tearoff=0)
editMenu.add_command(label="Group", command=createNewGroup)
editMenu.add_command(label="UnGroup", command=killAGroup)
editMenu.add_command(label="Change Object Name", command=create_window)
menubar.add_cascade(label="Edit Menu", menu=editMenu)
initAllButtons(buttonsContainer)
initCanvasContainer(top)
initModeControllers()
top.config(menu = menubar)
top.mainloop()
|
j611062000/umlEditorByPython
|
main.py
|
main.py
|
py
| 1,545 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9000442932
|
# Test script
# Created: 2022/8/5 9:26
from thop import profile
# import torchvision.models as models
# import torch
from ptflops import get_model_complexity_info
from models.basicblock import DRB, PALayer, CALayer, CCALayer, SRB
from models.SwinT import SwinT
# from .FCVit import fcvit_block
# from .FCA import MultiSpectralAttentionLayer
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional as trans_fn
from torchvision.transforms import InterpolationMode
from models.fusion import iAFF ,AFF, MS_CAM
# Bicubic upsampling, e.g.:
# img = trans_fn.resize(img, size, InterpolationMode.BICUBIC)
def channel_shuffle(x, groups=4):
batchsize, num_channels, height, width = x.data.size()
    channels_per_group = num_channels // groups  # reshape
x = x.view(batchsize, groups,channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
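def _channel_shuffle_demo():
    # Illustrative sketch (added for demonstration, not part of the original
    # model code): channel_shuffle only permutes channels across groups, so
    # the tensor shape is preserved.
    x = torch.arange(8.0).view(1, 8, 1, 1)
    y = channel_shuffle(x, groups=4)
    assert y.shape == x.shape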
def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
padding = int((kernel_size - 1) / 2) * dilation
return nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias, dilation=dilation,
groups=groups)
class Upsample(nn.Sequential):
"""Upsample module.
Args:
scale (int): Scale factor. Supported scales: 2^n and 3.
num_feat (int): Channel number of intermediate features.
"""
def __init__(self, scale, num_feat):
m = []
if (scale & (scale - 1)) == 0: # scale = 2^n
for _ in range(int(math.log(scale, 2))):
m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
m.append(nn.PixelShuffle(2))
elif scale == 3:
m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
m.append(nn.PixelShuffle(3))
else:
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
super(Upsample, self).__init__(*m)
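# Shape sketch (illustrative): Upsample(scale=4, num_feat=64) maps
# (B, 64, H, W) -> (B, 64, 4H, 4W) via two conv + PixelShuffle(2) stages.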
class UpsampleOneStep(nn.Sequential):
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
Used in lightweight SR to save parameters.
Args:
scale (int): Scale factor. Supported scales: 2^n and 3.
num_feat (int): Channel number of intermediate features.
"""
def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
self.num_feat = num_feat
self.input_resolution = input_resolution
m = []
m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
m.append(nn.PixelShuffle(scale))
super(UpsampleOneStep, self).__init__(*m)
def flops(self):
H, W = self.input_resolution
flops = H * W * self.num_feat * 3 * 9
return flops
class myModel(nn.Module):
def __init__(self, img_size=64, num_heads=8, upscale=4, window_size=8, num_in_ch=3, nf=64, embed_dim=64,
depth=4, upsampler='pixelshuffledirect', img_range=1.):
super(myModel, self).__init__()
num_feat = 64
num_out_ch = 3
self.upsampler = upsampler
self.window_size = window_size
self.img_range = img_range
self.upscale = upscale
if num_in_ch == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
#####################################################################################################
################################### 1, shallow feature extraction ###################################
self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
#####################################################################################################
################################### 2, deep feature extraction ######################################
self.num_layers = depth
        self.layers = nn.ModuleList()  # holds the HRBCT blocks
for i_layer in range(self.num_layers):
layer = HRBCT(embed_dim, nf, num_heads)
self.layers.append(layer)
#####################################################################################################
        ################################### 2.2, deep feature fusion module ######################################
self.conv1 = nn.Conv2d(depth*embed_dim, embed_dim, kernel_size=1) # depth*embed_dim
self.conv3 = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1, bias=True)
self.PA = PALayer(embed_dim) #
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == 'pixelshuffle':
# for classical SR
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
nn.LeakyReLU(inplace=True))
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
elif self.upsampler == 'pixelshuffledirect':
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
(img_size, img_size))
def check_image_size(self, x):
_, _, h, w = x.size()
mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
return x
    def forward_shallow_features(self, x):
        # NOTE: unused legacy path; it references self.RRDB, self.firstUp and
        # self.conv_end1, which are not defined in this model.
        x1 = self.RRDB(x)
        x1 = self.firstUp(x1)
        x1 = self.conv_end1(x1)
        return x1
def forward_features(self, x):
retainV = []
for layer in self.layers:
x = layer(x)
retainV.append(x)
x1 = torch.cat((retainV[0], retainV[1], retainV[2], retainV[3]), 1).contiguous()
return x1
def DFF(self, x):
x1 = self.conv1(x)
x1 = self.conv3(x1)
x1 = self.PA(x1)
return x1
def forward(self, x):
H, W = x.shape[2:]
self.mean = self.mean.type_as(x)
x = (x - self.mean) * self.img_range
if self.upsampler == 'pixelshuffle':
x = self.conv_first(x)
x = self.conv_before_upsample(x)
x = self.conv_last(self.upsample(x))
elif self.upsampler == 'pixelshuffledirect':
            x = self.conv_first(x)  # shallow feature extraction
            x = self.DFF(self.forward_features(x)) + x  # deep feature extraction and feature fusion
            x = self.upsample(x)  # upsampling and image reconstruction
x = x / self.img_range + self.mean
return x[:, :, :H * self.upscale, :W * self.upscale]
class HRBCT(nn.Module):
def __init__(self, embed_dim=64, nf=64, num_heads=8,distillation_rate=0.50):
super(HRBCT, self).__init__()
        # knowledge distillation split
self.distilled_channels = int(embed_dim * distillation_rate)
self.remaining_channels = int(embed_dim - self.distilled_channels)
self.distillation_rate = distillation_rate
self.Conv3_D1 = nn.Conv2d(self.distilled_channels, self.distilled_channels, 3, 1, 1)
self.Conv3_D2 = nn.Conv2d(int(self.remaining_channels * self.distillation_rate), int(self.remaining_channels * self.distillation_rate), 3, 1, 1)
self.ST = SwinT(embed_dim=self.remaining_channels, heads=num_heads)
self.SRB = SRB(int(nf*(1-distillation_rate)**2))
# self.BSRB = BSConvURB( int(nf*(1-distillation_rate)**2), int(nf*(1-distillation_rate)**2), kernel_size=3)
# DRB
# self.DRB = DRB(int(nf*(1-distillation_rate)**2))
# ESA
        # self.ESA = ESA(n_feats=nf, conv=nn.Conv2d)  # output channels, input channels
self.CCA = CCALayer(nf)
def forward(self, x):
distilled_c1, remaining_c1 = torch.split(x, (self.distilled_channels, self.remaining_channels), dim=1)
distilled_c1 = self.Conv3_D1(distilled_c1)
out1 = self.ST(remaining_c1)
distilled_c2, remaining_c2 = torch.split(out1, (int(self.remaining_channels*self.distillation_rate), int(self.remaining_channels*(1-self.distillation_rate))), dim=1)
        distilled_c2 = self.Conv3_D2(distilled_c2)
#
out2 = self.SRB(remaining_c2)
out = torch.cat([distilled_c1, distilled_c2, out2], dim=1)
x1 = self.CCA(out) #
x_4 = x + x1
return x_4
if __name__ == '__main__':
x = torch.randn((1, 3, 64, 64))
model = myModel()
y = model(x)
print(y.shape)
device = torch.device('cuda:0')
input = x.to(device)
model.eval()
model = model.to(device)
macs, params = get_model_complexity_info(model, (3, 64, 64), as_strings=True,
print_per_layer_stat=True, verbose=True)
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
|
sad192/LISN-Infrared-Image-SR
|
models/network_hybrid.py
|
network_hybrid.py
|
py
| 9,368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34596295886
|
import argparse
import itertools
import re
import sys
from typing import Optional
from graphviz import Digraph
import numpy as np
import wandb
from app.type import APPSProblem, Node
from app.const import (
CONCURRENCY_LIMIT,
DEFAULT_WANDB_PROJECT_NAME,
K,
NO_CUDA,
NUM_ROLLOUTS,
TERMINAL_TOKEN,
PROBLEM_INDEX,
)
from app.config import experiments
sys.path.append("Code-AI-Tree-Search/eval")
from compute_reward import compute_reward as _compute_reward # type: ignore
def extract_code(text: str, terminal_token: str = TERMINAL_TOKEN) -> str:
pattern = rf"ANSWER:\n(.*?){re.escape(terminal_token)}"
match = re.search(pattern, text, re.DOTALL)
return match.group(1).strip() if match else ""
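def _extract_code_demo() -> None:
    # Illustrative sketch (not part of the original module): extract_code
    # returns the text between "ANSWER:\n" and the terminal token, stripped.
    sample = f"reasoning...\nANSWER:\nprint(1 + 1)\n{TERMINAL_TOKEN}"
    assert extract_code(sample) == "print(1 + 1)"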
def compute_reward(code: str, problem: APPSProblem, mode: str = "train") -> int:
return _compute_reward(
problem.dir, code + "\n", mode=mode, public_test_cases="half"
)
def log_info(
num_actions: int,
node: Node,
token: Optional[str],
elapsed: Optional[float],
):
print(
f"Step: {('Prediction' if elapsed is not None else 'Selection'):<10} |",
f"Action #: {num_actions:<2} |",
f"Action: {node.display_action if node else 'N/A':<6} |",
f"Token: {repr(token) if token is not None else 'N/A':<8} |",
f"Elapsed: {(str(np.round(elapsed, 3)) + 's' if elapsed is not None else 'N/A'):<7} |",
)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--remote", action="store_true", default=False)
parser.add_argument(
"--debug", action="store_true", help="Debug mode", default=False
)
parser.add_argument("--dry", action="store_true", default=False)
parser.add_argument("--no_cuda", action="store_true", default=NO_CUDA)
parser.add_argument("--K", type=int, help="Number of expanded children", default=K)
parser.add_argument("--num_rollouts", type=int, default=NUM_ROLLOUTS)
parser.add_argument("--concurrency_limit", type=int, default=CONCURRENCY_LIMIT)
parser.add_argument("--experiment_name", type=str, default=None)
parser.add_argument(
"--problem_index",
type=str,
default=PROBLEM_INDEX,
choices=APPSProblem.problem_indices,
)
args, _ = parser.parse_known_args()
return args
def traverse_and_visualize(node, graph, tokenizer, node_id=0):
if node is None:
return node_id
# Create a label for the node with its statistics
action = tokenizer.decode([node.action]) if node.action != "root" else "root"
label = f"Action ID: {node.action}\n Action: {action}\nVisits: {node.visits}\nSelected: {node.selected}"
if node.action != "root":
label += f"\nProb: {node.prob:.2f}\nValue: {node.value:.2f}"
graph.node(str(node_id), label)
current_id = node_id
children = [c for c in node.children if c.selected > 0 or c.visits > 0]
for child in children:
next_id = node_id + 1
graph.edge(str(current_id), str(next_id))
node_id = traverse_and_visualize(child, graph, tokenizer, next_id)
return node_id
def visualize_tree(root, tokenizer):
graph = Digraph(comment="Tree Visualization")
traverse_and_visualize(root, graph, tokenizer)
graph.render("tree", format="png")
def get_wandb_runs(experiment_name, project_name=DEFAULT_WANDB_PROJECT_NAME):
return wandb.Api().runs(project_name, filters={"group": experiment_name})
def compose_configs(
problem_indices, experiment_name, dry, project_name=DEFAULT_WANDB_PROJECT_NAME
):
runs = get_wandb_runs(experiment_name, project_name)
already_run = [run.config for run in runs]
exp = experiments[experiment_name]
configs = []
for idx in problem_indices:
for cfg in list(itertools.product(*exp.values())):
cfg = {
**dict(zip(exp.keys(), cfg)),
"problem_index": idx,
}
if cfg not in already_run or dry:
configs.append(cfg)
return configs
|
cavaunpeu/mcts-llm-codegen
|
app/util.py
|
util.py
|
py
| 4,017 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26804269841
|
import sys
from bisect import bisect_left
input = sys.stdin.readline
num_gates = int(input())
num_planes = int(input())
undocked = [gate + 1 for gate in range(num_gates)]
def binary_search(max_dock):
idx = bisect_left(undocked, max_dock)
    # fall back to the next smaller free gate when max_dock is past the end of
    # the list or when the gate found overshoots it (and a smaller one exists)
    if idx == len(undocked) or (undocked[idx] > max_dock and idx > 0):
        return idx - 1
    if undocked[idx] == max_dock:
return idx
raise ValueError
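# Worked example (illustrative): with undocked == [2, 4, 5] and max_dock == 3,
# bisect_left returns 1; undocked[1] == 4 overshoots and idx > 0, so the
# function returns 0, i.e. gate 2, the largest free gate within the limit.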
for plane in range(num_planes):
max_dock = int(input())
try: undocked.pop(binary_search(max_dock))
except: break
print(num_gates - len(undocked))
|
Stevan-Zhuang/DMOJ
|
CCC/CCC '15 S3 - Gates.py
|
CCC '15 S3 - Gates.py
|
py
| 574 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5430829069
|
#CODING BY ARYAN KHAN
import os
import random
color_table = "#00FF00"
#--(Dark@Colours)---#
r="\033[1;91m"
g="\033[1;92m"
y="\033[1;93m"
b="\033[1;94m"
p="\033[1;95m"
c="\033[1;96m"
l="\033[1;97m"
s="\033[0m"
#--(light@Colours)---#
lr="\033[0;91m"
lg="\033[0;92m"
ly="\033[0;93m"
lb="\033[0;94m"
lp="\033[0;95m"
lc="\033[0;96m"
ll="\033[38;5;208m"
#--(rare-colors)--#
holaa="38;5"
ro=(f"\033[{holaa};208")
rb=(f"\033[{holaa};32")
rc=(f"\033[{holaa};122m")
rg= (f"\033[{holaa};112m")
rp=(f"\033[{holaa};147m")
os.system ("clear")
logo = ("""
\033[1;33m 888 d8P 8888888b. .d8888b.
\033[1;33m 888 d8P 888 Y88b d88P Y88b
\033[1;32m 888 d8P 888 888 Y88b.
\033[1;33m 888d88K 888 d88P "Y888b.
\033[1;33m 8888888b 8888888P" "Y88b.
\033[1;32m 888 Y88b 888 T88b "888
\033[1;33m 888 Y88b 888 T88b Y88b d88P
\033[1;33m 888 Y88b 888 T88b "Y8888P"
\033[1;32m==========================================
\033[1;32m \033[1;33mCREATED BY : \033[1;33mKASHIF \033[1;32m&& \033[1;33mARYAN
\033[1;32m \033[1;32mFACEBOK : \033[1;34m ArYan KhAn
\033[1;32m \033[1;35mGITHUB : \033[1;35mTEAM-KRS
\033[1;32m \033[1;36mTOOL STATUS : \033[1;36mTOOL IS FREE
\033[1;32m \033[1;35mTEAM : \033[1;35mKRS
\033[1;32m \033[1;36mTOOL VIRSION : \033[1;36mUAGENTS
\033[1;32m==========================================\n""")
def linex():
print('\033[1;32m==========================================')
def lines():
print('\033[1;32m==========================================')
os.system("clear")
print(logo)
def generate_samsung_user_agent():
samsung_models = [
"Galaxy S21",
"Galaxy S20",
"Galaxy Note 20",
"Galaxy Note 10",
"Galaxy A71",
"Galaxy A51",
"Galaxy Tab S7",
"Galaxy Tab S6",
"Galaxy Watch 4",
"Galaxy Watch Active 2",
]
    android_versions = [
        "Android 10",
        "Android 11",
        "Android 12",
    ]
samsung_browsers = [
"Chrome/90.0.4430.93",
"Chrome/91.0.4472.164",
"Firefox/89.0",
"Firefox/90.0",
"SamsungBrowser/14.0",
"SamsungBrowser/15.0",
]
model = random.choice(samsung_models)
android_version = random.choice(android_versions)
browser = random.choice(samsung_browsers)
user_agent = f"Mozilla/5.0 ({model}; {android_version}; SM-G977B) AppleWebKit/537.36 (KHTML, like Gecko) {browser} Safari/537.36"
return user_agent
def generate_user_agents(num_agents):
user_agents = []
for _ in range(num_agents):
user_agent = generate_samsung_user_agent()
user_agents.append(user_agent)
return user_agents
# Option 1: Generate a specific number of user agents
num_agents = int(input("Enter LIMIT: "))
user_agents = generate_user_agents(num_agents)
print("Generated User Agents:")
for ua in user_agents:
print(ua)
# Option 2: Generate unlimited user agents
unlimited_option = input("Do you want to create unlimited user agents? (yes/no): ")
if unlimited_option.lower() == "yes":
while True:
user_agent = generate_samsung_user_agent()
print(user_agent)
next_option = input("Generate another user agent? (yes/no): ")
if next_option.lower() != "yes":
break
else:
print("Script execution completed.")
user_agent = f"{logo}\nMozilla/5.0 ({model};"
|
TEAM-KRS/Useragents
|
ua.py
|
ua.py
|
py
| 3,522 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41039653002
|
from __future__ import annotations
import json
import atexit
import datetime
import functools
import logging
import multiprocessing
import os
import shutil
import signal
import socket
import subprocess
import tempfile
import threading
import time
import webbrowser
from collections import namedtuple
from contextlib import closing
from enum import Enum, auto
from io import BufferedWriter
from logging.handlers import QueueHandler
from multiprocessing import Queue
from os import path
from typing import Dict, List, Optional, Set
import docker
import grpc
import requests
import toml
from urllib3.exceptions import MaxRetryError
import vega_sim.api.governance as gov
import vega_sim.grpc.client as vac
from vega_sim import vega_bin_path, vega_home_path
from vega_sim.service import VegaService
from vega_sim.tools.load_binaries import download_binaries
from vega_sim.tools.retry import retry
from vega_sim.wallet.base import DEFAULT_WALLET_NAME, Wallet
from vega_sim.wallet.slim_wallet import SlimWallet
from vega_sim.wallet.vega_wallet import VegaWallet
logger = logging.getLogger(__name__)
PortUpdateConfig = namedtuple(
"PortUpdateConfig", ["file_path", "config_path", "key", "val_func"]
)
PORT_DIR_NAME = "market_sim_ports"
class Ports(Enum):
DATA_NODE_GRPC = auto()
DATA_NODE_REST = auto()
DATA_NODE_POSTGRES = auto()
FAUCET = auto()
WALLET = auto()
VEGA_NODE = auto()
CORE_GRPC = auto()
CORE_REST = auto()
BROKER = auto()
METRICS = auto()
DATA_NODE_METRICS = auto()
PPROF = auto()
CONSOLE = auto()
PORT_UPDATERS = {
Ports.DATA_NODE_GRPC: [
PortUpdateConfig(
("config", "data-node", "config.toml"),
["API"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "data-node", "config.toml"),
["Gateway", "Node"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "wallet-service", "networks", "local.toml"),
["API", "GRPC"],
"Hosts",
lambda port: [f"localhost:{port}"],
),
],
Ports.DATA_NODE_REST: [
PortUpdateConfig(
("config", "data-node", "config.toml"),
["Gateway"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "wallet-service", "networks", "local.toml"),
["API", "REST"],
"Hosts",
lambda port: [f"localhost:{port}"],
),
PortUpdateConfig(
("config", "wallet-service", "networks", "local.toml"),
["API", "GraphQL"],
"Hosts",
lambda port: [f"localhost:{port}"],
),
],
Ports.DATA_NODE_POSTGRES: [
PortUpdateConfig(
("config", "data-node", "config.toml"),
["SQLStore", "ConnectionConfig"],
"Port",
lambda port: port,
),
],
Ports.FAUCET: [
PortUpdateConfig(
("config", "faucet", "config.toml"), [], "Port", lambda port: port
),
],
Ports.WALLET: [
PortUpdateConfig(
("config", "wallet-service", "config.toml"),
["Server"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "wallet-service", "networks", "local.toml"),
[],
"Port",
lambda port: port,
),
],
Ports.VEGA_NODE: [
PortUpdateConfig(
("config", "node", "config.toml"),
["Blockchain", "Null"],
"Port",
lambda port: port,
),
],
Ports.CORE_GRPC: [
PortUpdateConfig(
("config", "faucet", "config.toml"),
["Node"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "node", "config.toml"),
["API"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "data-node", "config.toml"),
["API"],
"CoreNodeGRPCPort",
lambda port: port,
),
],
Ports.CORE_REST: [
PortUpdateConfig(
("config", "node", "config.toml"),
["API", "REST"],
"Port",
lambda port: port,
),
],
Ports.BROKER: [
PortUpdateConfig(
("config", "data-node", "config.toml"),
["Broker", "SocketConfig"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "node", "config.toml"),
["Broker", "Socket"],
"Port",
lambda port: port,
),
],
Ports.DATA_NODE_METRICS: [
PortUpdateConfig(
("config", "data-node", "config.toml"),
["Metrics"],
"Port",
lambda port: port,
),
],
Ports.METRICS: [
PortUpdateConfig(
("config", "node", "config.toml"),
["Metrics"],
"Port",
lambda port: port,
),
],
Ports.PPROF: [
PortUpdateConfig(
("config", "data-node", "config.toml"),
["Pprof"],
"Port",
lambda port: port,
),
PortUpdateConfig(
("config", "node", "config.toml"),
["Pprof"],
"Port",
lambda port: port,
),
],
Ports.CONSOLE: [
PortUpdateConfig(
("config", "wallet-service", "networks", "local.toml"),
["Console"],
"LocalPort",
lambda port: port,
),
],
}
class VegaStartupTimeoutError(Exception):
pass
class ServiceNotStartedError(Exception):
pass
class SocketNotFoundError(Exception):
pass
def logger_thread(q):
while True:
record = q.get()
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
def find_free_port(existing_set: Optional[Set[int]] = None):
ret_sock = 0
existing_set = (
existing_set.union(set([ret_sock]))
if existing_set is not None
else set([ret_sock])
)
# Synchronisation to try to avoid using the same ports across processes
# launching at very similar times
dated_path_dir = path.join(
tempfile.gettempdir(),
PORT_DIR_NAME,
datetime.date.today().strftime("%Y-%d-%m-%H-%M"),
)
os.makedirs(dated_path_dir, exist_ok=True)
existing_set.update(set(int(x) for x in os.listdir(dated_path_dir)))
num_tries = 0
while ret_sock in existing_set:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
ret_sock = s.getsockname()[1]
num_tries += 1
if num_tries >= 100:
# Arbitrary high number. If we try 100 times and fail to find
# a port it seems reasonable to give up
raise SocketNotFoundError("Failed finding a free socket")
open(path.join(dated_path_dir, str(ret_sock)), "x")
return ret_sock
def _terminate_proc(
proc: subprocess.Popen[bytes], out_file: BufferedWriter, err_file: BufferedWriter
) -> None:
proc.terminate()
out_file.close()
err_file.close()
def _popen_process(
popen_args: List[str],
dir_root: str,
log_name: str,
env: Optional[Dict[str, str]] = None,
) -> subprocess.Popen[bytes]:
out = open(path.join(dir_root, f"{log_name}.out"), "wb")
err = open(path.join(dir_root, f"{log_name}.err"), "wb")
sub_proc = subprocess.Popen(
popen_args, stdout=out, stderr=err, env=env, close_fds=True
)
atexit.register(functools.partial(_terminate_proc, sub_proc, out, err))
return sub_proc
def _update_node_config(
vega_home: str,
port_config: Dict[Ports, int],
transactions_per_block: int = 1,
block_duration: str = "1s",
use_docker_postgres: bool = False,
) -> None:
config_path = path.join(vega_home, "config", "node", "config.toml")
config_toml = toml.load(config_path)
config_toml["Blockchain"]["Null"]["GenesisFile"] = path.join(
vega_home, "genesis.json"
)
config_toml["Blockchain"]["Null"]["BlockDuration"] = block_duration
config_toml["Blockchain"]["Null"]["TransactionsPerBlock"] = transactions_per_block
existing_ports = set(port_config.values())
for port in Ports:
if port in port_config:
continue
new_port = find_free_port(existing_ports)
existing_ports.add(new_port)
port_config[port] = new_port
with open(config_path, "w") as f:
toml.dump(config_toml, f)
for port_key, update_configs in PORT_UPDATERS.items():
for config in update_configs:
file_path = path.join(vega_home, *config.file_path)
config_toml = toml.load(file_path)
elem = config_toml
for k in config.config_path:
elem = elem[k]
elem[config.key] = config.val_func(port_config[port_key])
if port_key == Ports.DATA_NODE_POSTGRES:
config_toml["SQLStore"]["UseEmbedded"] = not use_docker_postgres
with open(file_path, "w") as f:
toml.dump(config_toml, f)
def manage_vega_processes(
child_conn: multiprocessing.Pipe,
log_queue,
vega_path: str,
data_node_path: str,
vega_wallet_path: str,
vega_console_path: Optional[str] = None,
run_with_console: bool = False,
port_config: Optional[Dict[Ports, int]] = None,
transactions_per_block: int = 1,
block_duration: str = "1s",
run_wallet: bool = False,
retain_log_files: bool = False,
log_dir: Optional[str] = None,
replay_from_path: Optional[str] = None,
store_transactions: bool = True,
log_level: Optional[int] = None,
genesis_time: Optional[datetime.datetime] = None,
) -> None:
logger.addHandler(QueueHandler(log_queue))
logger.setLevel(log_level if log_level is not None else logging.INFO)
port_config = port_config if port_config is not None else {}
    try:
        docker_client = docker.from_env()
        use_docker_postgres = True
    except Exception:
        use_docker_postgres = False
# Explicitly not using context here so that crashed logs are retained
tmp_vega_dir = tempfile.mkdtemp(prefix="vega-sim-") if log_dir is None else log_dir
logger.info(f"Running NullChain from vegahome of {tmp_vega_dir}")
if port_config.get(Ports.CONSOLE):
logger.info(f"Launching Console at port {port_config.get(Ports.CONSOLE)}")
if port_config.get(Ports.DATA_NODE_REST):
logger.info(
"Launching Datanode REST + GRAPHQL at port"
f" {port_config.get(Ports.DATA_NODE_REST)}"
)
if port_config.get(Ports.DATA_NODE_GRPC):
logger.info(
f"Launching Datanode GRPC at port {port_config.get(Ports.DATA_NODE_GRPC)}"
)
if port_config.get(Ports.CORE_REST):
logger.info(f"Launching Core REST at port {port_config.get(Ports.CORE_REST)}")
if port_config.get(Ports.CORE_GRPC):
logger.info(f"Launching Core GRPC at port {port_config.get(Ports.CORE_GRPC)}")
dest_dir = f"{tmp_vega_dir}/vegahome"
shutil.copytree(vega_home_path, dest_dir)
for dirpath, _, filenames in os.walk(dest_dir):
os.utime(dirpath, None)
for file in filenames:
os.utime(os.path.join(dirpath, file), None)
if genesis_time is not None:
with open(f"{dest_dir}/genesis.json", "r") as file:
data = json.load(file)
data["genesis_time"] = genesis_time.isoformat() + "Z"
with open(f"{dest_dir}/genesis.json", "w") as file:
json.dump(data, file, indent=2)
tmp_vega_home = tmp_vega_dir + "/vegahome"
_update_node_config(
tmp_vega_home,
port_config=port_config,
transactions_per_block=transactions_per_block,
block_duration=block_duration,
use_docker_postgres=use_docker_postgres,
)
if use_docker_postgres:
data_node_docker_volume = docker_client.volumes.create()
data_node_container = docker_client.containers.run(
"timescale/timescaledb:2.11.2-pg15",
command=[
"-c",
"max_connections=50",
"-c",
"log_destination=stderr",
"-c",
"work_mem=5MB",
"-c",
"huge_pages=off",
"-c",
"shared_memory_type=sysv",
"-c",
"dynamic_shared_memory_type=sysv",
"-c",
"shared_buffers=2GB",
"-c",
"temp_buffers=5MB",
],
detach=True,
ports={5432: port_config[Ports.DATA_NODE_POSTGRES]},
volumes=[f"{data_node_docker_volume.name}:/var/lib/postgresql/data"],
environment={
"POSTGRES_USER": "vega",
"POSTGRES_PASSWORD": "vega",
"POSTGRES_DB": "vega",
},
remove=False,
)
dataNodeProcess = _popen_process(
[
data_node_path,
"start",
"--home=" + tmp_vega_home,
"--chainID=CUSTOM",
],
dir_root=tmp_vega_dir,
log_name="data_node",
)
vega_args = [
vega_path,
"start",
"--nodewallet-passphrase-file=" + tmp_vega_home + "/passphrase-file",
"--home=" + tmp_vega_home,
]
if store_transactions:
replay_file = (
replay_from_path
if replay_from_path is not None
else tmp_vega_home + "/replay"
)
vega_args.extend(
[
f"--blockchain.nullchain.replay-file={replay_file}",
"--blockchain.nullchain.record",
]
)
if replay_from_path is not None:
vega_args.extend(
[
f"--blockchain.nullchain.replay-file={replay_from_path}",
"--blockchain.nullchain.replay",
]
)
vegaNodeProcess = _popen_process(
vega_args,
dir_root=tmp_vega_dir,
log_name="node",
)
    for _ in range(500):
        try:
            requests.get(
                f"http://localhost:{port_config[Ports.CORE_REST]}/blockchain/height"
            ).raise_for_status()
            break
        except Exception:
            # core REST endpoint not up yet; back off briefly instead of busy-looping
            time.sleep(0.1)
vegaFaucetProcess = _popen_process(
[
vega_path,
"faucet",
"run",
"--passphrase-file=" + tmp_vega_home + "/passphrase-file",
"--home=" + tmp_vega_home,
],
dir_root=tmp_vega_dir,
log_name="faucet",
)
processes = {
"data-node": dataNodeProcess,
"faucet": vegaFaucetProcess,
"vega": vegaNodeProcess,
}
if run_wallet:
for _ in range(3000):
try:
requests.get(
f"http://localhost:{port_config.get(Ports.DATA_NODE_REST)}/time"
).raise_for_status()
requests.get(
f"http://localhost:{port_config.get(Ports.CORE_REST)}/blockchain/height"
).raise_for_status()
break
except (
MaxRetryError,
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
):
time.sleep(0.1)
subprocess.run(
[
vega_wallet_path,
"wallet",
"api-token",
"init",
f"--home={tmp_vega_home}",
f"--passphrase-file={tmp_vega_home}/passphrase-file",
],
capture_output=True,
)
subprocess.run(
[
vega_wallet_path,
"wallet",
"create",
"--wallet",
DEFAULT_WALLET_NAME,
"--home",
tmp_vega_home,
"--passphrase-file",
tmp_vega_home + "/passphrase-file",
"--output",
"json",
],
capture_output=True,
)
subprocess.run(
[
vega_wallet_path,
"wallet",
"api-token",
"generate",
"--home=" + tmp_vega_home,
"--tokens-passphrase-file=" + tmp_vega_home + "/passphrase-file",
"--wallet-passphrase-file=" + tmp_vega_home + "/passphrase-file",
"--wallet-name=" + DEFAULT_WALLET_NAME,
"--description=" + DEFAULT_WALLET_NAME,
],
capture_output=True,
)
wallet_args = [
vega_wallet_path,
"wallet",
"service",
"run",
"--network",
"local",
"--home=" + tmp_vega_home,
"--automatic-consent",
"--load-tokens",
"--tokens-passphrase-file=" + tmp_vega_home + "/passphrase-file",
]
vegaWalletProcess = _popen_process(
wallet_args,
dir_root=tmp_vega_dir,
log_name="vegawallet",
)
processes["wallet"] = vegaWalletProcess
if run_with_console:
env_copy = os.environ.copy()
env_copy.update(
{
"NX_VEGA_URL": (
f"http://localhost:{port_config[Ports.DATA_NODE_REST]}/graphql"
),
"NX_VEGA_WALLET_URL": f"http://localhost:{port_config[Ports.WALLET]}",
"NX_VEGA_ENV": "CUSTOM",
"NX_PORT": f"{port_config[Ports.CONSOLE]}",
"NODE_ENV": "development",
"NX_VEGA_NETWORKS": "{}",
}
)
console_process = _popen_process(
[
"yarn",
"--cwd",
vega_console_path,
"nx",
"serve",
"-o",
"trading",
"--port",
f"{port_config[Ports.CONSOLE]}",
],
dir_root=tmp_vega_dir,
log_name="console",
env=env_copy,
)
processes["console"] = console_process
# Send process pid values for resource monitoring
child_conn.send({name: process.pid for name, process in processes.items()})
    # According to https://docs.oracle.com/cd/E19455-01/806-5257/gen-75415/index.html
    # there is no guarantee that a signal will be caught by this thread. Usually the
    # parent process catches the signal and removes it from the list of pending
    # signals, which leaves us with a leak of orphaned vega processes
    # and docker containers. Below is a hack to maximise the chance of
    # catching the signal.
    # We call signal.signal as a workaround to move this thread to the top of
    # the catch stack, then sigwait blocks until a signal is trapped.
    # As a last resort we catch `SIGCHLD` in case the parent process exited
    # and this process is now the orphan.
    # To provide a 100% guarantee this should be implemented another way:
    # - the signal should be trapped in the main process and synced via
    #   shared memory
    # - or this entire process manager should be incorporated into VegaServiceNull
    #   and the containers/processes removed in an inline call in __exit__
    #
    #
    # An important assumption is that this signal can be caught multiple times
def sighandler(signal, frame, logger_):
if signal is None:
logger_.info("VegaServiceNull exited normally")
else:
logger_.info(f"VegaServiceNull exited after trapping the {signal} signal")
logger_.info("Received signal from parent process")
logger_.info("Starting termination for processes")
for name, process in processes.items():
logger_.info(f"Terminating process {name}(pid: {process.pid})")
process.terminate()
for name, process in processes.items():
attempts = 0
while process.poll() is None:
logger_.info(f"Process {name} still not terminated")
time.sleep(1)
attempts += 1
if attempts > 60:
logger_.warning(
"Gracefully terminating process timed-out. Killing process"
f" {name}."
)
process.kill()
logger_.debug(f"Process {name} stopped with {process.poll()}")
if process.poll() == 0:
logger_.info(f"Process {name} terminated.")
if process.poll() == -9:
logger_.info(f"Process {name} killed.")
if use_docker_postgres:
def kill_docker_container() -> None:
try:
data_node_container.stop()
with open(tmp_vega_home + "/postgres.out", "wb") as f:
f.write(data_node_container.logs())
data_node_container.remove()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
logger_.debug(
f"Container {data_node_container.name} has been already"
" killed"
)
return
else:
raise e
logger_.debug(f"Stopping container {data_node_container.name}")
retry(10, 1.0, kill_docker_container)
removed = False
logger_.debug(f"Removing volume {data_node_docker_volume.name}")
for _ in range(20):
if data_node_container.status == "running":
time.sleep(3)
continue
try:
data_node_docker_volume.remove(force=True)
removed = True
break
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
removed = True
logger_.debug(
f"Data node volume {data_node_docker_volume.name} has been"
" already killed"
)
break
else:
time.sleep(1)
except docker.errors.APIError:
time.sleep(1)
if not removed:
logger_.exception(
"Docker volume failed to cleanup, will require manual cleaning"
)
if not retain_log_files and os.path.exists(tmp_vega_dir):
shutil.rmtree(tmp_vega_dir)
    # The lines below are a workaround to put the signal listeners on top of the stack so this process can handle them.
signal.signal(signal.SIGINT, lambda _s, _h: None)
signal.signal(signal.SIGTERM, lambda _s, _h: None)
    signal.sigwait(
        [
            signal.SIGKILL,  # the process was explicitly killed by somebody wielding the kill program
            signal.SIGTERM,  # the process was explicitly killed by somebody wielding the terminate program
            signal.SIGCHLD,  # a child process created earlier (via fork) has since died
        ]
    )
sighandler(None, None, logger_=logger)
class VegaServiceNull(VegaService):
PORT_TO_FIELD_MAP = {
Ports.CONSOLE: "console_port",
Ports.CORE_GRPC: "vega_node_grpc_port",
Ports.CORE_REST: "vega_node_rest_port",
Ports.DATA_NODE_GRPC: "data_node_grpc_port",
Ports.DATA_NODE_METRICS: "data_node_metrics_port",
Ports.DATA_NODE_POSTGRES: "data_node_postgres_port",
Ports.DATA_NODE_REST: "data_node_rest_port",
Ports.FAUCET: "faucet_port",
Ports.METRICS: "metrics_port",
Ports.VEGA_NODE: "vega_node_port",
Ports.WALLET: "wallet_port",
}
def __init__(
self,
vega_path: Optional[str] = None,
data_node_path: Optional[str] = None,
vega_wallet_path: Optional[str] = None,
vega_console_path: Optional[str] = None,
start_immediately: bool = False,
run_with_console: bool = False,
run_wallet_with_token_dapp: bool = False,
port_config: Optional[Dict[Ports, int]] = None,
warn_on_raw_data_access: bool = True,
transactions_per_block: int = 1,
seconds_per_block: int = 1,
use_full_vega_wallet: bool = False,
retain_log_files: bool = False,
launch_graphql: bool = False,
store_transactions: bool = True,
replay_from_path: Optional[str] = None,
listen_for_high_volume_stream_updates: bool = False,
check_for_binaries: bool = False,
genesis_time: Optional[datetime.datetime] = None,
):
super().__init__(
can_control_time=True,
warn_on_raw_data_access=warn_on_raw_data_access,
seconds_per_block=seconds_per_block,
listen_for_high_volume_stream_updates=listen_for_high_volume_stream_updates,
)
self.retain_log_files = retain_log_files
self._using_all_custom_paths = all(
[x is not None for x in [vega_path, data_node_path, vega_wallet_path]]
)
self.vega_path = vega_path or path.join(vega_bin_path, "vega")
self.data_node_path = data_node_path or path.join(vega_bin_path, "data-node")
self.vega_wallet_path = vega_wallet_path or path.join(vega_bin_path, "vega")
self.vega_console_path = vega_console_path or path.join(
vega_bin_path, "console"
)
self.proc = None
self.run_with_console = run_with_console
self.run_wallet_with_token_dapp = run_wallet_with_token_dapp
self.genesis_time = genesis_time
self.transactions_per_block = transactions_per_block
self.seconds_per_block = seconds_per_block
self._wallet = None
self._use_full_vega_wallet = use_full_vega_wallet
self.store_transactions = store_transactions
self.log_dir = tempfile.mkdtemp(prefix="vega-sim-")
self.launch_graphql = launch_graphql
self.replay_from_path = replay_from_path
self.check_for_binaries = check_for_binaries
self.stopped = False
self.logger_p = None
self._assign_ports(port_config)
if start_immediately:
self.start()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def wait_fn(self, wait_multiple: float = 1) -> None:
self.wait_for_core_catchup()
self.forward(f"{int(wait_multiple * self.seconds_per_block)}s")
self.wait_for_core_catchup()
@property
def wallet(self) -> Wallet:
if self._wallet is None:
if self._use_full_vega_wallet:
self._wallet = VegaWallet(
self.wallet_url,
wallet_path=self.vega_wallet_path,
vega_home_dir=path.join(self.log_dir, "vegahome"),
passphrase_file_path=path.join(
self.log_dir, "vegahome", "passphrase-file"
),
)
else:
self._wallet = SlimWallet(
self.core_client,
full_wallet=None,
log_dir=self.log_dir,
)
return self._wallet
def _check_started(self) -> None:
if self.proc is None:
raise ServiceNotStartedError("NullChain Vega accessed without starting")
def _generate_port_config(self) -> Dict[Ports, int]:
return {
Ports.CONSOLE: self.console_port,
Ports.CORE_GRPC: self.vega_node_grpc_port,
Ports.CORE_REST: self.vega_node_rest_port,
Ports.DATA_NODE_GRPC: self.data_node_grpc_port,
Ports.DATA_NODE_METRICS: self.data_node_metrics_port,
Ports.DATA_NODE_POSTGRES: self.data_node_postgres_port,
Ports.DATA_NODE_REST: self.data_node_rest_port,
Ports.FAUCET: self.faucet_port,
Ports.METRICS: self.metrics_port,
Ports.VEGA_NODE: self.vega_node_port,
Ports.WALLET: self.wallet_port,
}
# set ports from port_config or alternatively find a free port
# to use
def _assign_ports(self, port_config: Optional[Dict[Ports, int]]):
self.console_port = 0
self.data_node_grpc_port = 0
self.data_node_metrics_port = 0
self.data_node_postgres_port = 0
self.data_node_rest_port = 0
self.faucet_port = 0
self.metrics_port = 0
self.vega_node_grpc_port = 0
self.vega_node_port = 0
self.vega_node_rest_port = 0
self.wallet_port = 0
for key, name in self.PORT_TO_FIELD_MAP.items():
if port_config is not None and key in port_config:
setattr(self, name, port_config[key])
else:
curr_ports = set(
[getattr(self, port) for port in self.PORT_TO_FIELD_MAP.values()]
)
setattr(self, name, find_free_port(curr_ports))
def start(self, block_on_startup: bool = True) -> None:
if self.check_for_binaries and not self._using_all_custom_paths:
download_binaries()
parent_conn, child_conn = multiprocessing.Pipe()
ctx = multiprocessing.get_context()
port_config = self._generate_port_config()
self.queue = Queue()
self.logger_p = threading.Thread(target=logger_thread, args=(self.queue,))
self.logger_p.start()
self.proc = ctx.Process(
target=manage_vega_processes,
kwargs={
"child_conn": child_conn,
"log_queue": self.queue,
"vega_path": self.vega_path,
"data_node_path": self.data_node_path,
"vega_wallet_path": self.vega_wallet_path,
"vega_console_path": self.vega_console_path,
"run_with_console": self.run_with_console,
"port_config": port_config,
"transactions_per_block": self.transactions_per_block,
"block_duration": f"{int(self.seconds_per_block)}s",
"run_wallet": self._use_full_vega_wallet,
"retain_log_files": self.retain_log_files,
"log_dir": self.log_dir,
"store_transactions": self.store_transactions,
"replay_from_path": self.replay_from_path,
"log_level": logging.getLogger().level,
"genesis_time": self.genesis_time,
},
)
self.proc.start()
if self.run_with_console:
logger.info(
"Vega Running. Console launched at"
f" http://localhost:{self.console_port}"
)
if block_on_startup:
# Wait for startup
started = False
for _ in range(500):
try:
channel = grpc.insecure_channel(
self.data_node_grpc_url,
options=(
("grpc.enable_http_proxy", 0),
("grpc.max_send_message_length", 1024 * 1024 * 20),
("grpc.max_receive_message_length", 1024 * 1024 * 20),
),
)
grpc.channel_ready_future(channel).result(timeout=5)
trading_data_client = vac.VegaTradingDataClientV2(
self.data_node_grpc_url,
channel=channel,
)
gov.get_blockchain_time(trading_data_client)
requests.get(
f"http://localhost:{self.data_node_rest_port}/time"
).raise_for_status()
requests.get(
f"http://localhost:{self.vega_node_rest_port}/blockchain/height"
).raise_for_status()
requests.get(
f"http://localhost:{self.faucet_port}/api/v1/health"
).raise_for_status()
if self._use_full_vega_wallet:
requests.get(
f"http://localhost:{self.wallet_port}/api/v2/health"
).raise_for_status()
started = True
break
except (
MaxRetryError,
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
grpc.RpcError,
grpc.FutureTimeoutError,
):
time.sleep(0.1)
if not started:
self.stop()
raise VegaStartupTimeoutError(
"Timed out waiting for Vega simulator to start up"
)
# TODO: Remove this once datanode fixes up startup timing
time.sleep(6)
self.process_pids = parent_conn.recv()
# Initialise the data-cache
self.data_cache
if self.run_with_console:
webbrowser.open(f"http://localhost:{port_config[Ports.CONSOLE]}/", new=2)
if self.launch_graphql:
webbrowser.open(
f"http://localhost:{port_config[Ports.DATA_NODE_REST]}/graphql", new=2
)
# Class internal as at some point the host may vary as well as the port
@staticmethod
def _build_url(port: int, prefix: str = "http://"):
return f"{prefix}localhost:{port}"
def stop(self) -> None:
logger.debug("Calling stop for veganullchain")
if self.stopped:
return
self.stopped = True
if self._core_client is not None:
self.core_client.stop()
if self._core_state_client is not None:
self.core_state_client.stop()
if self._trading_data_client_v2 is not None:
self.trading_data_client_v2.stop()
if self.proc is None:
logger.info("Stop called but nothing to stop")
else:
os.kill(self.proc.pid, signal.SIGTERM)
if self.queue is not None:
if self.proc is not None:
attempts = 0
                while self.proc.is_alive():
if attempts > 5:
break
time.sleep(1)
attempts += 1
self.queue.put(None)
self.logger_p.join()
if isinstance(self.wallet, SlimWallet):
self.wallet.stop()
super().stop()
@property
def wallet_url(self) -> str:
return self._build_url(self.wallet_port)
@property
def data_node_rest_url(self) -> str:
return self._build_url(self.data_node_rest_port)
@property
def data_node_grpc_url(self) -> str:
return self._build_url(self.data_node_grpc_port, prefix="")
@property
def faucet_url(self) -> str:
return self._build_url(self.faucet_port)
@property
def vega_node_url(self) -> str:
return self._build_url(self.vega_node_port)
@property
def vega_node_grpc_url(self) -> str:
return self._build_url(self.vega_node_grpc_port, prefix="")
def clone(self) -> VegaServiceNull:
"""Creates a clone of the service without the handle to other processes.
This is required as when spinning a Nullchain service out into
separate processes we need to start the various components in the main
thread (as daemon processes cannot spawn daemon processes), however want
to maintain a handle to these in the child.
"""
return VegaServiceNull(
self.vega_path,
self.data_node_path,
self.vega_wallet_path,
start_immediately=False,
port_config=self._generate_port_config(),
use_full_vega_wallet=self._use_full_vega_wallet,
warn_on_raw_data_access=self.warn_on_raw_data_access,
)
|
vegaprotocol/vega-market-sim
|
vega_sim/null_service.py
|
null_service.py
|
py
| 36,719 |
python
|
en
|
code
| 19 |
github-code
|
6
|
27472315206
|
from flask import Flask, request,jsonify
import util
from flask_cors import CORS
app= Flask(__name__)
CORS(app)
@app.route('/get_location_names',methods=['GET'])
def get_location_names():
response = jsonify({
'location': util.get_location()
})
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/predict_home_price',methods=['GET','POST'])
def predict_home_price():
    total_sqft = float(request.form['total_sqft'])
    bhk = int(request.form['bhk'])
    balcony = 3
    bath = int(request.form['bath'])
    location = request.form['location']
    response = jsonify({
        'estimated_price': util.estimated_price(location, total_sqft, bhk, bath, balcony)
    })
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
if __name__ == "__main__":
print("staring the server")
util.get_location()
app.run()
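# Example client call (illustrative; assumes the server runs locally on
# Flask's default port 5000 and that the location value is one of the names
# returned by /get_location_names):
#
#   curl -X POST http://127.0.0.1:5000/predict_home_price \
#        -F total_sqft=1000 -F bhk=2 -F bath=2 -F location="some location"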
|
Shinchan3/Home_price_linear_regression
|
server/server.py
|
server.py
|
py
| 922 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42963274647
|
# Sequence types
# Container: holds mixed element types. list, tuple, collections.deque
# Flat: holds a single element type. str, bytes, bytearray, array.array, memoryview
# Mutable: list, bytearray, array.array, memoryview, deque
# Immutable: tuple, str, bytes
# Hash table
# a structure that stores a value under a key;
# direct access is possible based on the result of a key computation!
# key -> hashing function -> hash address -> look up the value for the key
# print(__builtins__.__dict__)
t1 = (10, 20, (30, 40, 50))
t2 = (10, 20, [30, 40, 50])
print(hash(t1))  # immutable only!~
# print(hash(t2))  # not hashable because it contains a mutable element
# Dict setdefault example. Recommended approach.
source = (('k1', 'val1'),
('k1', 'val2'),
('k2', 'val3'),
('k2', 'val4'),
('k2', 'val5'))
new_dict1 = {}
new_dict2 = {}
# No setdefault
for k, v in source:
if k in new_dict1:
new_dict1[k].append(v)
else:
new_dict1[k] = [v]
print(new_dict1)
# Use setdefault
for k, v in source:
new_dict2.setdefault(k, []).append(v)
print(new_dict2)
# wow!~
# Caution: in a dict comprehension duplicate keys collapse; only the last value per key survives
new_dict3 = {k: v for k, v in source}
print(new_dict3)
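# Alternative sketch (added for illustration): collections.defaultdict gives
# the same grouping as setdefault without an explicit default on each call.
from collections import defaultdict
new_dict4 = defaultdict(list)
for k, v in source:
    new_dict4[k].append(v)
print(dict(new_dict4))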
|
elbum/py_inter
|
chap04_03.py
|
chap04_03.py
|
py
| 1,202 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
27132094648
|
import logging
import logging.config
from operations.operation_manager import Operation
from operations import *
logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
logger = logging.getLogger('rvapi')
#process that data!!
def process_queue_data(rqueue, queue_exists, agent_id,
username, customer_name, uri, method):
if queue_exists:
agent_queue = rqueue.get_all_objects_in_queue()
for operation in agent_queue:
if operation.get(OperationKey.OperationId):
oper = (
Operation(username, customer_name, uri, method)
)
oper.update_operation_pickup_time(
operation[OperationKey.OperationId], agent_id,
CHECKIN
)
return agent_queue
else:
return([])
|
SteelHouseLabs/vFense
|
tp/src/receiver/corehandler.py
|
corehandler.py
|
py
| 837 |
python
|
en
|
code
| 5 |
github-code
|
6
|
70396698747
|
import lcqp_manip
import casadi
import numpy as np
import lcqpow
# create the objects
box_ly = 0.15
vmax = 0.035
wmax = 0.035
box = lcqp_manip.Box(w=0.24, h=0.08, m=0.1, g=9.81, vmax=casadi.SX([vmax, vmax, wmax]))
gripper = lcqp_manip.Gripper(h=0.08, rmax=0.115, rmin=0, vmax=casadi.SX([vmax, vmax, wmax, vmax]))
ground = lcqp_manip.Ground()
wall = lcqp_manip.Wall(-0.25)
objects = [gripper, box, ground, wall]
# create the contact points and surfaces
box_TR_corner = lcqp_manip.BoxTopRightCorner(box)
box_BR_corner = lcqp_manip.BoxBottomRightCorner(box)
box_TL_corner = lcqp_manip.BoxTopLeftCorner(box)
box_BL_corner = lcqp_manip.BoxBottomLeftCorner(box)
box_T_surface = lcqp_manip.BoxTopSurface(box)
box_B_surface = lcqp_manip.BoxBottomSurface(box)
box_R_surface = lcqp_manip.BoxRightSurface(box)
box_L_surface = lcqp_manip.BoxLeftSurface(box)
gripper_BR_corner = lcqp_manip.GripperBottomRightCorner(gripper, offset=0.0035)
gripper_BL_corner = lcqp_manip.GripperBottomLeftCorner(gripper, offset=0.0035)
# create contacts
fmax = casadi.SX([10*box.m*box.g, 10*box.m*box.g, 10*box.m*box.g])
mu_ground = 0.4
mu = 0.5
# contacts between the box (contact points) and ground (contact surface)
contact_box_TR_ground = lcqp_manip.RelaxedContact(contact_point=box_TR_corner, contact_surface=ground,
contact_name="box_TR_ground", mu=mu, fmax=fmax,
inv_force_dir=False)
contact_box_BR_ground = lcqp_manip.RelaxedContact(contact_point=box_BR_corner, contact_surface=ground,
contact_name="box_BR_ground", mu=mu, fmax=fmax,
inv_force_dir=False)
contact_box_TL_ground = lcqp_manip.RelaxedContact(contact_point=box_TL_corner, contact_surface=ground,
contact_name="box_TL_ground", mu=mu, fmax=fmax,
inv_force_dir=False)
contact_box_BL_ground = lcqp_manip.RelaxedContact(contact_point=box_BL_corner, contact_surface=ground,
contact_name="box_BL_ground", mu=mu, fmax=fmax,
inv_force_dir=False)
# contacts between the box (contact points) and wall (contact surface)
contact_box_TR_wall = lcqp_manip.RelaxedContact(contact_point=box_TR_corner, contact_surface=wall,
contact_name="box_TR_wall", mu=mu,
inv_force_dir=False)
contact_box_BR_wall = lcqp_manip.RelaxedContact(contact_point=box_BR_corner, contact_surface=wall,
contact_name="box_BR_wall", mu=mu,
inv_force_dir=False)
contact_box_TL_wall = lcqp_manip.RelaxedContact(contact_point=box_TL_corner, contact_surface=wall,
contact_name="box_TL_wall", mu=mu,
inv_force_dir=False)
contact_box_BL_wall = lcqp_manip.RelaxedContact(contact_point=box_BL_corner, contact_surface=wall,
contact_name="box_BL_wall", mu=mu,
inv_force_dir=False)
# contacts between the gripper (contact points) and box (contact surfaces)
contact_gripper_BR_box_R = lcqp_manip.RelaxedContact(contact_point=gripper_BR_corner, contact_surface=box_R_surface,
contact_name="gripper_BR_box_R", mu=mu, fmax=fmax,
inv_force_dir=True)
contact_gripper_BL_box_R = lcqp_manip.RelaxedContact(contact_point=gripper_BL_corner, contact_surface=box_R_surface,
                                                     contact_name="gripper_BL_box_R", mu=mu, fmax=fmax,
                                                     inv_force_dir=True)
contacts = [contact_box_TL_ground, contact_box_BL_ground, contact_box_TR_ground, contact_box_BR_ground,
contact_box_TL_wall, contact_box_BL_wall, contact_box_TR_wall, contact_box_BR_wall,
contact_gripper_BR_box_R]
# create an LCQP
lcqp = lcqp_manip.LCQP(objects, contacts)
lcqp.set_force_balance(box)
lcqp.set_position_limit(gripper_BR_corner, box_T_surface, margin=0.02, inv_dir=True)
lcqp.set_position_limit(gripper_BR_corner, box_B_surface, margin=0.02, inv_dir=True)
box_x0 = 0.5
box_y0 = wall.w0 + box.w / 2 + 0.03
# goal configuration
box_center_to_gripper_top = gripper.h / 2
goal_height = 0.0
goal_angle = np.pi / 2
gripper_z_start = box.h + box_center_to_gripper_top
box_z_goal = box.w / 2
q_goal = np.array([box_y0, 0, -np.pi/6, 0.01, box_y0, box_z_goal, goal_angle])
# set config cost
q_weight = np.array([0.0, 0.0, 0.001, 1000, 0, 1, 1000])
v_weight = 1.0e-02
f_weight = 1.0e-02
slack_penalty = 1.0e04
lcqp.set_config_cost(q_goal, q_weight, v_weight, f_weight, slack_penalty)
# create the LCQP solver
lcqp_solver = lcqp_manip.LCQPSolver(lcqp)
lcqp_solver.options.setMaxRho(1.0e12)
lcqp_solver.options.setComplementarityTolerance(1.0e-06)
lcqp_solver.options.setStationarityTolerance(1.0e-04)
# create the simulation environment
sim = lcqp_manip.PyBulletSimulation(time_step=0.05, sim_time=30, gui=True)
sim.robot_q0 = np.array([-np.pi/2, 0, -np.pi/2, np.pi/2, -np.pi/2, -np.pi/2, -np.pi/2, -0.01, 0.01])
sim.joint_rest_poses = sim.robot_q0.tolist()
sim.add_box(lx=box_ly, ly=box.w, lz=box.h, mass=box.m, x=box_x0, y=box_y0)
# create the controller
controller = lcqp_manip.LCQPController(lcqp_solver=lcqp_solver, projection_axis='x', z_offset=sim.table_height)
controller.joint_rest_poses = sim.robot_q0
controller.position_gain = np.concatenate([np.full(7, 1), np.full(2, 0.1)])
controller.op_space_gain = np.concatenate([np.full(3, 100), np.full(3, 100)])
# run a simulation
pre_grasp_orn = np.array([[ 0, 1, 0],
[ 0, 0, -1],
[ -1, 0, 0]])
from scipy.spatial.transform import Rotation
pre_grasp_orn = Rotation.from_matrix(pre_grasp_orn).as_quat()
controller.set_pre_grasp(pre_grasp_pos=np.array([box_x0, box_y0+box.w/2+gripper.h, sim.table_surface_height+box.h/2+gripper.h/2]),
pre_grasp_orn=pre_grasp_orn,
pre_pre_grasp_dist=np.array([0.0, 0.1, 0.0]),
pre_grasp_tol=0.01, pre_pre_grasp_tol=0.02)
controller.set_post_grasp(post_grasp_pos=np.array([box_x0, 0.0, gripper_z_start+sim.table_surface_height+0.1]),
post_grasp_tol=0.01)
sim.set_camera_side()
sim.run(controller, record=False)
# sim.run(controller, anim_2D=False, record=True)
|
omron-sinicx/lcqp
|
examples/pivot.py
|
pivot.py
|
py
| 6,805 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18537251439
|
# prob_link: https://www.codingninjas.com/studio/problems/rod-cutting-problem_8230727?challengeSlug=striver-sde-challenge&leftPanelTab=0
from sys import stdin
import sys
def cutRod(prices, n):
# Write your code here.
mat = [[0 for i in range(n+1)]for j in range(n+1)]
for i in range(1, n+1):
for j in range(1, n+1):
if i == 1:
mat[i][j] = j*prices[i-1]
else:
if i > j:
mat[i][j] = mat[i-1][j]
else:
mat[i][j] = max(prices[i-1]+mat[i][j-i], mat[i-1][j])
return mat[n][n]
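def cutRodLinearSpace(prices, n):
    # Alternative sketch (added for illustration, not part of the original
    # solution): the same unbounded-knapsack recurrence with a 1-D table,
    # where dp[j] is the best price obtainable from a rod of length j.
    dp = [0] * (n + 1)
    for length in range(1, n + 1):
        for j in range(length, n + 1):
            dp[j] = max(dp[j], prices[length - 1] + dp[j - length])
    return dp[n]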
# Taking input using fast I/O.
def takeInput():
n = int(input())
price = list(map(int, input().strip().split(" ")))
return price, n
# Main.
t = int(input())
while t:
price, n = takeInput()
print(cutRod(price, n))
t = t-1
|
Red-Pillow/Strivers-SDE-Sheet-Challenge
|
P181_Rod_cutting_problem.py
|
P181_Rod_cutting_problem.py
|
py
| 894 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1101626454
|
import random
def comparar_lista():
    lista1 = []  # Initialize two empty lists to hold the generated numbers
    lista2 = []
    n = int(input("Enter the size of lista1: "))  # The user enters the size of both lists
    y = int(input("Enter the size of lista2: "))
    for i in range(n):
        lista1.append(random.randint(1, 100))  # Generate random numbers and append them to lista1
    print("The original lista1 is:", lista1)
    for i in range(y):
        lista2.append(random.randint(1, 100))  # Generate random numbers and append them to lista2
    print("The original lista2 is:", lista2)
    suma_lista1 = sum(lista1)  # Compute the sum of each list
    suma_lista2 = sum(lista2)
    print("The sum of list 1 is:", suma_lista1)
    print("The sum of list 2 is:", suma_lista2)
    if suma_lista1 > suma_lista2:  # Compare the sums and report which list is larger, or whether they are equal
        print("The sum of list 1 is greater than that of list 2.")
    elif suma_lista2 > suma_lista1:
        print("The sum of list 2 is greater than that of list 1.")
    else:
        print("The sums of the two lists are equal.")
def determinar_mayor_menor(lista1, lista2):
    menor_lista1 = min(lista1)  # Use min() to find the smallest number in lista1 and print it
    print("The smallest number in lista1 is:", menor_lista1)
    menor_lista2 = min(lista2)
    print("The smallest number in lista2 is:", menor_lista2)
# Call comparar_lista() to run the program and compare the randomly generated lists.
comparar_lista()
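# Note: determinar_mayor_menor() is defined but never called above. A
# hypothetical call (argument lists illustrative) would be:
# determinar_mayor_menor([3, 7, 1], [9, 2, 5])  # prints the minimum of each list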
|
Diego1229/mejoramiento_Gonzalez
|
listas/lista2.py
|
lista2.py
|
py
| 1,721 |
python
|
es
|
code
| 0 |
github-code
|
6
|
19240398722
|
N = int(input())
S = []
for _ in range(N):
tot = 0
L = list(map(int, input().split()))
L.sort()
if L[0] == L[2]:
tot = 10000 + 1000 * L[0]
elif L[0] == L[1] or L[1] == L[2]:
tot = 1000 + 100 * L[1]
else:
tot = L[2] * 100
S.append(tot)
print(max(S))
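# Scoring rules illustrated (dice values are examples):
#   "3 3 3" -> 10000 + 1000*3 = 13000  (three of a kind)
#   "3 3 6" -> 1000 + 100*3   = 1300   (a pair; L[1] is the paired value after sorting)
#   "2 5 6" -> 6 * 100        = 600    (all different; highest die * 100)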
|
sdh98429/dj2_alg_study
|
백준/Bronze/2476. 주사위 게임/주사위 게임.py
|
주사위 게임.py
|
py
| 291 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28204866764
|
from automata.Automata import Automata
from tokenizer.Tokenizer import tokenizerSimple
'''
Read the data needed to start processing the automaton.
Tokenize the inputs to get them as arrays.
'''
simbolos_entrada = tokenizerSimple(input("Input symbols >> "))
estados = tokenizerSimple(input("States >> "))
estado_inicial = tokenizerSimple(input("Initial state >> "))
estados_aceptacion = tokenizerSimple(input("Accepting states >> "))
# Create a new instance of type Automata.
automata = Automata(simbolos_entrada, estados, estado_inicial, estados_aceptacion)
#print("Input -->", automata.simbolos_entrada)
#print("States -->", automata.estados)
#print("Initial -->", automata.estado_inicial)
#print("Accepting -->", automata.estados_aceptacion)
|
ephelsa/Teoria_Lenguaje
|
1. Automata/proyecto/__init__.py
|
__init__.py
|
py
| 799 |
python
|
es
|
code
| 0 |
github-code
|
6
|
20512905913
|
def merge(arr, l, r):  # merge the two sorted halves l and r back into arr
i = j = k = 0
while i < len(l) and j < len(r):
if l[i] < r[j]:
arr[k] = l[i]
i += 1
else:
arr[k] = r[j]
j += 1
k += 1
if i == len(l) and j < len(r):
while j < len(r):
arr[k] = r[j]
j += 1
k +=1
if j == len(r) and i < len(l):
while i < len(l):
arr[k] = l[i]
i += 1
k += 1
def mergesort(arr):  # in-place top-down merge sort
if len(arr) >1:
m = len(arr)//2
l = arr[:m]
r = arr[m:]
mergesort(l)
mergesort(r)
merge(arr,l,r)
arr = [100, 22, 5, 2, 6, 1]
mergesort(arr)
for i in arr:
print(i,end=' ')
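# A quick self-check against Python's built-in sorted() (added here as an
# illustrative test; names prefixed with _ are not part of the original file):
import random as _random
_sample = [_random.randint(0, 999) for _ in range(50)]
_expected = sorted(_sample)
mergesort(_sample)
assert _sample == _expected, "mergesort should agree with sorted()"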
|
chaithanyasubramanyam/pythonfiles
|
mergesort.py
|
mergesort.py
|
py
| 721 |
python
|
en
|
code
| 0 |
github-code
|
6
|
349034442
|
import pandas as pd
import numpy as np
from zipfile import ZipFile
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from pathlib import Path
import matplotlib.pyplot as plt
from keras.layers import Concatenate, Dense, Dropout
from keras.layers import Add, Activation, Lambda
from keras.models import Model
from keras.layers import Input, Reshape, Dot
from keras.layers import Embedding  # keras.layers.embeddings was removed in newer Keras releases
from keras.optimizers import Adam
from keras.regularizers import l2
class RecommenderNet(keras.Model):
def __init__(self, num_users, num_products, embedding_size, **kwargs):
super(RecommenderNet, self).__init__(**kwargs)
self.num_users = num_users
self.num_products = num_products
self.embedding_size = embedding_size
self.user_embedding = layers.Embedding(
num_users,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.user_bias = layers.Embedding(num_users, 1)
self.product_embedding = layers.Embedding(
num_products,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.product_bias = layers.Embedding(num_products, 1)
def call(self, inputs):
user_vector = self.user_embedding(inputs[:, 0])
user_bias = self.user_bias(inputs[:, 0])
product_vector = self.product_embedding(inputs[:, 1])
product_bias = self.product_bias(inputs[:, 1])
dot_user_product = tf.tensordot(user_vector, product_vector, 2)
# Add all the components (including bias)
x = dot_user_product + user_bias + product_bias
# The sigmoid activation forces the rating to between 0 and 1
return tf.nn.sigmoid(x)
class EmbeddingLayer:
def __init__(self, n_items, n_factors):
self.n_items = n_items
self.n_factors = n_factors
def __call__(self, x):
x = Embedding(self.n_items, self.n_factors, embeddings_initializer='he_normal',
embeddings_regularizer=l2(1e-6))(x)
x = Reshape((self.n_factors,))(x)
return x
def ModRecommenderNet(n_users, n_movies, n_factors, min_rating, max_rating):
user = Input(shape=(1,))
u = EmbeddingLayer(n_users, n_factors)(user)
movie = Input(shape=(1,))
m = EmbeddingLayer(n_movies, n_factors)(movie)
x = Concatenate()([u, m])
x = Dropout(0.05)(x)
x = Dense(10, kernel_initializer='he_normal')(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, kernel_initializer='he_normal')(x)
x = Activation('sigmoid')(x)
x = Lambda(lambda x: x * (max_rating - min_rating) + min_rating)(x)
model = Model(inputs=[user, movie], outputs=x)
    opt = Adam(learning_rate=0.001)  # the `lr` argument is deprecated in newer Keras
model.compile(loss='mean_squared_error', optimizer=opt)
return model
if __name__ == "__main__":
print('main')
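# Minimal smoke test (hypothetical sizes; not part of the original file):
# model = ModRecommenderNet(n_users=100, n_movies=200, n_factors=16,
#                           min_rating=1.0, max_rating=5.0)
# model.summary()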
|
wmcfarlan/health_recommender
|
src/helper/keras_class.py
|
keras_class.py
|
py
| 3,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17451718192
|
import numpy as np
import pandas as pd
from model_zoo.xgb import train_xgb
from model_zoo.catboost import train_catboost
from model_zoo.lgbm import train_lgbm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold
TRAIN_FCTS = {
"lgbm": train_lgbm,
"xgb": train_xgb,
"catboost": train_catboost,
}
def k_fold(
df,
config,
log_folder=None,
):
train_fct = TRAIN_FCTS[config.model]
ft_imps, models = [], []
pred_oof = np.zeros(len(df))
if config.split == "kf":
kf = KFold(n_splits=config.n_folds, shuffle=True, random_state=13)
splits = kf.split(df)
elif config.split == "gkf":
splits = [(i, i) for i in range(config.n_folds)]
else:
raise NotImplementedError()
for fold, (train_idx, val_idx) in enumerate(splits):
if fold in config.selected_folds:
print(f"\n------------- Fold {fold + 1} / {config.n_folds} -------------\n")
if config.split == "kf":
df_train = df.iloc[train_idx].reset_index(drop=True)
df_val = df.iloc[val_idx].reset_index(drop=True)
else:
df_train = df[
(df["fold_1"] != fold) & (df["fold_2"] != fold)
].reset_index(drop=True)
df_val = df[(df["fold_1"] == fold) | (df["fold_2"] == fold)]
val_idx = (
df_val.index.values
if isinstance(df, pd.DataFrame)
else df_val.index.values.get()
)
print(f" -> {len(df_train)} training pairs")
print(f" -> {len(df_val)} validation pairs\n")
pred_val, model = train_fct(
df_train,
df_val.reset_index(drop=True),
None,
config.features,
config.target,
params=config.params,
cat_features=config.cat_features,
use_es=config.use_es,
)
pred_oof[val_idx] += pred_val
try:
try:
ft_imp = model.feature_importance
except AttributeError:
ft_imp = model.feature_importances_
ft_imp = pd.DataFrame(
pd.Series(ft_imp, index=config.features), columns=["importance"]
)
ft_imps.append(ft_imp)
except Exception:
pass
models.append(model)
if log_folder is None:
return pred_oof, models, ft_imp
if config.model == "xgb":
model.save_model(log_folder + f"{config.model}_{fold}.json")
elif config.model == "lgbm":
try:
model.booster_.save_model(log_folder + f"{config.model}_{fold}.txt")
except Exception:
model.save_model(log_folder + f"{config.model}_{fold}.txt")
else: # catboost, verif
model.save_model(log_folder + f"{config.model}_{fold}.txt")
if config.split == "gkf":
pred_oof = pred_oof / (1 + (df["fold_1"] != df["fold_2"]))
y = df[config.target].values if isinstance(df, pd.DataFrame) else df[config.target].get()
auc = roc_auc_score(y, pred_oof)
print(f"\n Local CV is {auc:.4f}")
ft_imp = pd.concat(ft_imps, axis=1).mean(1)
ft_imp.to_csv(log_folder + "ft_imp.csv")
np.save(log_folder + "pred_oof.npy", pred_oof)
return pred_oof, models, ft_imp
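# The `config` object is expected to expose at least these attributes
# (names inferred from the accesses above, not documented elsewhere):
# model ('lgbm' | 'xgb' | 'catboost'), split ('kf' | 'gkf'), n_folds,
# selected_folds, features, target, params, cat_features, use_es.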
|
TheoViel/kaggle_foursquare
|
src/training/main_boosting.py
|
main_boosting.py
|
py
| 3,551 |
python
|
en
|
code
| 13 |
github-code
|
6
|
34389873363
|
import sys
import numpy as np
def main():
import crayimage
from crayimage.runutils import map_run, load_index
data_root, index_file, run_name, bins, window = [
t(arg) for t, arg in zip([str, str, str, int, int], sys.argv[1:])
]
run = load_index(index_file, data_root)[run_name]
sample_img = run.get_img(0)
max_value = 256 if sample_img.dtype == np.uint8 else 1024
  per_bin = max_value // bins  # integer width of each histogram bin
counts = np.zeros(
shape=sample_img.shape + (bins, ),
dtype='uint16'
)
print(counts.shape)
  for img in run[:10]:
    # Per-pixel bin index. The original reused the name `bins` (shadowing the
    # bin count) and indexed with a float array, which NumPy rejects; values
    # in the last partial bin are clipped into the final bucket.
    bin_idx = np.minimum(img // per_bin, bins - 1).astype(np.intp)[..., np.newaxis]
    np.put_along_axis(counts, bin_idx,
                      np.take_along_axis(counts, bin_idx, axis=-1) + 1,
                      axis=-1)
print(counts)
if __name__ == '__main__':
main()
|
yandexdataschool/crayimage
|
examples/hotpixel_suppression.py
|
hotpixel_suppression.py
|
py
| 658 |
python
|
en
|
code
| 5 |
github-code
|
6
|
15819759637
|
HEX_CHARS = "0123456789ABCDEF"
ZKARRAY = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
SUN_MAGICS = [1030, 1029, 4088]
def unhash_cell(raw_cell):
return [ZKARRAY.index(i) for i in raw_cell]
class Cell:
def __init__(self, raw_data=None, index=-1):
self.index = index
self.entity = None
self.color = 'black'
self.dead = True
self.text = ' '
self.posIJ = []
self.lineOfSight = False
self.movement = 8
self.initialized = False
if raw_data:
self.parse_data(raw_data)
self.set_default_display()
def parse_data(self, raw_data):
cd = unhash_cell(raw_data)
        self.isActive = ((cd[0] & 32) >> 5) == 1  # parentheses added: >> binds tighter than & in Python
self.lineOfSight = (cd[0] & 1) == 1
        self.layerGroundRot = (cd[1] & 48) >> 4
self.groundLevel = cd[1] & 15
self.movement = ((cd[2] & 56) >> 3)
        self.layerGroundNum = ((cd[0] & 24) << 6) + ((cd[2] & 7) << 6) + cd[3]
self.layerObject1Num = ((cd[0] & 4) << 11) + ((cd[4] & 1) << 12) + (cd[5] << 6) + cd[6]
self.layerObject2Num = ((cd[0]&2)<<12) + ((cd[7]&1)<<12) + (cd[8]<<6) + cd[9]
self.isSun = self.layerObject1Num in SUN_MAGICS or self.layerObject2Num in SUN_MAGICS
self.text = str(self.movement)
self.initialized = True
def is_obstacle(self):
return self.lineOfSight and not self.entity
def set_default_display(self):
self.text = str(self.movement)
if self.isSun:
self.color = 'yellow'
self.dead = False
elif self.movement == 0 and self.lineOfSight:
self.color = 'grey'
self.dead = False
elif self.lineOfSight:
self.color = 'white'
def set_entity(self, entity):
if not entity:
self.entity = None
self.set_default_display()
elif not entity.dead:
self.entity = entity
self.color = 'red'
self.text = self.entity.type[0]
def __repr__(self):
return str(self.__dict__)
    def __str__(self):  # was "__str", which Python never invokes
return self.__repr__()
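# Illustrative decoding (not in the original file): "aaaaaaaaaa" maps every
# character to ZKARRAY index 0, i.e. an all-zero cell.
# cell = Cell("aaaaaaaaaa", index=0)
# print(cell.movement, cell.lineOfSight)  # -> 0 False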
|
XeLiT/retro-dbot
|
utils/cell.py
|
cell.py
|
py
| 2,203 |
python
|
en
|
code
| 11 |
github-code
|
6
|
37204907362
|
# Open the database connection
import pymysql as pymysql
import math
EARTH_REDIUS = 6378.137
pi = 3.1415926
def rad(d):
return d * pi / 180.0
def getDistan(lat1, lng1, lat2, lng2):
radLat1 = rad(lat1)
radLat2 = rad(lat2)
a = radLat1 - radLat2
b = rad(lng1) - rad(lng2)
s = 2 * math.asin(math.sqrt(math.pow(math.sin(a / 2), 2) + math.cos(radLat1) * math.cos(radLat2) * math.pow(
math.sin(b / 2), 2)))
s = s * EARTH_REDIUS
return s
db = pymysql.connect(host='bj-cdb-cwu7v42u.sql.tencentcdb.com', port=62864, user='user', passwd='xmxc1234', db='test',
                     charset='utf8')  # charset='utf8' is required when the query returns Chinese text; without it the export shows ???
# Use the cursor() method to obtain a cursor
cursor = db.cursor()
# SQL query
sql = "select id, client_name, latitude, longitude FROM t_client_elm limit 100"
# Execute the SQL statement
cursor.execute(sql)
# Fetch all records
results = cursor.fetchall()
a = [["酒仙桥",116.687288, 40.2342],
["望京",116.462054, 39.914102],
["三里屯",116.442054, 39.954102]]
print(len(a))
for i in range(len(a)):  # was: i = 1 inside the loop, which pinned every iteration to a[1]
    name: str = a[i][0]
    lng2: float = a[i][1]  # entries of `a` are [name, longitude, latitude]
    lat2: float = a[i][2]
for row in results:
lat1=float(row[2])
lng1=float(row[3])
result = getDistan(lat1, lng1, lat2,lng2)
if result <= 1:
print(name + ":" + str(result))
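# DB-free sanity check of getDistan (coordinates illustrative): the distance
# from a point to itself must be 0 km.
# assert abs(getDistan(39.9042, 116.4074, 39.9042, 116.4074)) < 1e-9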
|
tangleibest/untitled
|
test/a.py
|
a.py
|
py
| 1,444 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72066473469
|
from .qtImport import *
class NewNoteForm(QWidget):
def __init__(self, parent, category_list):
super().__init__()
self._parent = parent
self._category = category_list
self.initUI()
def initUI(self):
layout = QGridLayout()
self.setLayout( layout )
        label = QLabel("In category ")
layout.addWidget( label, 1, 1 )
self._list = QComboBox()
layout.addWidget( self._list, 1, 2)
for x in self._category:
self._list.addItem( x )
self._newCategory = QCheckBox("Create a new category")
layout.addWidget( self._newCategory , 2,1)
self._newCategoryName = QLineEdit()
layout.addWidget( self._newCategoryName, 2, 2)
name = QLabel("The name of the new note ")
layout.addWidget( name, 3, 1 )
self._inp_name = QLineEdit()
layout.addWidget( self._inp_name, 3, 2)
button = QPushButton("Create")
button.clicked.connect( self._parent.create )
layout.addWidget( button, 4, 1, 1,2)
def getInputs(self):
data = {}
data["category"] = self._list.currentIndex()
data["new_category"] = self._newCategory.isChecked()
data["new_category_name"] = self._newCategoryName.text()
data["note_name"] = self._inp_name.text()
return data
class NewNoteWindow(QMainWindow):
def __init__(self, parent):
super(NewNoteWindow, self).__init__(parent)
self._form = None
self._parent = parent
def setCategory(self, available ):
self._category = available
def initUI(self):
        self.setWindowTitle("Create a new note")
_widget = QWidget()
windowLayout = QVBoxLayout(_widget)
self._form = NewNoteForm( self, self._category )
windowLayout.addWidget( self._form )
self.setCentralWidget( _widget )
def create( self ):
data = self._form.getInputs()
self._parent.actionNewNote( data )
self.close()
class DeleteNoteForm(QWidget):
def __init__(self, parent, notes_tree):
super().__init__()
self._parent = parent
self._notes_tree = notes_tree
self.initUI()
def initUI(self):
layout = QGridLayout()
self.setLayout( layout )
        label = QLabel("Select the note or category you want to delete\nBe careful: this deletion is permanent")
layout.addWidget( label, 1, 1, 2, 2 )
self._tree = QTreeWidget()
layout.addWidget( self._tree, 1, 2)
for category, notes in self._notes_tree.items():
parent = QTreeWidgetItem(self._tree)
parent.setText(0, category)
parent.setFlags(parent.flags() | Qt.ItemIsTristate | Qt.ItemIsUserCheckable)
for note in notes:
                child = QTreeWidgetItem(parent)  # attach notes under their category (was self._tree, which flattened the tree)
child.setText(0, note)
child.setFlags(child.flags() | Qt.ItemIsUserCheckable)
child.setCheckState(0, Qt.Unchecked)
button = QPushButton("Delete selection")
button.clicked.connect( self._parent.delete )
layout.addWidget( button, 4, 1, 1,2)
def getInputs(self):
iterator = QTreeWidgetItemIterator(self._tree, QTreeWidgetItemIterator.Checked)
while iterator.value():
item = iterator.value()
print (item.text(0))
iterator += 1
data = {}
return data
class DeleteNoteWindow(QMainWindow):
def __init__(self, parent):
super(DeleteNoteWindow, self).__init__(parent)
self._form = None
self._parent = parent
def setNotesTree(self, available ):
self._category = available
def initUI(self):
self.setWindowTitle("Delete a note or a category")
_widget = QWidget()
windowLayout = QVBoxLayout(_widget)
self._form = DeleteNoteForm( self, self._category )
windowLayout.addWidget( self._form )
self.setCentralWidget( _widget )
def delete( self ):
data = self._form.getInputs()
self._parent.actionDeleteNote( data )
self.close()
|
basileMarchand/wnm
|
wnm/forms.py
|
forms.py
|
py
| 4,370 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13900998846
|
from flask import Flask,render_template,request,send_file;
from flask_wtf import FlaskForm;
from wtforms import FileField,SubmitField;
import pandas as pd
import logging as logger
from werkzeug.utils import secure_filename
from openpyxl.styles import Border, Side, Alignment,PatternFill
from openpyxl.utils import get_column_letter
from datetime import datetime
import re
import os
import io
app = Flask(__name__)
app.config['SECRET_KEY'] = 'supersecretkey'
app.config['UPLOAD_FOLDER'] = 'static/files'
class UploadFileForm(FlaskForm):
file = FileField("File")
submit = SubmitField("Upload File")
@app.route('/',methods = ['GET','POST'])
@app.route('/download')
def index():
form = UploadFileForm()
# @app.route('/upload', methods=['GET', 'POST'])
if form.validate_on_submit():
file = form.file.data
df = pd.read_excel(file)
unique_surveyor_names = df["Surveyor Name"].unique()
# Create a dictionary to store the results for each surveyor name
results = {}
# Loop through each unique surveyor name
for surveyor_name in unique_surveyor_names:
# Filter the dataframe to only include data for the current surveyor name
surveyor_data = df[df["Surveyor Name"] == surveyor_name]
# Group the data by surveyor name and calculate the number of samples
samples = len(surveyor_data)
# Group the data by surveyor name and calculate the total audio duration
duration = surveyor_data["Audio Duration (in secs)"].sum()
# Calculate the starting time and ending time for each surveyor
start_time = surveyor_data["Timestamp"].min()
end_time = surveyor_data["Timestamp"].max()
# Group the data by gender and calculate the percentage of male and female
gender_group = surveyor_data.groupby("Gender").count()["Timestamp"]
#print(gender_group)
gender_percentage = (gender_group / len(surveyor_data) * 100).astype(float)
            def count_invalid_contact_numbers(x):
                # Counts entries that do NOT match a 10-digit number
                # (the original name "count_valid_..." was misleading).
                pattern = re.compile(r"^\d{10}$")
                mask = x.apply(lambda v: not bool(pattern.match(str(v))))
                return mask.sum()
            invalid_numbers = count_invalid_contact_numbers(surveyor_data['Contact Number'])
duplicate = surveyor_data.duplicated('Location').sum()
# Group the data by age group and calculate the percentage of each age group
age_group = surveyor_data.groupby("Age Group").count()["Timestamp"].astype(float)
age_percentage = (age_group / len(surveyor_data) * 100)
print(age_group)
# Group the data by political party and calculate the percentage of each party
party_group = surveyor_data.groupby('ఇప్పటికి ఇప్పుడు ఎన్నికలు జరిగితే మీరు ఏ పార్టీ కి మద్దతు ఇస్తారు ?').count()["Timestamp"]
party_percentage = (party_group / len(surveyor_data) * 100).astype(float)
party_group1 = surveyor_data.groupby('మీ MLA పరిపాలన పట్ల మీ అభిప్రాయం?').count()["Timestamp"]
party_percentage2 = (party_group1 / len(surveyor_data) * 100).astype(float)
party_group2 = surveyor_data.groupby('వైయెస్ జగన్మోహన్ రెడ్డిగారి పరిపాలన పట్ల మీ అభిప్రాయం ఏమిటి?').count()["Timestamp"]
party_percentage3 = (party_group2 / len(surveyor_data) * 100).astype(float)
# Save the results in the dictionary
results[surveyor_name] = {
"NO OF SAMPLES": samples,
"DURATION": duration,
"STARTING TIME": start_time,
"ENDING TIME": end_time,
"FEMALE": gender_percentage.get("Female", 0),
"MALE": gender_percentage.get("Male", 0),
"DUPLICATE LOCATION":duplicate,
'INVALID CONTACT': invalid_numbers,
"18-30": age_percentage.get("18-30", 0),
"30-45": age_percentage.get("30-45", 0),
"45-60": age_percentage.get("45-60", 0),
"60+": age_percentage.get("60+", 0),
"YSRCP": party_percentage.get("YSRCP", 0),
"TDP": party_percentage.get("TDP",0),
"JSP": party_percentage.get("JSP", 0),
"BJP": party_percentage.get("BJP", 0),
"INC": party_percentage.get("INC", 0),
"Not Decided": party_percentage.get("Not Decided", 0),
"బాగుంది.":party_percentage3.get('బాగుంది' ,0),
"బాగోలేదు.":party_percentage3.get('బాగోలేదు' ,0),
"బాగా చేస్తున్నారు" : party_percentage2.get("బాగా చేస్తున్నారు" , 0),
"బాగా చేయడం లేదు": party_percentage2.get("బాగా చేయడం లేదు" , 0),
}
#results[surveyor_name]['INVALID CONTACT'] = invalidnum.Contact_Number.apply(count_valid_contact_numbers).sum()
        def color_format(val):
            text_color = 'black'
            try:
                # Parse the "NN%" string instead of comparing lexicographically
                # (the original `val >= "60"` misclassified e.g. "7%" and "100%").
                if val.endswith("%") and float(val.strip("%")) >= 60:
                    color = 'red'
                    font_weight = 'bold'
                    text_color = 'white'
                else:
                    color = 'white'
                    font_weight = 'normal'
                return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight, text_color)
            except ValueError:
                return ''
def color_format2(val):
text_color = 'black'
try:
if val.endswith("%") and float(val.strip("%")) >= 30:
color = 'red'
font_weight = 'bold'
text_color = 'white'
else:
color = 'white'
font_weight = 'normal'
return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color)
except ValueError:
return ''
def color_format3(val):
text_color = 'black'
try:
if val.endswith("%") and float(val.strip("%")) >= 50:
color = 'red'
font_weight = 'bold'
text_color = 'white'
else:
color = 'white'
font_weight = 'normal'
return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color)
except ValueError:
return ''
def color_format4(val):
text_color = 'black'
try:
if val.endswith("%") and float(val.strip("%")) >= 60:
color = 'red'
font_weight = 'bold'
text_color = 'white'
else:
color = 'white'
font_weight = 'normal'
return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color)
except ValueError:
return ''
def duration_format(val):
text_color = 'black'
if val >= '05:00:00' :
color = 'red'
font_weight = 'bold'
text_color = 'white'
else:
color= 'white'
font_weight = 'normal'
return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color)
def duplicate_location(val):
text_color = 'black'
try:
if val >= 5:
color = 'red'
font_weight = 'bold'
text_color = 'white'
else:
color = 'white'
font_weight = 'normal'
return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color)
except ValueError:
return ''
def invalid_number(val):
text_color = 'black'
try:
if val >= 5:
color = 'red'
font_weight = 'bold'
text_color = 'white'
else:
color = 'white'
font_weight = 'normal'
return 'background-color: %s; font-weight: %s; color: %s' % (color, font_weight,text_color)
except ValueError:
return ''
results_df = pd.DataFrame.from_dict(results, orient='index')
#results_df = results_df.transpose()
results_df.reset_index(inplace=True)
results_df.rename(columns={"index": "Surveyor Name"}, inplace=True)
results_df['BJP'] = results_df['BJP'].apply(lambda x: "{:.0f}%".format(x))
results_df['INC'] = results_df['INC'].apply(lambda x: "{:.0f}%".format(x))
results_df['JSP'] = results_df['JSP'].apply(lambda x: "{:.0f}%".format(x))
results_df['TDP'] = results_df['TDP'].apply(lambda x: "{:.0f}%".format(x))
results_df['YSRCP'] = results_df['YSRCP'].apply(lambda x: "{:.0f}%".format(x))
results_df['Not Decided'] = results_df['Not Decided'].apply(lambda x: "{:.0f}%".format(x))
results_df['18-30']= results_df['18-30'].apply(lambda x: "{:.0f}%".format(x))
results_df['30-45']= results_df['30-45'].apply(lambda x: "{:.0f}%".format(x))
results_df['45-60']= results_df['45-60'].apply(lambda x: "{:.0f}%".format(x))
results_df['60+']= results_df['60+'].apply(lambda x: "{:.0f}%".format(x))
results_df['MALE']= results_df['MALE'].apply(lambda x: "{:.0f}%".format(x))
results_df['FEMALE']= results_df['FEMALE'].apply(lambda x: "{:.0f}%".format(x))
results_df['బాగా చేయడం లేదు']= results_df['బాగా చేయడం లేదు'].apply(lambda x: "{:.0f}%".format(x))
results_df['బాగా చేస్తున్నారు']= results_df['బాగా చేస్తున్నారు'].apply(lambda x: "{:.0f}%".format(x))
results_df['బాగోలేదు.']= results_df['బాగోలేదు.'].apply(lambda x: "{:.0f}%".format(x))
results_df['బాగుంది.']= results_df['బాగుంది.'].apply(lambda x: "{:.0f}%".format(x))
results_df['STARTING TIME'] = results_df['STARTING TIME'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f'))
results_df['ENDING TIME'] = results_df['ENDING TIME'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f'))
results_df['DURATION'] = results_df['ENDING TIME'] - results_df['STARTING TIME']
results_df['DURATION'] = results_df['DURATION'].apply(lambda x: f"{x.days * 24 + x.seconds // 3600:0>2}:{(x.seconds % 3600) // 60:0>2}:{x.seconds % 60:0>2}")
results_df = results_df.style.applymap(color_format, subset=['MALE', 'FEMALE']) \
.applymap(duration_format, subset=['DURATION']) \
.applymap(duplicate_location, subset=['DUPLICATE LOCATION']) \
.applymap(invalid_number, subset=['INVALID CONTACT']) \
.applymap(color_format2, subset=['18-30','30-45','45-60','60+'])\
.applymap(color_format3, subset=['YSRCP','TDP','JSP','BJP','INC','Not Decided'])\
.applymap(color_format4, subset=['బాగుంది.','బాగోలేదు.' ])\
.applymap(color_format4, subset=['బాగా చేస్తున్నారు', 'బాగా చేయడం లేదు' ])
#results_df.to_excel(r"C:\Users\dell\Downloads\Sai_Swapnill Cons_Results.xlsx")
# Save the DataFrame to an Excel file
writer = pd.ExcelWriter("result.xlsx", engine='openpyxl')
results_df.to_excel(writer, index=False)
# Get the active worksheet
worksheet = writer.book.active
worksheet.freeze_panes = worksheet.cell(1,2)
# Set the column width to automatically adjust to the size of the contents in the column
for column_cells in worksheet.columns:
length = max(len(str(cell)) for cell in column_cells)
worksheet.column_dimensions[column_cells[0].column_letter].width = length
worksheet.insert_rows(1, 1)
worksheet['C1']="TIME"
worksheet.merge_cells('C1:E1')
worksheet['F1']="GENDER"
worksheet.merge_cells('F1:G1')
worksheet['J1']="AGE GROUP"
worksheet.merge_cells('J1:M1')
worksheet['N1'] = 'ఇప్పటికి ఇప్పుడు ఎన్నికలు జరిగితే మీరు ఏ పార్టీ కి మద్దతు ఇస్తారు ?'
worksheet.merge_cells('N1:S1')
        worksheet['T1'] = "వైయెస్ జగన్మోహన్ రెడ్డిగారి పరిపాలన పట్ల మీ అభిప్రాయం ఏమిటి?"  # was two adjacent string literals fused together; V1 below holds the MLA question
worksheet.merge_cells('T1:U1')
worksheet['V1'] = "మీ MLA పరిపాలన పట్ల మీ అభిప్రాయం?"
worksheet.merge_cells('V1:W1')
fill_colors = ["D8E4BCFF", "D8E4BCFF", "D8E4BCFF", "D8E4BCFF", "D8E4BCFF","D8E4BCFF",]
for i, merged_cell_range in enumerate(worksheet.merged_cell_ranges):
range_string = str(merged_cell_range)
merged_row = worksheet[range_string]
for row in merged_row:
for cell in row:
cell.fill = PatternFill(start_color=fill_colors[i], end_color=fill_colors[i], fill_type="solid")
for row in worksheet.iter_rows():
for cell in row:
                # openpyxl style objects are immutable proxies and do not
                # support "+"; assign a fresh Border instead.
                cell.border = Border(left=Side(style='thin'),
                                     right=Side(style='thin'),
                                     top=Side(style='thin'),
                                     bottom=Side(style='thin'))
cell.alignment = Alignment(horizontal='center')
# Set the background color of the first row (the column names)
for cell in worksheet[2]:
cell.fill = PatternFill(start_color="B8CCE4FF", end_color="B8CCE4FF", fill_type = "solid")
#Add filter to each column
worksheet.auto_filter.ref = "A2:%s2" % (get_column_letter(worksheet.max_column))
# data = Reference(worksheet, min_col=2, min_row=1, max_row=worksheet.max_row, max_col=2)
# Save the changes to the Excel file
writer.save()
print("Analysis complete!")
# Convert categorical columns to numeric
# Return the file for download
return send_file("result.xlsx",
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
as_attachment=True)
# rendered=render_template('data.html',res = res,length = length)
# df1 = pd.DataFrame({'Data': [rendered]})
# output = io.BytesIO()
# writer = pd.ExcelWriter(output, engine='xlsxwriter')
# df1.to_excel(writer, index=False, sheet_name='Sheet1')
# writer.save()
# output.seek(0)
# return send_file(output,
# attachment_filename='combined.xlsx',
# as_attachment=True)
# result.to_excel("swapnil_New.xlsx", index=False)
#return render_template('data.html',res = res,length = length)
#result.to_excel(os.path.join(app.config['UPLOAD_FOLDER'], "swapnil_New.xlsx"), index=False)
# return "Report Generated Successfully"
return render_template('index.html',form = form)
if __name__ == '__main__':
app.run()
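# The uploaded Excel must contain at least these columns (inferred from the
# accesses above): "Surveyor Name", "Timestamp", "Gender", "Age Group",
# "Audio Duration (in secs)", "Contact Number", "Location", and the three
# Telugu survey-question columns grouped on in the loop.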
|
Swapnil-mindbrick-2022/reportgen
|
App.py
|
App.py
|
py
| 18,656 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26703437835
|
import argparse
from math import sqrt, floor, ceil
from os import listdir
import io
from Player import Player
import pygame
import time
from View import View
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2
BUTTONPRESS_CSV = "buttonpress.csv"
GAME_CSV = "game.csv"
FACE_FILES = ["faces_0.txt", "faces_1.txt"]
class DummyClock:
def __init__(self):
pass
def get_fps(self):
return 60
def get_rawtime(self):
return 1
def get_time(self):
return 1
class GameReplay:
def __init__(self, replay_dir):
self.field = None
self.players = []
self.boxes = set()
self.bombs = dict()
self.explosions = set()
self.inactive_traps = dict()
self.active_traps = set()
self.falling_boxes = dict()
self.crushing_boxes = dict()
self.power_ups = dict()
self.active_taunts = dict()
self.view = View(32 * 15, 32 * 16, "Replay Bombangerman")
self.plots = []
self.vlines = []
self.replay_dir = "../replays/" + replay_dir
self.face_replay_file_handles = [None,None]
self.next_faces = [[0,np.zeros((48,48))],[0,np.zeros((48,48))]]
plt.ion()
plt.show()
self.figure = plt.figure()
self.figure.autofmt_xdate()
plt.xticks([])
plt.yticks([])
self.last_timestamp = None
self.first_timestamp = None
replay_files = listdir(self.replay_dir)
if GAME_CSV in replay_files:
with open(self.replay_dir + "/" + GAME_CSV) as f:
lines = f.readlines()
self.first_timestamp = float(lines[3].split(";")[0])
self.last_timestamp = float(lines[-1].split(";")[0])
# plot data files
data_files = [f for f in replay_files if f not in [GAME_CSV, BUTTONPRESS_CSV] + FACE_FILES]
self.nr_plots = len(data_files)
for file in data_files:
print("FILENAME:", file)
self.replay_data(file)
# Faces Replay display setup
# Yes, this has constantly open file handles. but this is read only, so we will prolly get away with it.
# Here, we preload the first entry in those files
for i,filename in enumerate(FACE_FILES):
if filename in replay_files:
f = open(self.replay_dir + "/" + filename)
self.face_replay_file_handles[i] = f
print("Opened Face Data File for Player", i)
self.load_next_image_and_timestamp(i,f)
# prepare 2 windows if 2 players have replay data here
for i,h in enumerate(self.face_replay_file_handles):
if h is not None:
cv2.namedWindow("Player " + str(i))
# buttonpress
if BUTTONPRESS_CSV in replay_files:
try:
with open(self.replay_dir + "/" + BUTTONPRESS_CSV) as f:
content = f.readlines()
bps = [float(x) for x in content]
for b in bps:
for plot in self.plots:
plot.axvline(x=b, c="b")
except FileNotFoundError:
print(BUTTONPRESS_CSV + " not found")
# game replay
if GAME_CSV in replay_files:
with open(self.replay_dir + "/" + GAME_CSV) as f:
for i, line in enumerate(f):
if i == 0:
self.field = eval(line)
elif i == 1:
for x, y, ticks in eval(line):
self.inactive_traps[(x, y)] = [ticks, ticks]
elif i == 2:
player_data = eval(line)
self.update_player_data(player_data)
else:
break
def replay_data(self, filename):
content = pd.read_csv(self.replay_dir + "/" + filename, delimiter=";", names=["time", "player0", "player1"], dtype="float")
if self.last_timestamp is not None:
content = content[content["time"] <= self.last_timestamp]
if self.first_timestamp is not None:
content = content[content["time"] >= self.first_timestamp]
content = content.fillna(method='ffill').dropna()
content = content.sort_values('time', axis=0)
plot_nr = len(self.plots) + 1
nr_rows = ceil(sqrt(self.nr_plots))
nr_columns = ceil(sqrt(self.nr_plots))
plot = self.figure.add_subplot(nr_rows, nr_columns, plot_nr)
plot.title.set_text(filename)
self.plots.append(plot)
content.plot(x="time", ax=plot)
vline = plot.axvline(x=0, c="r")
self.vlines.append(vline)
def new_plot(self, x, ys, title):
plot_nr = len(self.plots) + 1
nr_rows = floor(sqrt(self.nr_plots))
nr_columns = ceil(sqrt(self.nr_plots))
plot = self.figure.add_subplot(nr_rows, nr_columns, plot_nr)
plot.title.set_text(title)
self.plots.append(plot)
for y in ys:
plot.plot(x, y)
vline = plot.axvline(x=x[0], c="r")
self.vlines.append(vline)
def replay(self):
clock = DummyClock()
run = True
frame = 3
last_timestamp = None
while run:
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
run = False
if event.type == pygame.K_ESCAPE:
run = False
with open(self.replay_dir + "/" + GAME_CSV) as f:
for i, line in enumerate(f):
if i == frame:
if line == "\n": # no idea why but needed in windows
continue
timestamp = eval(line.split(";")[0])
events = eval(line.split(";")[1])
self.handle_events(events)
elif i < frame:
continue
else:
break
if frame % 30 == 0:
for vline in self.vlines:
vline.set_xdata(timestamp)
                # plt.pause processes pending GUI events so the moved
                # time-cursor lines actually redraw.
                plt.pause(1e-10)
            if last_timestamp is not None:
time.sleep((timestamp - last_timestamp)/5)
last_timestamp = timestamp
# intermission: draw player face when present
for i,h in enumerate(self.face_replay_file_handles):
if h is not None:
if timestamp >= self.next_faces[i][0]:
img = self.next_faces[i][1] / 255.0
img = cv2.resize(img, dsize=(48*3, 48*3), interpolation=cv2.INTER_NEAREST)
cv2.imshow("Player " + str(i), img)
self.load_next_image_and_timestamp(i,h)
# intermission end
self.update_counters()
self.view.draw_game(self.field, self.boxes, self.inactive_traps, self.active_traps, self.power_ups,
self.bombs, self.explosions, self.falling_boxes, self.crushing_boxes,
self.players, self.active_taunts, 0, clock)
self.view.update()
frame += 1
pygame.quit()
def update_counters(self):
""" Updates all tick counters for the client
"""
for bomb in self.bombs:
self.bombs[bomb] -= 1
for x, y in self.inactive_traps:
data = self.inactive_traps[(x, y)]
data[1] -= 1 # [max_ticks, remaining_ticks]
for falling_box in self.falling_boxes:
self.falling_boxes[falling_box] -= 1
for crushing_box in self.crushing_boxes:
self.crushing_boxes[crushing_box] -= 1
remove = []
for id, ticks in self.active_taunts.items():
if ticks <= 0:
remove.append(id)
else:
self.active_taunts[id] -= 1
for id in remove:
del self.active_taunts[id]
def update_player_data(self, player_data):
while len(self.players) < len(player_data):
self.players.append(None)
for d in player_data:
pid = d["id"]
x = d["x"]
y = d["y"]
lifes = d["l"]
bombs = d["b"]
power = d["p"]
player = self.players[pid] if len(self.players) > pid else None
if player == None:
self.players[pid] = Player(pid, x, y, lifes, power, bombs)
else:
player.x = x
player.y = y
player.lifes = lifes
player.bombs = bombs
player.power = power
def handle_events(self, events):
for type, data in events:
if type == 0:
# GENERIC
pass
elif type == 1:
# PLAYER_INIT
# unused
pass
elif type == 2:
# PLAYER_MORTAL
self.players[data["id"]].immortal = False
elif type == 3:
# PLAYER_DAMAGED
self.players[data["id"]].lifes -= data["dmg"]
self.players[data["id"]].immortal = True
elif type == 4:
# PLAYER_MOVED
p = self.players[data["id"]]
p.x = data["x"]
p.y = data["y"]
p.facing = data["f"]
elif type == 12:
# PLAYER_NOT_SLIMEY
self.players[data["id"]].slimey = False
elif type == 13:
# PLAYER_SLIMED
self.players[data["id"]].slimey = True
elif type == 5:
# SPAWN_BOX
self.boxes.add((data["x"], data["y"]))
elif type == 6:
# SPAWN_BOMB
self.bombs[(data["x"], data["y"])] = data["t"] # x,y -> ticks
elif type == 7:
# SPAWN_EXPLOSION
self.explosions.add((data["x"], data["y"]))
elif type == 8:
# UPDATE_TRAP
# unused
pass
elif type == 9:
# REMOVE_BOX
self.boxes.discard((data["x"], data["y"]))
elif type == 10:
# REMOVE_BOMB
self.bombs.pop((data["x"], data["y"]), None)
elif type == 11:
# REMOVE_EXPLOSION
self.explosions.discard((data["x"], data["y"]))
elif type == 15:
# SPAWN_FALLING_BOX
self.falling_boxes[(data["x"], data["y"])] = data["t"] # x,y -> ticks
elif type == 16:
# REMOVE_FALLING_BOX
self.falling_boxes.pop((data["x"], data["y"]), None)
elif type == 17:
# SPAWN_CRUSHING_BOX
self.crushing_boxes[(data["x"], data["y"])] = data["t"]
elif type == 18:
# REMOVE_CRUSHING_BOX
self.crushing_boxes.pop((data["x"], data["y"]), None)
elif type == 19:
# PLAYER_TAUNT
if data["id"] not in self.active_taunts:
self.active_taunts[data["id"]] = data["t"]
elif type == 20:
# SPAWN_POWER_UP
self.power_ups[(data["x"], data["y"])] = data["t"] # type
elif type == 21:
# REMOVE_POWER_UP
self.power_ups.pop((data["x"], data["y"]), None)
elif type == 22:
# ANGER_INFO
self.players[0].set_anger(data["0"])
self.players[1].set_anger(data["1"])
elif type == 23:
# ACTIVATE_TRAP
self.inactive_traps.pop((data["x"], data["y"]), None)
self.active_traps.add((data["x"], data["y"]))
elif type == 24:
# RESET_TRAP
self.inactive_traps[(data["x"], data["y"])] = [data["t"], data["t"]] # [max_ticks, remaining_ticks]
self.active_traps.discard((data["x"], data["y"]))
elif type == 25:
# PLAYER_INVERT_KEYBOARD_ON
self.players[data["id"]].inverted_keyboard = True
elif type == 26:
# PLAYER_INVERT_KEYBOARD_OFF
self.players[data["id"]].inverted_keyboard = False
elif type == 27:
# PLAYER_CHANGE_BOMBS_COUNT
self.players[data["id"]].bombs = data["b"]
elif type == 28:
# PLAYER_CHANGE_POWER_AMOUNT
self.players[data["id"]].power = data["p"]
elif type == 29:
# PLAYER_AUTOWALK_ON
self.players[data["id"]].autowalk = True
elif type == 30:
# PLAYER_AUTOWALK_OFF
self.players[data["id"]].autowalk = False
def load_next_image_and_timestamp(self, player_id, opened_handle):
f = opened_handle
timestamp = f.readline()
if timestamp == "":
self.face_replay_file_handles[player_id] = None
f.close()
return
else:
# print([timestamp])
timestamp = float(timestamp)
self.next_faces[player_id][0] = timestamp
image_data = []
for _ in range(48):
line = f.readline()
if line == "":
self.face_replay_file_handles[player_id] = None
f.close()
return
image_data.append(line.strip())
print(len(image_data))
image_data = "\n".join(image_data)
image_data = io.StringIO(initial_value=image_data.strip() + "\n")
img = np.loadtxt(image_data)
self.next_faces[player_id][1] = img
line = f.readline().strip()
if line != "":
print(line)
print("ERROR: Wanted to jump empty line but was not empty")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--replay_dir", help="The directory containing the replays for this run.", type=str)
args = vars(parser.parse_args())
gr = GameReplay(**args)
gr.replay()
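# Expected replay directory layout (inferred from the constants above):
# ../replays/<replay_dir>/ containing game.csv, buttonpress.csv, optional
# faces_0.txt / faces_1.txt, plus any extra ';'-separated CSVs, each of which
# becomes one matplotlib subplot.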
|
LanyK/TheAngerGames
|
bombangerman/client/GameReplay.py
|
GameReplay.py
|
py
| 14,488 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71578351547
|
""" An example trainer for a simply policy gradient implementation. """
import time
import json
import torch
from torch.optim import Adam
import gym
from oxentiel import Oxentiel
from asta import dims, shapes
from vpg import (
get_action,
compute_policy_loss,
compute_value_loss,
finish,
ActorCritic,
RolloutStorage,
)
SETTINGS_PATH = "settings_vpg.json"
def train(ox: Oxentiel) -> None:
""" Training loop. """
env: gym.Env = gym.make(ox.env_name)
shapes.OB = env.observation_space.shape
dims.N_ACTS = env.action_space.n
ac = ActorCritic(shapes.OB[0], dims.N_ACTS, ox.hidden_dim)
actor_optimizer = Adam(ac.pi.parameters(), lr=ox.lr)
critic_optimizer = Adam(ac.v.parameters(), lr=ox.lr)
rollouts = RolloutStorage()
ob = env.reset()
done = False
t_start = time.time()
for i in range(ox.iterations):
ob_t = torch.Tensor(ob)
act, val = get_action(ac, ob_t)
# Critical: add prev ob to rollouts buffer.
prev_ob = ob
ob, rew, done, _ = env.step(act)
rollouts.add(prev_ob, act, val, rew)
# If we're done, or we finished a batch.
if done or (i > 0 and i % ox.batch_size == 0):
rews = rollouts.rews
vals = rollouts.vals
last_val = 0 if done else vals[-1]
ep_weights, ep_rets = finish(ox, rews, vals, last_val)
rollouts.rews = []
rollouts.vals = []
rollouts.lens.append(len(ep_weights))
rollouts.weights.extend(ep_weights)
rollouts.rets.extend(ep_rets)
ob, done = env.reset(), False
if i > 0 and i % ox.batch_size == 0:
mean_ret, mean_ep_len = rollouts.stats()
obs, acts, weights, rets = rollouts.get()
actor_optimizer.zero_grad()
policy_loss = compute_policy_loss(ac, obs, acts, weights)
policy_loss.backward()
actor_optimizer.step()
critic_optimizer.zero_grad()
value_loss = compute_value_loss(ac, obs, rets)
value_loss.backward()
critic_optimizer.step()
print(f"Iteration: {i} \t ", end="")
print(f"Time: {time.time() - t_start:.3f} \t ", end="")
print(f"Loss: {policy_loss:.3f} \t ", end="")
print(f"Mean episode length: {mean_ep_len:.3f} \t ", end="\n")
t_start = time.time()
def main() -> None:
""" Run the trainer. """
with open(SETTINGS_PATH, "r") as settings_file:
settings = json.load(settings_file)
ox = Oxentiel(settings)
train(ox)
if __name__ == "__main__":
main()
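# settings_vpg.json must provide at least these keys (inferred from the
# attribute accesses above; values are illustrative):
# {"env_name": "CartPole-v0", "hidden_dim": 64, "lr": 1e-3,
#  "batch_size": 5000, "iterations": 100000}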
|
langfield/polstead
|
demos/vpg/primitive/trainer.py
|
trainer.py
|
py
| 2,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6118839537
|
# define a closure
def charge(price):
    def calc(num):
        return price * num
    return calc
# create 2 closures
child = charge(400)
adult = charge(1000)
price1 = child(3)
price2 = adult(2)
print(price1)
print(price2)
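# The same pattern parameterizes any per-unit price (values illustrative):
senior = charge(700)
print(senior(5))  # 3500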
|
longniu/pylesson
|
note/price.py
|
price.py
|
py
| 224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5484857647
|
import ROOT
import uproot
from hipe4ml.tree_handler import TreeHandler
import numpy as np
import argparse
import yaml
import sys
sys.path.append('utils')
import utils as utils
utils.set_style()
kBlueC = ROOT.TColor.GetColor('#1f78b4')
kOrangeC = ROOT.TColor.GetColor('#ff7f00')
ROOT.gROOT.SetBatch()
## create signal extraction class
class SignalExtraction:
def __init__(self, input_data_hdl, input_mc_hdl=None): ## could be either a pandas or a tree handler
self.data_hdl = input_data_hdl
self.mc_hdl = input_mc_hdl
self.is_3lh = True
self.bins = 40
self.n_evts = 1e9
self.is_matter = False
self.signal_fit_func = 'dscb'
self.bkg_fit_func = 'pol1'
self.performance = False
self.additional_pave_text = '' ## additional text to be added to the ALICE performance pave
## variables
self.pdf = None
self.roo_dataset = None
### frames to be saved to file
self.mc_frame_fit = None
self.data_frame_fit = None
self.local_pvalue_graph = None
def process_fit(self, extended_likelihood=True, rooworkspace_path=None):
if self.is_3lh:
self.inv_mass_string = '#it{M}_{^{3}He+#pi^{-}}' if self.is_matter else '#it{M}_{^{3}#bar{He}+#pi^{+}}'
decay_string = '{}^{3}_{#Lambda}H #rightarrow ^{3}He+#pi^{-}' if self.is_matter else '{}^{3}_{#bar{#Lambda}}#bar{H} #rightarrow ^{3}#bar{He}+#pi^{+}'
tree_var_name = 'fMassH3L'
else:
self.inv_mass_string = '#it{M}_{^{4}He+#pi^{-}}' if self.is_matter else '#it{M}_{^{4}#bar{He}+#pi^{+}}'
decay_string = '{}^{4}_{#Lambda}H #rightarrow ^{4}He+#pi^{-}' if self.is_matter else '{}^{4}_{#bar{#Lambda}}#bar{H} #rightarrow ^{4}#bar{He}+#pi^{+}'
tree_var_name = 'fMassH4L'
# define signal and bkg variables
if self.is_3lh:
mass = ROOT.RooRealVar('m', self.inv_mass_string, 2.96, 3.04, 'GeV/c^{2}')
mu = ROOT.RooRealVar('mu', 'hypernucl mass', 2.97, 2.992, 'GeV/c^{2}')
else:
mass = ROOT.RooRealVar('m', self.inv_mass_string, 3.89, 3.97, 'GeV/c^{2}')
mu = ROOT.RooRealVar('mu', 'hypernucl mass', 3.9, 3.95, 'GeV/c^{2}')
sigma = ROOT.RooRealVar('sigma', 'hypernucl width', 0.001, 0.004, 'GeV/c^{2}')
a1 = ROOT.RooRealVar('a1', 'a1', 0, 5.)
a2 = ROOT.RooRealVar('a2', 'a2', 0, 5.)
n1 = ROOT.RooRealVar('n1', 'n1', 1, 5.)
n2 = ROOT.RooRealVar('n2', 'n2', 1, 5.)
c0 = ROOT.RooRealVar('c0', 'constant c0', -1., 1)
c1 = ROOT.RooRealVar('c1', 'constant c1', -1., 1)
if self.signal_fit_func == 'dscb':
signal = ROOT.RooCrystalBall('cb', 'cb', mass, mu, sigma, a1, n1, a2, n2)
elif self.signal_fit_func == 'gaus':
signal = ROOT.RooGaussian('gaus', 'gaus', mass, mu, sigma)
else:
raise ValueError(f'Invalid signal fit function. Expected one of: dscb, gaus')
# define background pdf
if self.bkg_fit_func == 'pol1':
background = ROOT.RooChebychev('bkg', 'pol1 bkg', mass, ROOT.RooArgList(c0))
elif self.bkg_fit_func == 'pol2':
background = ROOT.RooChebychev('bkg', 'pol2 bkg', mass, ROOT.RooArgList(c0, c1))
else:
raise ValueError(f'Invalid background fit function. Expected one of: pol1, pol2')
if extended_likelihood:
n_signal = ROOT.RooRealVar('n_signal', 'n_signal', 10., 1e6)
n_background = ROOT.RooRealVar('n_background', 'n_background', 0., 1e6)
else:
f = ROOT.RooRealVar('f', 'fraction of signal', 0., 0.4)
# fix DSCB parameters to MC
if self.mc_hdl != None:
mass_roo_mc = utils.ndarray2roo(np.array(self.mc_hdl['fMassH3L'].values, dtype=np.float64), mass, 'histo_mc')
signal.fitTo(mass_roo_mc, ROOT.RooFit.Range(2.97, 3.01), ROOT.RooFit.PrintLevel(-1))
a1.setConstant()
a2.setConstant()
n1.setConstant()
n2.setConstant()
sigma.setRange(sigma.getVal(), sigma.getVal()*1.5)
self.mc_frame_fit = mass.frame(80)
self.mc_frame_fit.SetName('mc_frame_fit')
mass_roo_mc.plotOn(self.mc_frame_fit)
signal.plotOn(self.mc_frame_fit)
fit_param = ROOT.TPaveText(0.6, 0.6, 0.9, 0.9, 'NDC')
fit_param.SetBorderSize(0)
fit_param.SetFillStyle(0)
fit_param.SetTextAlign(12)
fit_param.AddText('#mu = ' + f'{mu.getVal()*1e3:.2f} #pm {mu.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}')
fit_param.AddText('#sigma = ' + f'{sigma.getVal()*1e3:.2f} #pm {sigma.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}')
self.mc_frame_fit.addObject(fit_param)
# define the fit function and perform the actual fit
if extended_likelihood:
self.pdf = ROOT.RooAddPdf('total_pdf', 'signal + background', ROOT.RooArgList(signal, background), ROOT.RooArgList(n_signal, n_background))
else:
self.pdf = ROOT.RooAddPdf('total_pdf', 'signal + background', ROOT.RooArgList(signal, background), ROOT.RooArgList(f))
mass_array = np.array(self.data_hdl[tree_var_name].values, dtype=np.float64)
self.roo_dataset = utils.ndarray2roo(mass_array, mass)
self.pdf.fitTo(self.roo_dataset, ROOT.RooFit.Extended(extended_likelihood), ROOT.RooFit.Save(True), ROOT.RooFit.PrintLevel(-1))
## get fit parameters
fit_pars = self.pdf.getParameters(self.roo_dataset)
sigma_val = fit_pars.find('sigma').getVal()
sigma_val_error = fit_pars.find('sigma').getError()
mu_val = fit_pars.find('mu').getVal()
mu_val_error = fit_pars.find('mu').getError()
if extended_likelihood:
signal_counts = n_signal.getVal()
signal_counts_error = n_signal.getError()
background_counts = n_background.getVal()
background_counts_error = n_background.getError()
else:
signal_counts = (1-f.getVal())*self.roo_dataset.sumEntries()
signal_counts_error = (1-f.getVal()) * self.roo_dataset.sumEntries()*f.getError()/f.getVal()
background_counts = f.getVal()*self.roo_dataset.sumEntries()
background_counts_error = f.getVal() * self.roo_dataset.sumEntries()*f.getError()/f.getVal()
self.data_frame_fit = mass.frame(self.n_bins)
self.data_frame_fit.SetName('data_frame_fit')
self.roo_dataset.plotOn(self.data_frame_fit, ROOT.RooFit.Name('data'), ROOT.RooFit.DrawOption('p'))
self.pdf.plotOn(self.data_frame_fit, ROOT.RooFit.Components('bkg'), ROOT.RooFit.LineStyle(ROOT.kDashed), ROOT.RooFit.LineColor(kOrangeC))
self.pdf.plotOn(self.data_frame_fit, ROOT.RooFit.LineColor(kBlueC), ROOT.RooFit.Name('fit_func'))
self.data_frame_fit.GetYaxis().SetTitleSize(0.06)
self.data_frame_fit.GetYaxis().SetTitleOffset(0.9)
self.data_frame_fit.GetYaxis().SetMaxDigits(2)
self.data_frame_fit.GetXaxis().SetTitleOffset(1.1)
# signal within 3 sigma
mass.setRange('signal', mu_val-3*sigma_val, mu_val+3*sigma_val)
signal_int = signal.createIntegral(ROOT.RooArgSet(mass), ROOT.RooArgSet(mass), 'signal')
signal_int_val_3s = signal_int.getVal()*signal_counts
signal_int_val_3s_error = signal_int_val_3s*signal_counts_error/signal_counts
# background within 3 sigma
mass.setRange('bkg', mu_val-3*sigma_val, mu_val+3*sigma_val)
bkg_int = background.createIntegral(ROOT.RooArgSet(mass), ROOT.RooArgSet(mass), 'bkg')
bkg_int_val_3s = bkg_int.getVal()*background_counts
bkg_int_val_3s_error = bkg_int_val_3s*background_counts_error/background_counts
significance = signal_int_val_3s / np.sqrt(signal_int_val_3s + bkg_int_val_3s)
significance_err = utils.significance_error(signal_int_val_3s, bkg_int_val_3s, signal_int_val_3s_error, bkg_int_val_3s_error)
s_b_ratio_err = np.sqrt((signal_int_val_3s_error/signal_int_val_3s)**2 + (bkg_int_val_3s_error/bkg_int_val_3s)**2)*signal_int_val_3s/bkg_int_val_3s
# add pave for stats
pinfo_vals = ROOT.TPaveText(0.632, 0.5, 0.932, 0.85, 'NDC')
pinfo_vals.SetBorderSize(0)
pinfo_vals.SetFillStyle(0)
pinfo_vals.SetTextAlign(11)
pinfo_vals.SetTextFont(42)
pinfo_vals.AddText(f'Signal (S): {signal_counts:.0f} #pm {signal_counts_error:.0f}')
pinfo_vals.AddText(f'S/B (3 #sigma): {signal_int_val_3s/bkg_int_val_3s:.1f} #pm {s_b_ratio_err:.1f}')
pinfo_vals.AddText('S/#sqrt{S+B} (3 #sigma): ' + f'{significance:.1f} #pm {significance_err:.1f}')
pinfo_vals.AddText('#mu = ' + f'{mu_val*1e3:.2f} #pm {mu.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}')
pinfo_vals.AddText('#sigma = ' + f'{sigma_val*1e3:.2f} #pm {sigma.getError()*1e3:.2f}' + ' MeV/#it{c}^{2}')
## add pave for ALICE performance
if self.performance:
pinfo_alice = ROOT.TPaveText(0.6, 0.5, 0.93, 0.85, 'NDC')
else:
pinfo_alice = ROOT.TPaveText(0.14, 0.6, 0.42, 0.85, 'NDC')
pinfo_alice.SetBorderSize(0)
pinfo_alice.SetFillStyle(0)
pinfo_alice.SetTextAlign(11)
pinfo_alice.SetTextFont(42)
pinfo_alice.AddText('ALICE Performance')
pinfo_alice.AddText('Run 3, pp #sqrt{#it{s}} = 13.6 TeV')
if not self.performance:
pinfo_alice.AddText('N_{ev} = ' f'{self.n_evts:.0f} ' '#times 10^{9}')
pinfo_alice.AddText(decay_string)
if self.additional_pave_text != '':
pinfo_alice.AddText(self.additional_pave_text)
if not self.performance:
self.data_frame_fit.addObject(pinfo_vals)
self.data_frame_fit.addObject(pinfo_alice)
fit_stats = {'signal': [signal_counts, signal_counts_error],
'significance': [significance, significance_err], 's_b_ratio': [signal_int_val_3s/bkg_int_val_3s, s_b_ratio_err]}
if rooworkspace_path != None:
w = ROOT.RooWorkspace('w')
sb_model = ROOT.RooStats.ModelConfig('sb_model', w)
sb_model.SetPdf(self.pdf)
sb_model.SetParametersOfInterest(ROOT.RooArgSet(n_signal))
sb_model.SetObservables(ROOT.RooArgSet(mass))
getattr(w, 'import')(sb_model)
getattr(w, 'import')(self.roo_dataset)
w.writeToFile(rooworkspace_path + '/rooworkspace.root', True)
return fit_stats
def compute_significance_asymptotic_calc(self, rooworkspace_path, do_local_p0plot=False):
print("-----------------------------------------------")
print("Computing significance with asymptotic calculator")
## get saved workspace
workspace_file = ROOT.TFile(rooworkspace_path + '/rooworkspace.root', 'READ')
w = workspace_file.Get('w')
roo_abs_data = w.data('data')
sb_model = w.obj('sb_model')
poi = sb_model.GetParametersOfInterest().first()
sb_model.SetSnapshot(ROOT.RooArgSet(poi))
## create the b-only model
b_model = sb_model.Clone()
b_model.SetName('b_model')
poi.setVal(0)
b_model.SetSnapshot(poi)
b_model.Print()
# w.var('sigma').setConstant(True)
w.var('mu').setConstant(True)
asymp_calc = ROOT.RooStats.AsymptoticCalculator(roo_abs_data, sb_model, b_model)
asymp_calc.SetPrintLevel(0)
asymp_calc_result = asymp_calc.GetHypoTest()
null_p_value = asymp_calc_result.NullPValue()
null_p_value_err = asymp_calc_result.NullPValueError()
significance = asymp_calc_result.Significance()
significance_err = asymp_calc_result.SignificanceError()
if do_local_p0plot:
### perform a scan in mass and compute the significance
masses = []
p0_values = []
p0_values_expected = []
mass_array = np.linspace(w.var('mu').getMin(), w.var('mu').getMax(), 100)
for mass in mass_array:
w.var('mu').setVal(mass)
w.var('mu').setConstant(True)
asymp_calc_scan = ROOT.RooStats.AsymptoticCalculator(roo_abs_data, sb_model, b_model)
asymp_calc_scan.SetOneSidedDiscovery(True)
asym_calc_result_scan = asymp_calc_scan.GetHypoTest()
null_p_value_scan = asym_calc_result_scan.NullPValue()
masses.append(mass)
p0_values.append(null_p_value_scan)
print(f"Mass: {mass} MeV/c^2, p0: {null_p_value_scan:.10f}")
## create a graph with the p0 values
self.local_pvalue_graph = ROOT.TGraph(len(masses), np.array(masses), np.array(p0_values))
self.local_pvalue_graph.SetName('p0_values')
self.local_pvalue_graph.GetXaxis().SetTitle(self.inv_mass_string)
self.local_pvalue_graph.GetYaxis().SetTitle('Local p-value')
# log Y axis
self.local_pvalue_graph.SetMarkerStyle(20)
self.local_pvalue_graph.SetMarkerColor(kBlueC)
self.local_pvalue_graph.SetMarkerSize(0)
self.local_pvalue_graph.SetLineColor(kBlueC)
self.local_pvalue_graph.SetLineWidth(2)
print("****************************************************")
print(f'p0: {null_p_value:.3E} +/- {null_p_value_err:.3E}')
print(f'significance: {significance:.5f} +/- {significance_err:.5f}')
print("****************************************************")
if __name__ == '__main__':
# set parameters
parser = argparse.ArgumentParser(
description='Configure the parameters of the script.')
parser.add_argument('--config-file', dest='config_file', default='',
help='path to the YAML file with configuration.')
parser.add_argument('--nbins', dest='n_bins', default=30,
help='number of bins in the final plot.')
parser.add_argument('--performance', action='store_true',
help="True for performance plot", default=False)
args = parser.parse_args()
config_file = open(args.config_file, 'r')
config = yaml.full_load(config_file)
input_parquet_data = config['input_parquet_data']
input_analysis_results = config['input_analysis_results']
input_parquet_mc = config['input_parquet_mc']
output_dir = config['output_dir']
output_file = config['output_file']
is_4lh = config['is_4lh']
matter_type = config['matter_type']
n_bins = config['n_bins']
performance = args.performance
data_hdl = TreeHandler(input_parquet_data)
mc_hdl = TreeHandler(input_parquet_mc)
an_vtx_z = uproot.open(input_analysis_results)['hyper-reco-task']['hZvtx']
n_evts = an_vtx_z.values().sum() / 1e9
n_evts = round(n_evts, 0)
signal_extraction = SignalExtraction(data_hdl, mc_hdl)
signal_extraction.n_bins = n_bins
signal_extraction.n_evts = n_evts
    signal_extraction.is_matter = (matter_type == 'matter')  # assumption: matter_type is 'matter' or 'antimatter'; the class only reads is_matter, and .matter_type was never used
signal_extraction.performance = performance
signal_extraction.is_3lh = not is_4lh
signal_extraction.bkg_fit_func = 'pol1'
signal_extraction.process_fit(extended_likelihood=True, rooworkspace_path="../results")
signal_extraction.compute_significance_asymptotic_calc(rooworkspace_path="../results", do_local_p0plot=True)
# create output file and save frames
out_file = ROOT.TFile(f'{output_dir}/{output_file}', 'recreate')
out_file.cd()
signal_extraction.data_frame_fit.Write()
signal_extraction.mc_frame_fit.Write()
signal_extraction.local_pvalue_graph.Write()
out_file.Close()
if is_4lh:
state_label = '4lh'
else:
state_label = '3lh'
cSignalExtraction = ROOT.TCanvas('cSignalExtraction', 'cSignalExtraction', 800, 600)
signal_extraction.data_frame_fit.SetTitle('')
signal_extraction.data_frame_fit.Draw()
cSignalExtraction.SaveAs(f'{output_dir}/cSignalExtraction_{matter_type}_{state_label}.pdf')
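# The YAML passed via --config-file must define the keys read above; a sketch
# (paths are placeholders):
# input_parquet_data: data.parquet
# input_parquet_mc: mc.parquet
# input_analysis_results: AnalysisResults.root
# output_dir: ../results
# output_file: signal_extraction.root
# is_4lh: false
# matter_type: matter
# n_bins: 30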
|
lbariogl/HyperRoutine
|
signal_extraction.py
|
signal_extraction.py
|
py
| 16,102 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73829652666
|
import asyncio
import multiprocessing as mp
import os
import time
from bs4 import BeautifulSoup
from src.download_utils import download_page
class DownloadRepository:
def __init__(self, address: str):
self.files = []
self.address = address
self.queue = mp.Queue()
self.process = mp.Process(target=self.start_download)
async def start_collect(self, start_page: str) -> None:
self.process.start()
await self.search_repository(start_page)
time.sleep(2)
self.process.terminate()
async def search_repository(self, url: str) -> None:
raw_html = await download_page(self.address, url)
is_file = self.check_page(raw_html)
if not is_file:
embedded_links = self.parse_html_code(raw_html)
for link in embedded_links:
await self.search_repository(link)
def check_page(self, html_text: str) -> bool:
soup = BeautifulSoup(html_text, 'lxml')
tool_bar = soup.find(
name='h4',
attrs='file-header ui top attached header df ac sb',
)
if tool_bar:
tag = tool_bar.find('div', class_='ui buttons mr-2')
if tag is None:
return False
link = tag.find('a', string='Raw').get('href')
print(f'[+]Found file -> {self.address}{link}')
self.queue.put(link)
return True
return False
def parse_html_code(self, html_code: str) -> list[str]:
soup = BeautifulSoup(html_code, 'lxml')
links = []
if soup.tbody:
for tag in soup.tbody.find_all('a'):
link = tag.get('href')
title = tag.get('title')
if link and title:
links.append(link)
return links
return []
def start_download(self):
asyncio.run(self.save_file())
@staticmethod
def create_temp_folder():
if not os.path.exists('repository'):
os.mkdir('repository')
@staticmethod
def create_saving_directory(path: str) -> None:
if not os.path.exists(path):
os.makedirs(path)
@staticmethod
def extract_paths(link: str) -> tuple[str, str]:
paths = link.split('/')
project = paths[2]
directory = '/'.join(paths[6:-1])
filename = paths[-1]
folder = f'repository/{project}/{directory}'
full_path = f'{folder}/{filename}'
return full_path, folder
async def save_file(self):
"""Download and save downloaded files to repository directory."""
self.create_temp_folder()
while True:
if not self.queue.empty():
link = self.queue.get()
text = await download_page(self.address, link)
full_path, directory = self.extract_paths(link)
self.create_saving_directory(directory)
with open(f'{full_path}', 'w') as repo_file:
repo_file.write(text)
print(f'[+]File saved in {full_path}')
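# Hypothetical entry point (address and start page are placeholders; the
# original file defines the class only):
# repo = DownloadRepository("https://gitea.example.org")
# asyncio.run(repo.start_collect("/owner/project/src/branch/main"))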
|
FeltsAzn/TestTaskRadium
|
src/download_recursive.py
|
download_recursive.py
|
py
| 3,102 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38049949342
|
from gtts import gTTS
from pydub import AudioSegment
from pydub.playback import play
from screeninfo import get_monitors
import numpy as np
import json
import tempfile
import os
def get_screen_resolution(use_default):
if use_default:
return 640, 480 # Default resolution
monitors = get_monitors()
if monitors:
return monitors[0].width, monitors[0].height
else:
print("Unable to determine screen resolution.")
return None
def text_to_speech(text, lang='en'):
tts = gTTS(text=text, lang=lang, slow=False)
with tempfile.NamedTemporaryFile(delete=False) as temp_audio:
tts.save(temp_audio.name)
audio_segment = AudioSegment.from_file(temp_audio.name)
os.remove(temp_audio.name)
return audio_segment
input_file = 'results.json'
with open(input_file, 'r') as f:
results_data = json.load(f)
use_default_resolution = 1 # 0 to use device's resolution
screen_width, screen_height = get_screen_resolution(use_default_resolution)
# Total duration of audio (in milliseconds)
total_duration = 10000 # 10sec
# Silent audio segment
audio = AudioSegment.silent(duration=total_duration)
for frame_idx, frame_results in enumerate(results_data):
frame_duration = total_duration / len(results_data)
frame_audio = AudioSegment.silent(duration=frame_duration)
for obj_result in frame_results:
bounding_box = obj_result['bounding_box']
class_name = obj_result['class_name']
        # Amplitude scales inversely with the object's vertical extent
        distance_y = bounding_box['height']  # equivalent to y + height - y
        amplitude = np.clip(1 - distance_y / screen_height, 0.1, 1)  # Amplitude range = [0.1, 1]
# Midpoint of bounding box for panning audio
midpoint_x = bounding_box['x'] + bounding_box['width'] / 2
midpoint_y = bounding_box['y'] + bounding_box['height'] / 2
# Distance between midpoints and pan accordingly
distance_to_center = midpoint_x - screen_width / 2
pan = np.clip(distance_to_center / (screen_width / 2), -1, 1) # Pan range = [-1, 1]
voice_audio = text_to_speech(class_name)
voice_audio = voice_audio - (1 - amplitude) * 50
voice_audio = voice_audio.pan(pan)
frame_audio = frame_audio.overlay(voice_audio)
# print(f"Frame {frame_idx + 1}, Object: {class_name}, Pan: {pan}, Amplitude: {amplitude:.2f}")
audio = audio.overlay(frame_audio, position=int(frame_idx * frame_duration))
    if (frame_idx + 1) % 3 == 0:  # Speak out the last detected class name every third frame
        # AudioSegment objects have no .play() method; use pydub's playback helper
        play(text_to_speech("Attention, " + class_name))
# Export the stereo audio to a file
audio.export('stereo_audio_with_speech_amplitude.wav', format='wav')
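# Optional playback sketch (assumption: the PLAY_PREVIEW gate is illustrative;
# pydub playback needs a local audio backend such as ffplay or simpleaudio)
if os.environ.get('PLAY_PREVIEW'):
    play(audio)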
|
aman071/Smartphone-audio-cues-for-visually-impaired
|
audio_generation.py
|
audio_generation.py
|
py
| 2,862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24348533870
|
import shutil
import zipfile
import os
import sys
import warnings
from urllib.request import urlretrieve
from tqdm import tqdm
from zrp import about
# This is used to show progress when downloading.
# see here: https://github.com/tqdm/tqdm#hooks-and-callbacks
class TqdmUpTo(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n) # will also set self.n = b * bsize
def download_progress(url, fname):
"""
Download a file and show a progress bar.
:param url: A string for the url of the release zip to download.
:param fname: A string for the local file name under which the downloaded file can be found.
:return:
"""
with TqdmUpTo(unit='B', unit_scale=True, miniters=1,
desc=url.split('/')[-1]) as t: # all optional kwargs
print(f"Retrieving url at {url}")
urlretrieve(url, filename=fname, reporthook=t.update_to, data=None)
t.total = t.n
return fname
def download_and_clean_lookup_tables(url, lookup_tables_output_fname, lookup_tables_output_zip_fname, geo_yr="2019",
acs_yr="2019", acs_range="5yr"):
"""
    Download the lookup tables and file them within the module.
This downloads the zip file from the repository, extracts it, renames it, then moves the
tables to the correct directory, and removes large files not used at runtime.
:param lookup_tables_output_fname: A string for the name of the file downloaded after unzipping.
:param acs_range: A string for the year range the acs lookup table data will be from.
:param acs_yr: A string for the year the acs lookup table data will be from.
:param geo_yr: A string for the year the geo lookup table data will be from.
:param url: A string for the url of the release zip to download.
:param lookup_tables_output_zip_fname: A string for the name of the zip file downloaded.
:return:
"""
cwd = os.path.dirname(os.path.abspath(__file__))
path_to_lt_zip = os.path.join(cwd, lookup_tables_output_zip_fname)
print("Downloading zrp release...", file=sys.stderr)
download_progress(url, path_to_lt_zip)
print("Finished download")
print("\n")
print("Filing extras...")
with zipfile.ZipFile(path_to_lt_zip, 'r') as zf:
zf.extractall(cwd)
os.remove(path_to_lt_zip)
# Get rid of prefix that unzipping prepends
# curr_folder = cwd.split("/")[-1]
# unzipped_src_fname = curr_folder + "-" + lookup_tables_output_fname
# path_to_unzipped_src = os.path.join(cwd, unzipped_src_fname)
path_to_lookup_tables = os.path.join(cwd, lookup_tables_output_fname)
# os.rename(path_to_unzipped_src, path_to_lookup_tables)
# Clear old look up table directories
data_dir = os.path.join(cwd, 'data')
geo_data_dir = os.path.join(data_dir, f'processed/geo/{geo_yr}')
acs_data_dir = os.path.join(data_dir, f'processed/acs/{acs_yr}/{acs_range}')
if os.path.isdir(geo_data_dir):
shutil.rmtree(geo_data_dir)
if os.path.isdir(acs_data_dir):
shutil.rmtree(acs_data_dir)
print("Old geo lookup table data cleared out.")
# Migrate lookup tables
dl_geo_dir = os.path.join(cwd, lookup_tables_output_fname, f'geo/{geo_yr}')
dl_acs_dir = os.path.join(cwd, lookup_tables_output_fname, f'acs/{acs_yr}/{acs_range}')
if os.path.isdir(dl_geo_dir):
shutil.move(dl_geo_dir, geo_data_dir)
print(f"New geo lookup tables successfully migrated from {dl_geo_dir} to {geo_data_dir}.")
else:
warnings.warn(f"The geo lookup data was not found in {dl_geo_dir}. Ensure you're requesting a valid year. "
"Consult the lookup_tables release to troubleshoot.")
if os.path.isdir(dl_acs_dir):
shutil.move(dl_acs_dir, acs_data_dir)
print(f"New geo lookup tables successfully migrated from {dl_acs_dir} to {acs_data_dir}.")
else:
        warnings.warn(f"The acs lookup data was not found in {dl_acs_dir}. Ensure you're requesting a valid year and "
                      "year range. Consult the lookup_tables release to troubleshoot.")
# Remove rest of lookup table folder
shutil.rmtree(path_to_lookup_tables)
# save a version file so we can tell what it is
vpath = os.path.join(data_dir, 'version')
with open(vpath, 'w') as vfile:
vfile.write('zrp release --> {}'.format(lookup_tables_output_fname))
    print("Filed lookup tables successfully", file=sys.stderr)
print(" to", data_dir)
def download_and_clean_pipelines(url, pipelines_output_fname, pipelines_output_zip_fname):
"""
    Download the pipeline pickle files and file them within the module.
This downloads the zip file from the repository, extracts it, renames it, then moves the
tables to the correct directory, and removes large files not used at runtime.
    :param pipelines_output_fname: A string for the name of the file downloaded after unzipping.
:param url: A string for the url of the release zip to download.
:param pipelines_output_zip_fname: A string for the name of the zip file downloaded.
:return:
"""
cwd = os.path.dirname(os.path.abspath(__file__))
path_to_ppln_zip = os.path.join(cwd, pipelines_output_zip_fname)
print("Downloading zrp release...", file=sys.stderr)
download_progress(url, path_to_ppln_zip)
print("Finished download")
print("\n")
print("Filing extras...")
with zipfile.ZipFile(path_to_ppln_zip, 'r') as zf:
zf.extractall(cwd)
os.remove(path_to_ppln_zip)
# Get rid of prefix that unzipping prepends
# curr_folder = cwd.split("/")[-1]
# unzipped_src_fname = curr_folder + "-" + pipelines_output_fname
# path_to_unzipped_src = os.path.join(cwd, unzipped_src_fname)
path_to_pipelines = os.path.join(cwd, pipelines_output_fname)
# os.rename(path_to_unzipped_src, path_to_pipelines)
# Clear old look up table directories
model_dir = os.path.join(cwd, 'modeling/models')
block_group_dir = os.path.join(model_dir, 'block_group')
census_tract_dir = os.path.join(model_dir, 'census_tract')
zip_code_dir = os.path.join(model_dir, 'zip_code')
block_group_pipeline = os.path.join(block_group_dir, 'pipe.pkl')
census_tract_pipeline = os.path.join(census_tract_dir, 'pipe.pkl')
zip_code_pipeline = os.path.join(zip_code_dir, 'pipe.pkl')
if os.path.isfile(block_group_pipeline):
os.remove(block_group_pipeline)
if os.path.isfile(census_tract_pipeline):
os.remove(census_tract_pipeline)
if os.path.isfile(zip_code_pipeline):
os.remove(zip_code_pipeline)
print("Old pipelines cleared out.")
# Migrate pipelines
dl_bg_pipe_file = os.path.join(path_to_pipelines, 'block_group_pipe.pkl')
dl_ct_pipe_file = os.path.join(path_to_pipelines, 'census_tract_pipe.pkl')
dl_zp_pipe_file = os.path.join(path_to_pipelines, 'zip_code_pipe.pkl')
if os.path.isfile(dl_bg_pipe_file):
shutil.move(dl_bg_pipe_file, os.path.join(block_group_dir, 'pipe.pkl'))
print("Block group pipeline successfully migrated.")
else:
warnings.warn(f"The block group pipeline was not found in {dl_bg_pipe_file}."
"Consult the pipelines release to troubleshoot.")
if os.path.isfile(dl_ct_pipe_file):
shutil.move(dl_ct_pipe_file, os.path.join(census_tract_dir, 'pipe.pkl'))
print("Census tract pipeline successfully migrated.")
else:
warnings.warn(f"The census tract pipeline was not found in {dl_ct_pipe_file}."
"Consult the pipelines release to troubleshoot.")
if os.path.isfile(dl_zp_pipe_file):
shutil.move(dl_zp_pipe_file, os.path.join(zip_code_dir, 'pipe.pkl'))
print("Zip code pipeline successfully migrated.")
else:
warnings.warn(f"The zip code pipeline was not found in {dl_zp_pipe_file}."
"Consult the pipelines release to troubleshoot.")
# Remove rest of pipelines folder
shutil.rmtree(path_to_pipelines)
# save a version file so we can tell what it is
data_dir = os.path.join(cwd, 'data')
vpath = os.path.join(data_dir, 'version')
with open(vpath, 'w') as vfile:
vfile.write('zrp release --> {}'.format(pipelines_output_fname))
print("Filed pipelines successfully", file=sys.stderr)
print(" to", model_dir)
def get_release():
version = about.__version__
dl_tpl = "{m}-{v}"
return dl_tpl.format(m="zrp", v=version)
def download():
release_pkg = get_release()
# lookup_tables_output_fname = release_pkg + "_lookup_tables"
lookup_tables_output_fname = "lookup_tables"
lookup_tables_output_zip_fname = release_pkg + "_lookup_tables" + ".zip"
lookup_table_url = about.__download_url_prefix__ + release_pkg + "/lookup_tables.zip"
download_and_clean_lookup_tables(lookup_table_url, lookup_tables_output_fname, lookup_tables_output_zip_fname)
pipelines_output_fname = "pipelines"
pipelines_output_zip_fname = release_pkg + "_pipelines" + ".zip"
pipelines_url = about.__download_url_prefix__ + release_pkg + "/pipelines.zip"
download_and_clean_pipelines(pipelines_url, pipelines_output_fname, pipelines_output_zip_fname)
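# Hypothetical entry point (assumption: not part of the original module); it
# fetches both release artifacts for the installed zrp version in one call.
if __name__ == '__main__':
    download()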
|
zestai/zrp
|
zrp/download.py
|
download.py
|
py
| 9,695 |
python
|
en
|
code
| 25 |
github-code
|
6
|
17436250579
|
def count_primes(n):
    """Sieve of Eratosthenes: count the primes strictly below n."""
    isPrime = [False] * 2 + [True] * (n - 2)  # 0 and 1 are not prime
    i = 2
    while i * i < n:
        if not isPrime[i]:
            i += 1
            continue
        # mark multiples of i, starting at i*i (smaller multiples are already marked)
        j = i * i
        while j < n:
            isPrime[j] = False
            j += i
        i += 1
    print(isPrime)
    return isPrime.count(True)
count_primes(10)
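# Quick sanity checks (assumption: added for illustration)
assert count_primes(10) == 4   # 2, 3, 5, 7
assert count_primes(12) == 5   # 2, 3, 5, 7, 11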
|
naubull2/codingtests
|
leetcode/test.py
|
test.py
|
py
| 361 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70628519869
|
class NrPdu:
    def __init__(self, byteStream=None):
        if byteStream is None:
            self.PduByteArray = bytearray()
        elif isinstance(byteStream, str):
            self.PduByteArray = bytearray.fromhex(byteStream)
        elif hasattr(byteStream, 'decode'):
            # bytes-like input: copy into a bytearray so the attribute type is consistent
            self.PduByteArray = bytearray(byteStream)
            print(self.PduByteArray)
        else:
            print("argument must be hex string or bytearray")
def getBitField(self, inputByte, bitMask, bitOffset):
print('getBitField\t : inputByte = 0x{:02x} '.format(inputByte) \
+ 'bitMask = ' + '0x{:02x} '.format(bitMask) \
+ 'bitOffset = ' + '0x{:02x} '.format(bitOffset))
return (inputByte & bitMask) >> bitOffset
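# Hypothetical usage sketch (assumption: the PDU bytes and mask are chosen
# purely for illustration)
if __name__ == '__main__':
    pdu = NrPdu('8f01')  # build from a hex string
    # extract the top nibble of the first byte (mask 0xf0, offset 4) -> 0x8
    field = pdu.getBitField(pdu.PduByteArray[0], 0xf0, 4)
    print('field value = 0x{:x}'.format(field))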
|
leoneilcdasco/5GNR-UserPlane-Utilities
|
nruplane/nrcommon/nrpdu.py
|
nrpdu.py
|
py
| 804 |
python
|
no
|
code
| 7 |
github-code
|
6
|
32740927638
|
#!/usr/bin/env python3
from kazoo.client import KazooClient
import json
def getRollupRules(zookeeperHostsIn, zNodePath):
    """Fetch every child znode under zNodePath and parse its data as JSON."""
    zk = KazooClient(hosts=zookeeperHostsIn)
    zk.start()
    result = {}
    if zk.exists(zNodePath):
        for zookeeperChild in zk.get_children(zNodePath):
            zookeeperChildPath = zNodePath + "/" + zookeeperChild
            if zk.exists(zookeeperChildPath):
                Data, Stat = zk.get(zookeeperChildPath)
                result[zookeeperChild] = json.loads(Data)
            else:
                print("Reported ZK path no longer exists: " + zookeeperChildPath)
    zk.stop()
    return result
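# Hypothetical usage sketch; the host string and znode path below are assumed
# placeholders for a real Druid/ZooKeeper deployment
if __name__ == '__main__':
    rules = getRollupRules("localhost:2181", "/druid/rollup_rules")
    print(json.dumps(rules, indent=2))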
|
ModdingFox/AutomaticDruidRollup
|
Zookeeper/Fetch_Druid_Rules.py
|
Fetch_Druid_Rules.py
|
py
| 682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37828901284
|
#!/usr/bin/env python3
import random
import sys
STRENGTH = {
"All Star": 2,
"Angry Bird": 1,
"GOAT": 1,
"MKatz": 3,
"Pegasus": 1,
"Piston": 4,
}
CARDIO = {
    "22": 2,  # quoted so every key is a str, matching pick_random_vid's annotation
"Abacus": 3,
"Empire": 2,
"Mont Blanc": 1,
"MVP": 4,
"Quarterbacks": 1,
}
def pick_random_vid(videos: dict) -> tuple[str, int]:
key = random.choice(list(videos.keys()))
idx = random.randint(0, videos[key] - 1)
return key, idx
args = sys.argv
if len(args) == 2:
arg = args[1]
if arg == "cardio":
video, idx = pick_random_vid(CARDIO)
print(f"{video} video {idx + 1}")
    elif arg == "strength":
video, idx = pick_random_vid(STRENGTH)
print(f"{video} video {idx + 1}")
else:
video, idx = pick_random_vid({**STRENGTH, **CARDIO})
print(f"{video} video {idx + 1}")
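# Example invocations (assumption: outputs shown are illustrative):
#   ./f45.py cardio    -> e.g. "MVP video 2"
#   ./f45.py strength  -> e.g. "Piston video 3"
#   ./f45.py           -> picks from the combined strength + cardio pool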
|
dbatten5/dotfiles
|
scripts/f45.py
|
f45.py
|
py
| 839 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29785322466
|
import turtle as t
import random
t.colormode(255)
def random_color_finder():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
random_color = (r, g, b)
return random_color
directions = [0, 90, 180, 270]
t.pensize(15)
t.speed('fastest')
for _ in range(200):
t.color(random_color_finder())
t.forward(30)
t.setheading(random.choice(directions))
my_screen = t.Screen()
my_screen.bgcolor('black')
my_screen.exitonclick()
|
NiramayThaker/Python-Topics
|
turtle_graphics/turtle_random_walk_rgb.py
|
turtle_random_walk_rgb.py
|
py
| 486 |
python
|
en
|
code
| 1 |
github-code
|
6
|