seq_id (stringlengths 7-11) | text (stringlengths 156-1.7M) | repo_name (stringlengths 7-125) | sub_path (stringlengths 4-132) | file_name (stringlengths 4-77) | file_ext (stringclasses, 6 values) | file_size_in_byte (int64, 156-1.7M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 38 values) | doc_type (stringclasses, 1 value) | stars (int64, 0-24.2k, ⌀) | dataset (stringclasses, 1 value) | pt (stringclasses, 1 value)
---|---|---|---|---|---|---|---|---|---|---|---|---
26080546821 |
# Example 1:
# here we are making a class
class Student:
pass
# we are making objects from the Student class
harry = Student()
larry = Student()
# we can create instance variables on an object like this
harry.name = "Harry"
harry.std = 12
harry.section = 1
larry.std = 9
larry.subjects = ["hindi", "physics"]
print(harry.name, harry.std)
print(harry.section, larry.subjects)
# if we try to use 'larry.name', it will throw an error because we never defined a name for larry
# Example 2:
class MyClass(object):
# every class is derived from the base class 'object'
# writing 'object' explicitly is not recommended, since it is implicit in Python 3
def show(self):
# every instance method receives the instance object as 'self'
print("Hello world")
obj = MyClass()
obj.show()
# Example 3:
class Mobile:
# writing our own constructor, which gets called whenever we create an object
def __init__(self):
self.model = "RealMe"
def show_model(self):
print("Model: ", self.model)
realMe = Mobile()
# Accessing method
realMe.show_model()
# Accessing properties
print(realMe.model)
# Updating property value
realMe.model = "RealMe Pro2"
print(realMe.model)
# Example 4:
class Mobile2:
# writing our own constructor, which gets called whenever we create an object
def __init__(self, model):
self.model = model
def show_model(self, price):
print("Model: ", self.model, " Price: ", price)
realMe2 = Mobile2("RealMe X")
realMe2.show_model(30000)
print("Address of realMe2: ", id(realMe2))
# Address of realMe2: 3058544135760
readMi = Mobile2("ReadMi 7s")
readMi.show_model(20000)
print("Address of readMi: ", id(readMi))
# Address of readMi: 3058544135952
# Example 5:
class Employee:
no_of_leaves = 8
# this is the class variable
harry = Employee()
rohan = Employee()
harry.name = "Harry"
harry.salary = 455
harry.role = "instructor"
print(harry.name, harry.salary)
# the variables we created here (name, salary, role) are instance variables of the object, not variables of the class
print(harry.no_of_leaves)
# but no_of_leaves here is a property of the class
print(Employee.no_of_leaves)
# so we can access a class variable through the class as well
# if we want to change the value of a class variable, we can do that through the class name
Employee.no_of_leaves = 9
# we can change the value of no_of_leaves through the class, but!!!
harry.no_of_leaves = 10
# here we are trying to change the value of the class variable, but it does not change
# but!!! we are actually making a new instance variable on the object
print(harry.no_of_leaves)
# we can verify this through one attribute, and that is:
print(harry.__dict__)
# __dict__ is defined on every object and it returns a dictionary of its attributes
# now we can see that no_of_leaves is an instance variable of the object
print(Employee.__dict__)
rohan.name = "Rohan"
rohan.salary = 455
rohan.role = "Student"
print(rohan.name, rohan.salary)
print(rohan.no_of_leaves)
# but for the rohan object, no_of_leaves is not an instance variable but the class variable
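# A tiny extra demo (not from the original notes): deleting the instance
# variable makes attribute lookup fall back to the class variable again
del harry.no_of_leaves
print(harry.no_of_leaves)
# prints 9, the value of Employee.no_of_leaves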
| roman-ojha/python | Notes/Main/OOP/02_Class_and_Object/02_Creating_Our_Class.py | 02_Creating_Our_Class.py | py | 3,134 | python | en | code | 2 | github-code | 6 |
27579511655 |
import os
import shutil
import torch
def make_dirs(args, opts, mode="train"):
splits , features = '', ''
if args.video_sets == 'videos':
splits += 'new_'
if args.input_feature == '2d':
features += 'new_'
splits += 'splits'
features += 'features'
train_list = os.path.join(opts.data_dir, "BEST", splits, opts.task, "train.txt")
valid_list = os.path.join(opts.data_dir, "BEST", splits, opts.task, "test.txt")
feature_path = os.path.join(opts.data_dir, "BEST", features, opts.task)
resultdir = os.path.join(opts.result_dir, opts.arg, "lap_"+opts.lap, opts.task)
if mode == "train":
demodir = None
dir = resultdir
if mode == "eval":
demodir = os.path.join(opts.demo_dir, "results", opts.arg, "lap_"+opts.lap, opts.task)
dir = demodir
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir)
return train_list, valid_list, feature_path, resultdir, demodir
def accuracy(score_pos, score_neg):
"""Computes the % of correctly ordered pairs"""
pred1 = score_pos
pred2 = score_neg
correct = torch.gt(pred1, pred2)
return float(correct.sum())/correct.size(0), int(correct.sum())
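# Illustrative check (not in the original file): with these scores, 3 of the
# 4 (positive, negative) pairs are ordered correctly, so accuracy is 0.75
# acc, n_correct = accuracy(torch.tensor([0.9, 0.8, 0.2, 0.7]),
#                           torch.tensor([0.1, 0.3, 0.5, 0.6]))  # -> (0.75, 3)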
def data_augmentation(input_var1, input_var2, args, device):
if args.input_feature == '2d':
noise = torch.autograd.Variable(torch.normal(torch.zeros(input_var1.size()[1],
input_var1.size()[2],
input_var1.size()[3],
input_var1.size()[4]),
0.01)).to(device)
else:
noise = torch.autograd.Variable(torch.normal(torch.zeros(input_var1.size()[1],
input_var1.size()[2]),
0.01)).to(device)
input_var1 = torch.add(input_var1, noise)
input_var2 = torch.add(input_var2, noise)
return input_var1, input_var2
class AverageMeter(object):
"""Compute and stores the average and current value"""
def __init__(self):
self.reset()
def reset_val(self):
self.val = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def sec2str(sec):
if sec < 60:
return "{:02d}s".format(int(sec))
elif sec < 3600:
min = int(sec / 60)
sec = int(sec - min * 60)
return "{:02d}m{:02d}s".format(min, sec)
elif sec < 24 * 3600:
min = int(sec / 60)
hr = int(min / 60)
sec = int(sec - min * 60)
min = int(min - hr * 60)
return "{:02d}h{:02d}m{:02d}s".format(hr, min, sec)
elif sec < 365 * 24 * 3600:
min = int(sec / 60)
hr = int(min / 60)
dy = int(hr / 24)
sec = int(sec - min * 60)
min = int(min - hr * 60)
hr = int(hr - dy * 24)
return "{:02d} days, {:02d}h{:02d}m{:02d}s".format(dy, hr, min, sec)
| t-koba-96/skill-assessment | src/util.py | util.py | py | 3,287 | python | en | code | 0 | github-code | 6 |
21386838378 |
# -*- coding: utf-8 -*-
import datetime
from functools import partial
import ipyvuetify as v
from traitlets import (
Unicode, observe, directional_link,
List, Int, Bool, Any, link
)
from sepal_ui.sepalwidgets.sepalwidget import SepalWidget, TYPES
from sepal_ui.frontend.styles import sepal_darker
class DynamicSelect(v.Card):
""" Widget to navigate with next and previous buttons over a list
Args:
items (list) : List of items to be displayed in select list
label (str) : Label to display into widget
Parameters:
v_model (traitlets.Any): Current element from select list
Example:
[1] ds = DynamicSelect(items=[1,2,3,4,5])
ds # Display Dynamic select widget
[2] # add behaviour once v_model changes
ds.observe(lambda x: print(x), 'v_model')
"""
items = List([]).tag(sync=True)
v_model = Any().tag(sync=True)
confidence = Unicode('All').tag(sync=True)
def __init__(self, label='', **kwargs):
self.class_='d-flex align-center mb-2'
self.row=True
self.label = label
super().__init__(**kwargs)
self.w_prev = v.Btn(
_metadata = {'name':'previous'},
x_small=True,
children=[
v.Icon(left=True,children=['mdi-chevron-left']),
'prev'
])
self.w_next = v.Btn(
_metadata = {'name' : 'next'},
x_small=True,
children=[
v.Icon(children=['mdi-chevron-right']),
'nxt'
])
self.w_conf = v.Select(
class_='ma-2',
label='Confidence',
v_model='All',
items=['All', 'Low','High', 'Nominal']
)
self.w_list = v.Select(
class_='ma-2',
label=self.label,
items=self.items,
v_model=''
)
self.children = [
self.w_prev,
self.w_conf,
self.w_list,
self.w_next
]
link((self.w_list, 'items'),(self, 'items'))
link((self.w_list, 'v_model'),(self, 'v_model'))
link((self.w_conf, 'v_model'),(self, 'confidence'))
self.w_prev.on_event('click', self.prev_next_event)
self.w_next.on_event('click', self.prev_next_event)
def prev_next_event(self, widget, change, data):
current = self.w_list.v_model
position = -1 if not current else self.w_list.items.index(current)
last = len(self.w_list.items) - 1
if widget._metadata['name']=='next':
if position < last:
self.w_list.v_model = self.w_list.items[position+1]
elif widget._metadata['name']=='previous':
if position > 0:
self.w_list.v_model = self.w_list.items[position-1]
class Tooltip(v.Tooltip):
def __init__(self, widget, tooltip, *args, **kwargs):
"""
Custom widget to display tooltip when mouse is over widget
Args:
widget (DOM.widget): widget used to display tooltip
tooltip (str): the text to display in the tooltip
Example:
btn = v.Btn(children=['Button'])
Tooltip(widget=btn, tooltip='Click over the button')
"""
self.bottom=True
self.v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': widget
}]
widget.v_on = 'tooltip.on'
self.children = [tooltip]
super().__init__(*args, **kwargs)
def __setattr__(self, name, value):
"""prevent set attributes after instantiate tooltip class"""
if hasattr(self,'_model_id'):
if self._model_id:
raise RuntimeError(f"You can't modify the attributes of the {self.__class__} after instantiated")
super().__setattr__(name, value)
class Tabs(v.Card):
current = Int(0).tag(sync=True)
def __init__(self, titles, content, **kwargs):
self.background_color="primary"
self.dark = True
self.tabs = [v.Tabs(v_model=self.current, children=[
v.Tab(children=[title], key=key) for key, title in enumerate(titles)
])]
self.content = [v.TabsItems(
v_model=self.current,
children=[
v.TabItem(children=[content], key=key) for key, content in enumerate(content)
]
)]
self.children= self.tabs + self.content
link((self.tabs[0], 'v_model'),(self.content[0], 'v_model'))
super().__init__(**kwargs)
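# Illustrative usage of Tabs (assumed, not in the original file): titles and
# content lists must have the same length; the internal link keeps the
# selected tab and the displayed item in sync
# tabs = Tabs(
#     titles=['Map', 'Settings'],
#     content=[v.Card(children=['map view']), v.Card(children=['options'])]
# )
# tabs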
| dfguerrerom/restoration_viewer | component/widget/custom_widgets.py | custom_widgets.py | py | 4,906 | python | en | code | 0 | github-code | 6 |
29584086251 |
# -*- coding: utf-8 -*-
import unicodedata
from datetime import datetime, timedelta
from html2text import html2text
from openerp import models, api, fields
from openerp.exceptions import Warning
class AvancysNotification(models.Model):
_name = 'avancys.notification'
user_id = fields.Many2one('res.users', 'Usuario')
notification = fields.Char('Notificacion')
tittle = fields.Char('Titulo')
url = fields.Char('Url')
date = fields.Datetime('Fecha de generacion')
state = fields.Selection([
('pending', 'Pendiente'),
('sent', 'Enviada')
])
persistent = fields.Boolean('Notificacion persistente')
constructor_id = fields.Many2one('notification.constructor', 'constructor')
modelo_id = fields.Integer('ID Registro')
@api.model
def get_notifications(self):
notifications = self.env['avancys.notification'].search([
('user_id', '=', self.env.uid),
('state', '=', 'pending'),
('date', '<=', datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S'))
])
data = []
for message in notifications:
data.append(
{
'user_id': message.user_id.name,
'tittle': message.tittle,
'notification': message.notification,
'url': message.url,
'date': message.date,
'state': message.state
}
)
if message.persistent is True:
message.unlink()
else:
message.state = 'sent'
return data
class NotificationConstructor(models.Model):
_name = 'notification.constructor'
name = fields.Char('Descripcion')
table = fields.Many2one('ir.model', 'Modelo')
field_user = fields.Char('Campo usuario')
is_partner = fields.Boolean('Es contacto')
tittle = fields.Char(
'Titulo de la notificacion',
help="""Si es un constructor agrupado asignar un texto plano,
sino asignar el campo o el simbolo '-' seguido de texto plano""")
field_notification = fields.Char(
'Campo notificacion',
help="""Si es un constructor agrupado asignar un texto plano,
sino asignar el campo o el simbolo '-' seguido de texto plano""")
notification_html = fields.Boolean('Es html')
url = fields.Char('Url', help="Especificar direccion desde /web... comodin {id} si se requiere ir a un registro")
url_id = fields.Char('ID URL', help="'id' o Campo tipo objeto relacionado")
grouped = fields.Boolean('Agrupado')
persistent = fields.Boolean('Notificacion Persistente')
condition_ids = fields.One2many('notification.constructor.line', 'constructor_id', string="Condiciones")
user_from = fields.Char('Remitente', help='Permite mapeo de campos a un nivel, ej: message_id.email_from')
@api.model
def get_notification(self):
self.env.cr.execute("SELECT id FROM notification_constructor")
notif_constructor_obj = self.env['notification.constructor']
constructors = self.env.cr.fetchall()
for cons in constructors:
notif_constructor_obj.browse(cons).create_notifications()
@api.multi
def create_notifications(self):
avancys_notif_obj = self.env['avancys.notification']
dominio = []
for line in self.condition_ids:
if line.c2[0:3] == "now":
if line.c2[4:5] == '+':
date = datetime.now() + timedelta(minutes=int(line.c2[6:len(line.c2)]))
elif line.c2[4:5] == '-':
date = datetime.now() - timedelta(minutes=int(line.c2[6:len(line.c2)]))
elif len(line.c2) == 3:
date = datetime.now()
else:
raise Warning('Las condiciones de fecha no son validas')
date = datetime.strftime(date, '%Y-%m-%d %H:%M:%S')
crit = (str(line.c1), str(line.operator), date)
else:
if str(line.c2) == 'True':
cond = True
elif str(line.c2) == 'False':
cond = False
else:
cond = str(line.c2)
crit = (str(line.c1), str(line.operator), cond)
dominio.append(crit)
modelo_ids = self.env[self.table.model].search(dominio)
notif_data = []
orm2sql = self.env['avancys.orm2sql']
if not self.grouped:
for i in modelo_ids:
for user in getattr(i, self.field_user):
if self.is_partner:
user_notification = user.system_user_id.id
else:
user_notification = user.id
if self.persistent:
user_constructor = avancys_notif_obj.search([
('constructor_id', '=', self.id),
('user_id', '=', user_notification),
('modelo_id', '=', i.id),
('state', '=', 'pending')])
else:
user_constructor = avancys_notif_obj.search([
('constructor_id', '=', self.id),
('user_id', '=', user_notification),
('modelo_id', '=', i.id)])
if len(user_constructor) > 0:
continue
if self.tittle[0] == '-':
tittle = self.tittle[1:len(self.tittle)]
else:
if '.' in self.tittle:
tittle = getattr(getattr(i, self.tittle.split('.')[0])[0], self.tittle.split('.')[1])
else:
tittle = getattr(i, self.tittle)
try:
tittle = tittle[0].display_name
except:
if tittle:
if len(tittle) == 0:
tittle = False
else:
pass
else:
tittle = False
user_from = False
if self.user_from:
if '.' in self.user_from:
user_from = getattr(
getattr(i, self.user_from.split('.')[0])[0], self.user_from.split('.')[1])
else:
user_from = getattr(i, self.user_from)
try:
user_from = user_from[0].display_name
except:
if len(user_from) == 0:
user_from = False
else:
pass
if tittle and user_from:
if len(user_from.split(' ')) > 2:
user_from = user_from.split(' ')[0] + ' ' + user_from.split(' ')[1]
tittle = user_from + ': ' + tittle
elif user_from:
tittle = user_from
if self.field_notification[0] == '-':
field_notification = self.field_notification[1:len(self.field_notification)]
else:
if '.' in self.field_notification:
field_notification = getattr(i, self.field_notification.split('.')[0])
field_notification = getattr(field_notification[0], self.field_notification.split('.')[1])
else:
field_notification = getattr(i, self.field_notification)
try:
field_notification = field_notification[0].display_name
except:
if len(field_notification) == 0:
field_notification = False
else:
pass
if self.notification_html:
if field_notification:
field_notification = html2text(field_notification).replace('\n', '')
else:
field_notification = ''
if self.url:
if not self.url_id:
raise Warning(
"Debe especificar un campo relacionado al id para la url, por lo general es 'id'")
if self.url_id == 'id':
url_id = i.id
else:
url_id = getattr(i, self.url_id)[0].id
url = self.url.replace('{id}', str(url_id))
else:
url = False
if user_notification is False:
continue
notif_data.append({
'user_id': user_notification,
'tittle': tittle,
'notification': field_notification,
'url': url,
'state': 'pending',
'date': orm2sql.local_date(datetime.strftime(datetime.now(), '%Y-%m-%d') + " 00:00:00"),
'constructor_id': self.id,
'persistent': self.persistent,
'modelo_id': i.id,
})
else:
users = []
for i in modelo_ids:
for user in getattr(i, self.field_user):
if self.is_partner:
user_notification = user[0].system_user_id.id
else:
user_notification = user[0].id
if len(user) > 0:
if user_notification not in users:
users.append(user_notification)
for user in users:
if self.persistent:
user_constructor = avancys_notif_obj.search([
('constructor_id', '=', self.id),
('user_id', '=', user),
('state', '=', 'pending')])
else:
user_constructor = avancys_notif_obj.search([
('constructor_id', '=', self.id),
('user_id', '=', user)])
if len(user_constructor) > 0:
continue
if user is False:
continue
notif_data.append({
'user_id': user,
'tittle': self.tittle,
'notification': self.field_notification,
'url': self.url,
'state': 'pending',
'date': orm2sql.local_date(datetime.strftime(datetime.now(), '%Y-%m-%d') + " 00:00:00"),
'constructor_id': self.id,
'persistent': self.persistent,
})
orm2sql.sqlcreate(self.env.uid, self.env.cr, 'avancys_notification', notif_data)
return
class NotificationConstructorLine(models.Model):
_name = 'notification.constructor.line'
c1 = fields.Char('Campo de busqueda')
operator = fields.Char('Operador')
c2 = fields.Char(
'Condicion',
help='''
Para relacionar la fecha actual, asignar la palabra 'now' y agregar el operador + o - con espacios
intermedios, ej. 'now + 60' para compararla con la hora actual + 1 hora
''')
constructor_id = fields.Many2one('notification.constructor', 'Constructor')
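# Illustrative condition line (assumed data, not part of the module): per the
# help text on c2 above, a line matching records whose deadline falls within
# the next hour would be c1='date_deadline', operator='<=', c2='now + 60'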
| odoopruebasmp/Odoo_08 | v8_llevatelo/avancys_notification/avancys_notification.py | avancys_notification.py | py | 11,966 | python | en | code | 0 | github-code | 6 |
35764996048 |
import os
# import urllib.request
# from types import SimpleNamespace
# from urllib.error import HTTPError
import random
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import tabulate
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch_geometric.utils import to_undirected,add_self_loops,remove_self_loops
from torch_geometric.data import InMemoryDataset, download_url
from torch_geometric.data import Data,DataLoader
from torch_geometric.datasets import TUDataset
def collate_graph_adj(edge_list, ptr,use_gpu=False):
if not use_gpu:
edges = torch.cat([torch.tensor(i) + ptr[idx] for idx, i in enumerate(edge_list)], dim=1)
N = ptr[-1]
return torch.sparse_coo_tensor(edges,[1.]*edges.shape[1], (N, N))
else:
edges = torch.cat([torch.tensor(i).cuda(0) + ptr[idx] for idx, i in enumerate(edge_list)], dim=1)
N = ptr[-1]
val = torch.tensor([1.]*edges.shape[1]).cuda(0)
return torch.sparse_coo_tensor(edges,val, (N, N)).cuda(0)
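# Illustrative call (not in the original file): two graphs with 2 and 3 nodes;
# ptr follows the PyG batch convention [0, n1, n1 + n2]
# adj = collate_graph_adj([[[0, 1], [1, 0]], [[0, 1, 2], [1, 2, 0]]], ptr=[0, 2, 5])
# adj is a 5x5 sparse adjacency with the second graph's node ids shifted by 2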
class EdgeIndex_Processor():
def __init__(self, edge_index):
super().__init__()
self.random_walk = None
adj,N = self.to_sparse_tensor(edge_index)
adj_with_selfloop = self.to_sparse_tensor_with_selfloop(edge_index)
self.N = N
self.adj = adj.float()
self.adj_with_loop = adj_with_selfloop.float()
self.k_hop_neibrs = [adj.float()]
self.calc_random_walk_matrix()
def to_sparse_tensor(self, edge_index):
edge_index = remove_self_loops(edge_index)[0]
r = len(edge_index[0])
N = edge_index.max() + 1
t = torch.sparse_coo_tensor(edge_index, [1] * r, (N, N))
return t, N
def to_sparse_tensor_with_selfloop(self, edge_index):
edge_index = add_self_loops(edge_index)[0]
r = len(edge_index[0])
N = edge_index.max() + 1
t = torch.sparse_coo_tensor(edge_index, [1] * r, (N, N))
return t
def calc_random_walk_matrix(self):
t = self.adj_with_loop.to_dense().sum(dim=1)
t = 1./t
n = len(t)
ind = torch.tensor([[i,i] for i in range(n)]).T
diag = torch.sparse_coo_tensor(ind,t,(n,n))
random_walk = torch.sparse.mm(diag,self.adj)
self.random_walk = random_walk
def calc_random_walk_feature(self,order=10):
t = self.random_walk
tot_walk_feats = []
walk_feats = []
for i in range(self.N):
walk_feats.append(t[i,i])
tot_walk_feats.append(walk_feats)
for i in range(order):
walk_feats = []
t = torch.sparse.mm(t,self.random_walk)
for i in range(self.N):
walk_feats.append(t[i, i])
tot_walk_feats.append(walk_feats)
tot_walk_feats = torch.tensor(tot_walk_feats).T
return tot_walk_feats
def calc_adj_power(self,adj, power):
t = adj
for _ in range(power - 1):
t = torch.sparse.mm(t, adj)
# set value to one
indices = t.coalesce().indices()
v = t.coalesce().values()
v = torch.tensor([1 if i > 1 else i for i in v])
diag_mask = indices[0] != indices[1]
indices = indices[:, diag_mask]
v = v[diag_mask]
t = torch.sparse_coo_tensor(indices, v, (self.N, self.N))
return t
def postprocess_k_hop_neibrs(self,sparse_adj):
diag = torch.diag(1. / sparse_adj.to_dense().sum(dim=1))
diag = diag.to_sparse()
out = torch.sparse.mm(diag, sparse_adj)
return out
def calc_k_hop_neibrs(self,k_hop=2):
adj_hop_k = self.calc_adj_power(self.adj, k_hop)
one_hop = self.k_hop_neibrs[0]
prev_hop = self.k_hop_neibrs[1:k_hop]
for p in prev_hop:
one_hop += p
final_res = adj_hop_k - one_hop
indices = final_res.coalesce().indices()
v = final_res.coalesce().values()
v = [0 if i <= 0 else 1 for i in v]
masking = []
v_len = len(v)
for i in range(v_len):
if v[i] > 0:
masking.append(i)
v = torch.tensor(v)
masking = torch.tensor(masking).long()
indices = indices[:, masking]
v = v[masking]
final_res = torch.sparse_coo_tensor(indices, v, (self.N, self.N))
return final_res
def run(self,k_hop=[2,3,4,5,6],random_walk_order=20):
walk_feature = self.calc_random_walk_feature(order=random_walk_order)
for k in k_hop:
t = self.calc_k_hop_neibrs(k)
self.k_hop_neibrs.append(t.float())
# normed_k_hop_adj = [self.postprocess_k_hop_neibrs(i.float()) for i in self.k_hop_neibrs]  # whether to use D^-1 * A
return self.k_hop_neibrs,walk_feature
def transform(t):
q, j = EdgeIndex_Processor(t.edge_index).run()
hop1, hop2, hop3, hop4, hop5, hop6 = q[0], q[1], q[2], q[3], q[4], q[5]
t.rand_feature = j
x2 = torch.concat((t.x, j), dim=1)
hop1_feature = hop1.matmul(x2)
hop2_feature = hop2.matmul(x2)
hop3_feature = hop3.matmul(x2)
hop4_feature = hop4.matmul(x2)
hop5_feature = hop5.matmul(x2)
hop6_feature = hop6.matmul(x2)
hop1 = hop1.coalesce().indices().tolist()
hop2 = hop2.coalesce().indices().tolist()
hop3 = hop3.coalesce().indices().tolist()
hop4 = hop4.coalesce().indices().tolist()
hop5 = hop5.coalesce().indices().tolist()
hop6 = hop6.coalesce().indices().tolist()
t.hop1 = hop1
t.hop2 = hop2
t.hop3 = hop3
t.hop4 = hop4
t.hop5 = hop5
t.hop6 = hop6
t.hop1_feature = hop1_feature
t.hop2_feature = hop2_feature
t.hop3_feature = hop3_feature
t.hop4_feature = hop4_feature
t.hop5_feature = hop5_feature
t.hop6_feature = hop6_feature
return t
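# Typical usage (illustrative): apply transform as a PyG pre_transform so the
# k-hop adjacencies and random-walk features are computed once per graph
# dataset = TUDataset(root='data/MUTAG', name='MUTAG', pre_transform=transform)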
if __name__=='__main__':
pass
# edges = torch.tensor([[0, 1, 0, 2, 1, 3, 2, 3], [1, 0, 2, 0, 3, 1, 3, 2]]).long()
# data_model = EdgeIndex_Processor(edges)
# q,j = data_model.run()
# print (q[0])
# print (j)
# s = Synthetic_Dataset(root='data/pyg_TRIANGLE_EX/test')
# for d in s:
# if max(d.y)>1:
# print (d.y)
| tianyao-aka/Expresive_K_hop_GNNs | QM9/func_util_V2.py | func_util_V2.py | py | 6,416 | python | en | code | 2 | github-code | 6 |
71811415868 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""code_info
@Time : 2020 2020/7/13 15:53
@Author : Blanc
@File : selenium_test.py
"""
from selenium import webdriver
browser = webdriver.Chrome()
browser.get('https://space.bilibili.com/1')
name=browser.find_element_by_id('h-name')
print(name.text)
browser.close()
| Flynn-Lu/PythonCode | 2020python实训/Day11/selenium_test.py | selenium_test.py | py | 331 | python | en | code | 0 | github-code | 6 |
5722675237 |
import os
import struct
from lxml import etree
import datetime
# | Character | Byte order | Size | Alignment |
# | --------- | ---------------------- | -------- | --------- |
# | @ | native | native | native |
# | = | native | standard | none |
# | < | little-endian | standard | none | <
# | > | big-endian | standard | none | >
# | ! | network (= big-endian) | standard | none |
# | Format | C Type | Python type | Standard size | Notes |
# | ------ | ------------------ | ----------------- | ------------- | -------- |
# | x | pad byte | no value | | |
# | c | char | bytes of length 1 | 1 | |
# | b | signed char | integer | 1 | (1), (2) |
# | ? | _Bool | bool | 1 | (1) |
# | h | short | integer | 2 | (2) |
# | H | unsigned short | integer | 2 | (2) |
# | i | int | integer | 4 | (2) |
# | I | unsigned int | integer | 4 | (2) |
# | l | long | integer | 4 | (2) |
# | L | unsigned long | integer | 4 | (2) |
# | q | long long | integer | 8 | (2) |
# | Q | unsigned long long | integer | 8 | (2) |
# | n | ssize_t | integer | | (3) |
# | N | size_t | integer | | (3) |
# | e | (6) | float | 2 | (4) |
# | f | float | float | 4 | (4) |
# | d | double | float | 8 | (4) |
# | s | char[] | bytes | | |
# | p | char[] | bytes | | |
# | P | void* | integer | | (5) |
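# For example (illustrative): '<I' reads 4 bytes as a little-endian unsigned int
# struct.unpack('<I', b'\x2a\x00\x00\x00')  # -> (42,)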
class CustomFuncs:
@staticmethod
def systemtime_16_le(bytes16):
"""
typedef struct _SYSTEMTIME {
WORD wYear;
WORD wMonth;
WORD wDayOfWeek;
WORD wDay;
WORD wHour;
WORD wMinute;
WORD wSecond;
WORD wMilliseconds;
} SYSTEMTIME, *PSYSTEMTIME, *LPSYSTEMTIME;
"""
n = struct.unpack('<8H', bytes16)
d = datetime.datetime(n[0], n[1], n[3], n[4], n[5], n[6], n[7] * 1000)
return d.isoformat()
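# Illustrative round trip (not in the original file): pack a SYSTEMTIME for
# Saturday 2020-07-04 12:30:15.250 and decode it back
# CustomFuncs.systemtime_16_le(struct.pack('<8H', 2020, 7, 6, 4, 12, 30, 15, 250))
# -> '2020-07-04T12:30:15.250000'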
@staticmethod
def hex_str(bytes0):
""" convert unknown length of bytes to hex string. """
return bytes0.hex()
class ByteSnipper:
def __init__(self, fp_bin):
self.f = open(fp_bin, 'rb')
def get_bytes(self, start_offset, byte_size):
self.f.seek(start_offset, 0)
return self.f.read(byte_size)
class TreeFuncs:
@staticmethod
def get_tree(fp):
if os.path.isfile(fp):
with open(fp, 'rb') as f:
try:
tree = etree.parse(f)
except Exception as e:
print(f'Error: Failed to open the input XML file! fp={fp}')
print(e)
quit()
else:
return tree
else:
print(f'fp="{fp}" is not a file!')
quit()
@staticmethod
def write_tree(fp, tree):
with open(fp, 'wb') as f:
tree.write(f, encoding='utf-8', xml_declaration=True)
def parse_to_xml(fp_bin, fp_xml):
# parse binary data
byte_snipper = ByteSnipper(fp_bin)
# parse xml settings
tree_root = TreeFuncs.get_tree(fp_xml)
# loop through <Pattern> element
for pattern in tree_root.xpath('/Patterns/Pattern'):
data_result = "Not Set Error"
# get start offset in integer
try:
start_offset_int = int(pattern.get('start_offset'), 0)
except Exception as e:
data_result = f"{e.__class__.__name__}: start_offset"
else:
# Unpack Format -------------------
if pattern.get('unpack_format') is not None:
data_format = pattern.get('unpack_format')
print(f'data_format={data_format}')
# Validate data length
try:
data_length = struct.calcsize(data_format)
except Exception as e:
data_result = f'{e.__class__.__name__}: data_length'
else:
data_bytes = byte_snipper.get_bytes(start_offset_int, data_length)
# if unpack_index is not specified, return tuple.
if pattern.get('unpack_index') is None:
data_result = str(struct.unpack(data_format, data_bytes))
else:
# Validate unpack index type
try:
unpack_index = int(pattern.get('unpack_index'))
except Exception as e:
data_result = f'{e.__class__.__name__}: unpack_index'
else:
# Validate unpack index range
try:
data_result = str(struct.unpack(data_format, data_bytes)[unpack_index])
except Exception as e:
data_result = f"{e.__class__.__qualname__}: unpack_index"
# Code Page -----------------------
elif pattern.get('code_page') is not None:
decode_error = pattern.get('decode_error') if pattern.get('decode_error') is not None else 'replace'
data_length = int(pattern.get('length'), 0)
data_bytes = byte_snipper.get_bytes(start_offset_int, data_length)
data_result = data_bytes.decode(pattern.get('code_page'), decode_error).rstrip(' \0\r\n\t')
# Function -------------------------
elif pattern.get('function') is not None:
data_length = int(pattern.get('length'), 0)
custom_fnc = getattr(CustomFuncs, pattern.get('function'))
data_bytes = byte_snipper.get_bytes(start_offset_int, data_length)
data_result = custom_fnc(data_bytes)
# Nested -----------------------
elif pattern.get('nested') is not None:
pass
# set XML element value
finally:
pattern.text = data_result
return tree_root
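# An input <Pattern> element might look like this (illustrative attribute
# values, not taken from the project's sample files):
# <Pattern start_offset="0x10" unpack_format="<2I" unpack_index="0"/>
# <Pattern start_offset="0x20" length="32" code_page="utf-8"/>
# <Pattern start_offset="0x40" length="16" function="systemtime_16_le"/>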
def test_from_cmd():
fp_data = 'bintoxml_data.dat'
fp_xml_in = 'bintoxml_input.xml'
fp_xml_out = 'bintoxml_output.xml'
tree_root = parse_to_xml(fp_data, fp_xml_in)
TreeFuncs.write_tree(fp_xml_out, tree_root)
if __name__ == "__main__":
test_from_cmd()
| HappyKimoto/BinaryToXml | bintoxml.py | bintoxml.py | py | 7,191 | python | en | code | 0 | github-code | 6 |
35684957719 |
# Given the head of a LinkedList and two positions 'p' and 'q', reverse the LinkedList from position 'p' to 'q'.
class Node:
def __init__(self, v, n=None):
self.value = v
self.next = n
def print_ll(self):
while self is not None:
print(self.value)
self = self.next
def reverse_sub_ll(head, p, q):
# find the node just before p and the node just after q
# (note: this assumes p is not the head node itself)
p_temp = head
q_temp = head
temp = head
while temp is not None:
if temp.next == p:
p_temp = temp
if temp == q:
q_temp = temp.next
temp = temp.next
# reverse the p..q sublist; seeding 'previous' with q's successor keeps
# the reversed part connected to the rest of the list
previous = q_temp
current = p
next = None
while next is not q_temp:
next = current.next
current.next = previous
previous = current
current = next
# link the node before p to the new front of the reversed part (q)
p_temp.next = previous
def main():
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.print_ll()
reverse_sub_ll(head, head.next, head.next.next.next)
print("-- After Reversing --")
head.print_ll()
pass
if __name__ == '__main__':
main()
| hitesh-goel/ds-algo | grokking-tci/6_reverse_ll/reverse_sub_ll.py | reverse_sub_ll.py | py | 1,144 | python | en | code | 0 | github-code | 6 |
11726198344 |
import sys
import codecs
import os
import numpy as np
import torch
from torch.autograd import Variable
from .constants import MAX_CHAR_LENGTH, NUM_CHAR_PAD, PAD_CHAR, PAD_POS, PAD_TYPE, ROOT_CHAR, ROOT_POS, ROOT_TYPE, END_CHAR, END_POS, END_TYPE, _START_VOCAB, ROOT, PAD_ID_WORD, PAD_ID_CHAR, PAD_ID_TAG, DIGIT_RE
from .conllu_reader import CoNLLReader
from .dictionary import Dictionary
def init_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def create_dict(train_path, dev_path, test_path, word_embed_dict, dry_run):
word_dictionary = Dictionary('word', default_value=True, singleton=True)
char_dictionary = Dictionary('character', default_value=True)
pos_dictionary = Dictionary('pos', default_value=True)
type_dictionary = Dictionary('type', default_value=True)
xpos_dictionary = Dictionary('xpos', default_value=True)
char_dictionary.add(PAD_CHAR)
pos_dictionary.add(PAD_POS)
xpos_dictionary.add(PAD_POS)
type_dictionary.add(PAD_TYPE)
char_dictionary.add(ROOT_CHAR)
pos_dictionary.add(ROOT_POS)
xpos_dictionary.add(ROOT_POS)
type_dictionary.add(ROOT_TYPE)
char_dictionary.add(END_CHAR)
pos_dictionary.add(END_POS)
xpos_dictionary.add(END_POS)
type_dictionary.add(END_TYPE)
vocab = dict()
with codecs.open(train_path, 'r', 'utf-8', errors='ignore') as file:
li = 0
for line in file:
line = line.strip()
if len(line) == 0 or line[0]=='#':
continue
tokens = line.split('\t')
if '-' in tokens[0] or '.' in tokens[0]:
continue
for char in tokens[1]:
char_dictionary.add(char)
word = DIGIT_RE.sub(b"0", str.encode(tokens[1])).decode()
pos = tokens[3] if tokens[4]=='_' else tokens[3]+'$$$'+tokens[4]
xpos = tokens[4]
typ = tokens[7]
pos_dictionary.add(pos)
xpos_dictionary.add(xpos)
type_dictionary.add(typ)
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
li = li + 1
if dry_run and li == 100:
break
# collect singletons
min_occurence = 1
singletons = set([word for word, count in vocab.items() if count <= min_occurence])
# if a singleton is in the pretrained embedding dict, raise its count to min_occurence + 1 so it is kept
for word in vocab.keys():
if word in word_embed_dict or word.lower() in word_embed_dict:
vocab[word] += 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
vocab_list = [word for word in vocab_list if word in _START_VOCAB or vocab[word] > min_occurence]
max_vocabulary_size = 50000
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
def expand_vocab(data_paths):
vocab_set = set(vocab_list)
for data_path in data_paths:
if os.path.exists(data_path):
with codecs.open(data_path, 'r', 'utf-8', errors='ignore') as file:
li = 0
for line in file:
line = line.strip()
if len(line) == 0 or line[0]=='#':
continue
tokens = line.split('\t')
if '-' in tokens[0] or '.' in tokens[0]:
continue
for char in tokens[1]:
char_dictionary.add(char)
word = DIGIT_RE.sub(b"0", str.encode(tokens[1])).decode()
pos = tokens[3] if tokens[4]=='_' else tokens[3]+'$$$'+tokens[4]
typ = tokens[7]
xpos = tokens[4]
pos_dictionary.add(pos)
type_dictionary.add(typ)
xpos_dictionary.add(xpos)
if word not in vocab_set and (word in word_embed_dict or word.lower() in word_embed_dict):
vocab_set.add(word)
vocab_list.append(word)
li = li + 1
if dry_run and li==100:
break
expand_vocab([dev_path, test_path])
for word in vocab_list:
word_dictionary.add(word)
if word in singletons:
word_dictionary.add_singleton(word_dictionary.get_index(word))
word_dictionary.close()
char_dictionary.close()
pos_dictionary.close()
xpos_dictionary.close()
type_dictionary.close()
return word_dictionary, char_dictionary, pos_dictionary, xpos_dictionary, type_dictionary
def read_data(source_path, word_dictionary, char_dictionary, pos_dictionary, xpos_dictionary, type_dictionary, bptt, max_size=None, normalize_digits=True, symbolic_root=False, symbolic_end=False, dry_run=False):
max_char_length = 0
print('Reading data from %s' % source_path)
counter = 0
reader = CoNLLReader(source_path, word_dictionary, char_dictionary, pos_dictionary, type_dictionary, xpos_dictionary, None)
inst = reader.getNext(normalize_digits=normalize_digits, symbolic_root=symbolic_root, symbolic_end=symbolic_end)
data = []
while inst is not None and (not dry_run or counter < 100):
inst_size = inst.length()
sent = inst.sentence
if len(sent.words) > bptt:
# generate sequences
num_sequences = len(sent.words) - bptt
for seq_no in range(num_sequences):
word_ids, char_id_seqs, pos_ids, xpos_ids, tar_ids = [], [], [], [], []
for i in range(bptt):
word_ids.append(sent.word_ids[seq_no+i])
tar_ids.append(sent.word_ids[seq_no+i+1])
char_id_seqs.append(sent.char_id_seqs[seq_no+i])
pos_ids.append(inst.pos_ids[seq_no+i])
xpos_ids.append(inst.xpos_ids[seq_no+i])
data.append([word_ids, char_id_seqs, pos_ids, tar_ids, xpos_ids])
max_len = max([len(char_seq) for char_seq in sent.char_seqs])
max_char_length = max(max_len, max_char_length)
inst = reader.getNext(normalize_digits=normalize_digits, symbolic_root=symbolic_root, symbolic_end=symbolic_end)
counter += 1
reader.close()
return data, max_char_length
def read_data_to_variable(source_path, word_dictionary, char_dictionary, pos_dictionary, xpos_dictionary, type_dictionary, bptt, max_size=None, normalize_digits=True, symbolic_root=False, symbolic_end=False, use_gpu=False, volatile=False, dry_run=False):
data, max_char_length = read_data(source_path, word_dictionary, char_dictionary, pos_dictionary, xpos_dictionary, type_dictionary, bptt, max_size=max_size, normalize_digits=normalize_digits, symbolic_root=symbolic_root, symbolic_end=symbolic_end, dry_run=dry_run)
wid_inputs = np.empty([len(data), bptt], dtype=np.int64)
cid_inputs = np.empty([len(data), bptt, max_char_length], dtype=np.int64)
pid_inputs = np.empty([len(data), bptt], dtype=np.int64)
xpid_inputs = np.empty([len(data), bptt], dtype=np.int64)
wid_outputs = np.empty([len(data), bptt], dtype=np.int64)
for di in range(len(data)):
word_ids, char_id_seqs, pos_ids, tar_wid, xpos_ids = data[di]
wid_inputs[di, :] = word_ids
for c, cids in enumerate(char_id_seqs):
cid_inputs[di, c, :len(cids)] = cids
cid_inputs[di, c, len(cids):] = PAD_ID_CHAR
pid_inputs[di, :] = pos_ids
xpid_inputs[di, :] = xpos_ids
wid_outputs[di, :] = tar_wid
words = Variable(torch.from_numpy(wid_inputs), requires_grad=False)
chars = Variable(torch.from_numpy(cid_inputs), requires_grad=False)
poss = Variable(torch.from_numpy(pid_inputs), requires_grad=False)
xposs = Variable(torch.from_numpy(xpid_inputs), requires_grad=False)
targets = Variable(torch.from_numpy(wid_outputs), requires_grad=False)
if use_gpu:
words = words.cuda()
chars = chars.cuda()
poss = poss.cuda()
targets = targets.cuda()
xposs = xposs.cuda()
return words, chars, poss, targets, xposs
def get_batch_variable(data, batch_size):
words, chars, poss, targets, xposs = data
index = torch.randperm(words.size(0)).long()[:batch_size]
if words.is_cuda:
index = index.cuda()
return words[index], chars[index], poss[index], targets[index], xposs[index]
def iterate_batch_variable(data, batch_size):
words, chars, poss, targets, xposs = data
index = torch.arange(0, words.size(0), dtype=torch.long)
if words.is_cuda:
index = index.cuda()
num_batches = words.size(0) // batch_size
for bi in range(num_batches):
idx = index[bi * batch_size: (bi+1)*batch_size]
yield words[idx], chars[idx], poss[idx], targets[idx], xposs[idx]
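# Typical loop over the prepared variables (illustrative; the dictionary
# objects come from create_dict and the path is a placeholder):
# data = read_data_to_variable('train.conllu', word_d, char_d, pos_d, xpos_d, type_d, bptt=35)
# for words, chars, poss, targets, xposs in iterate_batch_variable(data, batch_size=16):
#     ...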
| ganeshjawahar/ELMoLex | dat/nlm_data.py | nlm_data.py | py | 8,163 | python | en | code | 12 | github-code | 6 |
4488441296 |
"""
"""
import argparse
import copy
import functools
import itertools
# import operator
import os
from pathlib import Path
import re
import galsim
import joblib
import metadetect
import ngmix
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.dataset as ds
import pyarrow.parquet as pq
import yaml
from chromatic_shear_bias.generators import generators
@functools.cache
def read_sed_file(file_name, wave_type, flux_type):
return galsim.sed.SED(file_name, wave_type, flux_type)
def build_star(star_params, sed_dir):
_standard_dict = {
"lte*": "starSED/phoSimMLT",
"bergeron*": "starSED/wDs",
"k[mp]*": "starSED/kurucz",
}
wave_type = "Nm"
flux_type = "flambda"
sed_filename = star_params.get("sedFilename").strip()
if not sed_filename.endswith(".gz"):
# Some files are missing ".gz" in their suffix; if this is the case,
# append to the current suffix
sed_filename += ".gz"
path_name = Path(sed_filename)
for k, v in _standard_dict.items():
matched = False
if path_name.match(k):
sed_path = Path(sed_dir) / v / path_name
matched = True
break # we should only have one match
if not matched:
raise ValueError(
f"Filename {sed_filename} does not match any known patterns in {sed_dir}"
)
if not sed_path.exists():
raise ValueError(f"Filename {sed_filename} not found in {sed_dir}")
sed_file = sed_path.as_posix()
sed = read_sed_file(sed_file, wave_type, flux_type)
sed = sed.withFluxDensity(1, wavelength=600)
# print(f"\tBuilding star took {end - start} s")
return galsim.DeltaFunction() * sed
def DC2_generator(predicate=None, seed=None):
dataset = "/oak/stanford/orgs/kipac/users/smau/dc2_stellar_healpixel_parquet"
columns = [
"^sedFilename$",
]
sed_dir = "/oak/stanford/orgs/kipac/users/smau/"
batch_generator = generators.generate_batches(dataset, columns=columns, predicate=predicate)
for batch in batch_generator:
row_generator = generators.generate_rows(batch, n_sample=batch.num_rows, seed=seed)
for row in row_generator:
built = build_star(row, sed_dir)
yield built
| LSSTDESC/chromatic-shear-bias | chromatic_shear_bias/generators/stars.py | stars.py | py | 2,290 | python | en | code | 4 | github-code | 6 |
36712615798 |
from .mail import on_warning_last_data_upd
import threading
from datetime import datetime
class SensorDataSignals:
def __init__(self):
self.date = datetime.now()
# pass the callback and its argument separately; calling it inline would
# fire the warning immediately instead of after the 10 s delay
self.timer = threading.Timer(10, on_warning_last_data_upd, args=[self.date])
def time_warning(self, sender, **kwargs):
if self.timer is not None:
self.timer.cancel()
self.date = datetime.now()
self.timer = threading.Timer(10, on_warning_last_data_upd, args=[self.date])
self.timer.start()
| novelsk/AtlasDjango | app/atlas/signals.py | signals.py | py | 513 | python | en | code | 0 | github-code | 6 |
31106755329 |
'''
Given a binary tree t and an integer s, determine whether there is a root to leaf path in t such that the sum of vertex values equals s.
Example
For
t = {
"value": 4,
"left": {
"value": 1,
"left": {
"value": -2,
"left": null,
"right": {
"value": 3,
"left": null,
"right": null
}
},
"right": null
},
"right": {
"value": 3,
"left": {
"value": 1,
"left": null,
"right": null
},
"right": {
"value": 2,
"left": {
"value": -2,
"left": null,
"right": null
},
"right": {
"value": -3,
"left": null,
"right": null
}
}
}
}
and
s = 7,
the output should be hasPathWithGivenSum(t, s) = true.
'''
class Tree(object):
def __init__(self, x):
self.value = x
self.left = None
self.right = None
def hasPathWithGivenSum(t, s):
if t is None:
return s == 0
elif t.left is None and t.right is not None:
return hasPathWithGivenSum(t.right,s-t.value)
elif t.right is None and t.left is not None:
return hasPathWithGivenSum(t.left,s-t.value)
else:
return hasPathWithGivenSum(t.left,s-t.value) or hasPathWithGivenSum(t.right,s-t.value)
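# Quick check against the example in the docstring (added for illustration):
if __name__ == '__main__':
t = Tree(4)
t.left = Tree(1)
t.left.left = Tree(-2)
t.left.left.right = Tree(3)
t.right = Tree(3)
t.right.left = Tree(1)
t.right.right = Tree(2)
t.right.right.left = Tree(-2)
t.right.right.right = Tree(-3)
print(hasPathWithGivenSum(t, 7))
# True: the root-to-leaf path 4 -> 3 -> 2 -> -2 sums to 7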
| JorG96/DataStructures | hasPathWithGivenSum.py | hasPathWithGivenSum.py | py | 1,521 | python | en | code | 0 | github-code | 6 |
43140645221 |
"""``atomicmass`` - Return the atomic mass of an atom or molecule.
This is really just a wrapper for
`periodictable
<https://periodictable.readthedocs.io/en/latest/index.html>`_
but returns the mass as an `astropy quantity
<http://docs.astropy.org/en/stable/units/index.html>`_.
"""
import periodictable as pt
import astropy.units as u
def atomicmass(species):
r"""Return the atomic mass of an atom or molecule.
**Parameters**
species
Chemical formula requested species. See `periodictable
<https://periodictable.readthedocs.io/en/latest/index.html>`_
for formatting options.
**Returns**
The atomicmass of *species* as an astropy quantity with units = AMU
:math:`(1\, \mathrm{AMU} = 1.660539 \times 10^{-27}\, \mathrm{kg})`.
If ``periodictable`` returns a ValueError, *None* is returned.
**Examples**
::
>>> from nexoclom.atomicdata import atomicmass
>>> print(atomicmass('Na'))
22.98977 u
>>> print(atomicmass('H2O'))
18.01528 u
>>> print(atomicmass('X'))
WARNING: mathMB.atomicmass: X not found
None
"""
el = [e.symbol for e in pt.elements]
if species in el:
atom = getattr(pt, species)
mass = atom.mass * u.u
else:
try:
mass = pt.formula(species).mass * u.u
except ValueError:
print(f'WARNING: mathMB.atomicmass: {species} not found')
mass = None
return mass
| mburger-stsci/nexoclom | nexoclom/atomicdata/atomicmass.py | atomicmass.py | py | 1,498 | python | en | code | 0 | github-code | 6 |
2223517855 |
from math import cos, sin, radians
class Day12():
def __init__(self, input, target):
instructions = input.strip()
self.instructions = [[x[0], int(x[1:])]
for x in instructions.split('\n')]
self.direction = [1, 0]
self.location = [0, 0]
self.waypoint = [10, 1]
self.target = target
def run_instructions(self):
for instruction in self.instructions:
self.move(instruction)
def move(self, instruction):
movement = [0, 0]
if instruction[0] == 'N':
movement = [0, instruction[1]]
elif instruction[0] == 'S':
movement = [0, -instruction[1]]
elif instruction[0] == 'E':
movement = [instruction[1], 0]
elif instruction[0] == 'W':
movement = [-instruction[1], 0]
elif instruction[0] == 'L':
self.rotate(instruction)
elif instruction[0] == 'R':
self.rotate(instruction)
elif instruction[0] == 'F':
if self.target == 'ship':
movement = [x * instruction[1] for x in self.direction]
elif self.target == 'waypoint':
movement = [x * instruction[1] for x in self.waypoint]
# 'F' always moves the ship; in waypoint mode every other movement
# instruction (N/S/E/W) moves the waypoint instead of the ship
if self.target == 'ship' or instruction[0] == 'F':
self.location = [sum(x) for x in zip(self.location, movement)]
else:
self.waypoint = [sum(x) for x in zip(self.waypoint, movement)]
print(instruction)
print(self.location, self.waypoint, self.direction)
def get_distance(self):
return sum([abs(x) for x in self.location])
def rotate(self, instruction):
if self.target == 'ship':
target_direction = self.direction
elif self.target == 'waypoint':
target_direction = self.waypoint
if instruction[0] == 'R':
rotated = [
target_direction[0] * cos(radians(instruction[1]))
+ target_direction[1] * sin(radians(instruction[1])),
target_direction[0] * -sin(radians(instruction[1]))
+ target_direction[1] * cos(radians(instruction[1]))
]
else:
rotated = [
target_direction[0] * cos(radians(instruction[1]))
+ target_direction[1] * -sin(radians(instruction[1])),
target_direction[0] * sin(radians(instruction[1]))
+ target_direction[1] * cos(radians(instruction[1]))
]
if self.target == 'ship':
self.direction = [round(x) for x in rotated]
elif self.target == 'waypoint':
self.waypoint = [round(x) for x in rotated]
test_input = '''F10
N3
F7
R90
F11'''
test_part_1 = Day12(test_input, 'ship')
test_part_1.run_instructions()
print(test_part_1.get_distance())
test_part_2 = Day12(test_input, 'waypoint')
test_part_2.run_instructions()
print(test_part_2.get_distance())
with open('input/day_12.txt', 'r') as f:
input = f.read()
actual_part_1 = Day12(input, 'ship')
actual_part_1.run_instructions()
print(actual_part_1.get_distance())
actual_part_2 = Day12(input, 'waypoint')
actual_part_2.run_instructions()
print(actual_part_2.get_distance())
| thekakkun/coding_challenges | advent_of_code/2020/day_12.py | day_12.py | py | 4,925 | python | en | code | 0 | github-code | 6 |
27959312759 |
import torch
# Define Net
class TestNet(torch.nn.Module):
def __init__(self):
super(TestNet, self).__init__()
def forward(self, x1, x2):
y1 = torch.add(x1, 10)
y2 = torch.add(x2, 5)
y3 = torch.add(y1, y2)
y4 = torch.add(y3, 10)
return y4
def sample1():
x1 = torch.tensor([[1,2,3],[4,5,6]])
x2 = torch.tensor([[10,20,20],[40,50,60]])
model = TestNet()
model.eval()
output = model(x1, x2)
my_script_module = torch.jit.script(model)
frozen_model = torch.jit.freeze(my_script_module)
print(frozen_model.graph)
torch.jit.save(frozen_model, "simple_jit_add.torchscript")
print("x1:{}".format(x1))
print("x2:{}".format(x2))
print("output:{}".format(output))
if __name__ == '__main__':
sample1()
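# Loading the frozen module back for inference (illustrative):
# loaded = torch.jit.load("simple_jit_add.torchscript")
# print(loaded(x1, x2))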
| SAITPublic/PimAiCompiler | examples/runtime/python/ir_net/simple_add.py | simple_add.py | py | 805 | python | en | code | 2 | github-code | 6 |
33016130821 |
from flask import Flask, flash, json, request, redirect, Response, url_for
from flask_cors import CORS
app = Flask(__name__)
app.config['SESSION_TYPE'] = 'filesystem'
app.config.from_envvar('APP_SETTINGS')
CORS(app)
@app.route('/ping', methods=['GET'])
def ping():
response = app.response_class(
response='pong',
status=200,
mimetype='application/json'
)
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
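# Quick smoke test (illustrative; APP_SETTINGS must point at a config file):
#   APP_SETTINGS=settings.cfg python api.py
#   curl http://localhost:5000/ping   # -> pong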
| aaronjenkins/flask-api-template | api.py | api.py | py | 478 | python | en | code | 0 | github-code | 6 |
7074303331 |
#Note: 1)The detection works only on grayscale images. So it is important to convert the color image to grayscale.
# 2) detectMultiScale function is used to detect the faces.
# It takes 3 arguments — the input image, scaleFactor and minNeighbours. scaleFactor specifies how much the image size is reduced with each scale.
# minNeighbours specifies how many neighbors each candidate rectangle should have to retain it.
# 3) faces contains a list of coordinates for the rectangular regions where faces were found.
import numpy as np
import cv2
# Load the cascade
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')
font = cv2.FONT_HERSHEY_SIMPLEX
# Read the input image
img = cv2.imread('images/input/img.jpg')
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Convert into hsvscale
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Detect faces
# faces = face_cascade.detectMultiScale(
# gray,
# scaleFactor=1.1,
# minNeighbors=5,
# minSize=(200, 200),
# flags=cv2.CASCADE_SCALE_IMAGE
# )
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
#most common parameters of the detectMultiScale function
# scaleFactor : Parameter specifying how much the image size is reduced at each image scale.
# minNeighbors : Parameter specifying how many neighbors each candidate rectangle should have to retain it.
# minSize : Minimum possible object size. Objects smaller than that are ignored.
# maxSize : Maximum possible object size. Objects larger than that are ignored.
# Draw rectangle around the faces
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0), 3)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
cv2.putText(img,'Face',(x, y), font, 1,(255,0,0),2)
#eyes
eyes = eye_cascade.detectMultiScale(roi_gray)
#detect eyes and draw rectangle around it
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv2.putText(img,'Eye',(x + ex,y + ey), 1, 1, (0, 255, 0), 1)
#smile
smile = smile_cascade.detectMultiScale(
roi_gray,
scaleFactor= 1.16,
minNeighbors=35,
minSize=(25, 25),
flags=cv2.CASCADE_SCALE_IMAGE
)
#detect smile and draw rectangle around it
for (sx, sy, sw, sh) in smile:
cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (255, 0, 0), 2)
cv2.putText(img,'Smile',(x + sx,y + sy), 1, 1, (0, 255, 0), 1)
#Display Number of Faces
cv2.putText(img,'Number of Faces : ' + str(len(faces)),(40, 40), font, 1,(255,0,0),2)
#save the cropped faces
crop_face = img[y:y + h, x:x + w]
cv2.imwrite('images/output/' + str(w) + str(h) + '_faces.jpg', crop_face)
# Display the output
cv2.imshow('Original', img)
cv2.imshow('Detected Gray', gray)
cv2.imshow('Detected HSV', hsv)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('images/output/detected_image.jpg',img)
cv2.destroyAllWindows()
| amanpanditap/Python_Projects | facedetection/facedetection-image.py | facedetection-image.py | py | 3,295 | python | en | code | 3 | github-code | 6 |
9781885238 |
from egzP2atesty import runtests
def partition(tab,p,r,indeksy):
x=tab[indeksy[r]][1]
i=p-1
for j in range(p,r):
if tab[indeksy[j]][1]>=x:
i+=1
tab[indeksy[i]],tab[indeksy[j]]=tab[indeksy[j]],tab[indeksy[i]]
tab[indeksy[i+1]],tab[indeksy[r]]=tab[indeksy[r]],tab[indeksy[i+1]]
return i+1
def quickSort(tab,p,r,indeksy):
if p<r:
q=partition(tab,p,r,indeksy)
quickSort(tab,p,q-1,indeksy)
quickSort(tab,q+1,r,indeksy)
def zdjecie(T, m, k):
# please write your own implementation here
n=len(T)
indeksy=[0 for _ in range(n)]
starts=[0 for _ in range(m)]
ends=[0 for _ in range(m)]
items=0
width=m+k-1
currEnd=-1
for i in range(m):
starts[i]=currEnd+1
currEnd+=width
ends[i]=currEnd
width-=1
kolumna=0
rzad=0
while items<n:
if starts[rzad]+kolumna<=ends[rzad]:
indeksy[starts[rzad]+kolumna]=items
items+=1
rzad+=1
if rzad>=m:
rzad=0
kolumna+=1
quickSort(T, 0, len(T) - 1,indeksy)
return None
runtests(zdjecie, all_tests=False)
| wiksat/AlghorithmsAndDataStructures | ASD/BitAlgo-Summer/egzP2a/egzP2a.py | egzP2a.py | py | 1,169 | python | pl | code | 0 | github-code | 6 |
23386753962 |
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.status import HTTP_404_NOT_FOUND
from scraper.models import PartsDetails
from scraper.serializers import PartsDetailsSerializer
# Create your views here.
@api_view(['GET'])
def company_parts(request, format=None):
try:
filter_params = {}
company_name = request.query_params.get('manufacturer')
if company_name:
filter_params['company_name'] = company_name
category_name = request.query_params.get('category')
if category_name:
filter_params['category_name'] = category_name
model_name = request.query_params.get('model')
if model_name:
filter_params['model_name'] = model_name
print(filter_params)
parts_details = PartsDetails.objects.filter(**filter_params).values(
'company_name', 'category_name', 'model_name', 'part_name')
print(parts_details)
if not parts_details:
return Response(
"No resource found, please check the query parameters "
"and values in URL!", status=HTTP_404_NOT_FOUND)
except PartsDetails.DoesNotExist:
print("error")
return Response(status=HTTP_404_NOT_FOUND)
if request.method == 'GET':
parts_details_serializer = PartsDetailsSerializer(parts_details,
many=True)
return Response(parts_details_serializer.data)
| spsree4u/urparts_scraper | scraper/views.py | views.py | py | 1,537 | python | en | code | 0 | github-code | 6 |
37009080740 |
import os
import pathlib
import requests
from flask import Flask, session, abort, redirect, request, render_template, make_response
from google.oauth2 import id_token
from google_auth_oauthlib.flow import Flow
from pip._vendor import cachecontrol
import google.auth.transport.requests
from static.py.chat import socketio
from flask_sqlalchemy import SQLAlchemy
from static.py.models import User, db
import uuid
from static.py.user_repository import _user_repo as users, create_username
from static.py.PassHandler import PassHandler
app = Flask(__name__)
app.secret_key = "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"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.abspath('static/db/users.db')
# db = SQLAlchemy(app)
socketio.init_app(app)
db.init_app(app)
with app.app_context():
db.create_all()
# db.drop_all()
# db.session.commit()
pass_handler = PassHandler()
# Google Login Functionality
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
GOOGLE_CLIENT_ID = "301822394319-o8gridp2md6qcpc0uk0clkug0puecbio.apps.googleusercontent.com"
client_secrets_file = os.path.join(pathlib.Path(__file__).parent, "client_secret.json")
flow = Flow.from_client_secrets_file(
client_secrets_file=client_secrets_file,
scopes=["https://www.googleapis.com/auth/userinfo.profile", "https://www.googleapis.com/auth/userinfo.email",
"openid"],
redirect_uri="http://127.0.0.1:5000/callback"
)
def login_is_required(function):
def wrapper(*args, **kwargs):
if "google" in session and "google_id" not in session:
return abort(401) # Authorization required
elif "email" in session and "name" not in session:
return abort(401)
else:
return function()
return wrapper
@app.route("/login")
def login():
authorization_url, state = flow.authorization_url()
session["state"] = state
return redirect(authorization_url)
@app.route("/callback")
def callback():
flow.fetch_token(authorization_response=request.url)
if not session["state"] == request.args["state"]:
abort(500) # State does not match!
credentials = flow.credentials
request_session = requests.session()
cached_session = cachecontrol.CacheControl(request_session)
token_request = google.auth.transport.requests.Request(session=cached_session)
id_info = id_token.verify_oauth2_token(
id_token=credentials._id_token,
request=token_request,
audience=GOOGLE_CLIENT_ID,
clock_skew_in_seconds=10
)
session["login_type"] = "google"
session["google_id"] = id_info.get("sub")
session["name"] = id_info.get("name")
session["given_name"] = id_info.get("given_name")
session["email"] = id_info.get("email")
session["profile_picture"] = id_info.get("picture")
session["family_name"] = id_info.get("family_name") if id_info.get("family_name") != None else ""
if users.get_user_by_email(session["email"]) is None:
username = create_username(session["given_name"], session["family_name"])
user = users.create_user(session["given_name"], session["family_name"], session["email"], username, str(uuid.uuid4()))
session["username"] = user.username
return redirect("/home")
else:
user = users.get_user_by_email(session["email"])
session["username"] = user.username
return redirect("/home")
# Email Login Functionality
@app.route("/signup")
def signup():
return render_template('signup.html')
@app.route("/elogin")
def elogin():
return render_template('elogin.html')
@app.route("/loginuser", methods=['POST'])
def loginuser():
user = users.get_user_by_username(request.form['username'])
if user is None:
# print("User not found")
return render_template('elogin.html', error="User not found")
if pass_handler.verify_password(request.form['password'], user.password) is False:
# print("Incorrect password")
return render_template('elogin.html', error="Incorrect password")
print(user.username)
session["username"] = user.username
session["name"] = user.username
session["given_name"] = user.first_name
# print(user.first_name)
session["email"] = user.email
session["profile_picture"] = "/static/images/userAccount.jpg"
return redirect('/home')
@app.route("/setuser", methods=['POST'])
def setuser():
# print(request.form['username'])
user = users.get_user_by_username(request.form['username'])
# print(user)
if user is not None:
return render_template('signup.html', error="Username already exists")
elif users.get_user_by_email(request.form['email']) is not None:
return render_template('signup.html', error="Email already exists")
elif request.form['password'] != request.form['confirm_password']:
return render_template('signup.html', error="Passwords do not match")
else:
user = users.create_user(request.form['fname'], request.form['lname'], request.form['email'], request.form['username'], pass_handler.hash_password(request.form['password']))
session["login_type"] = "email"
session["name"] = user.username
session["given_name"] = user.first_name
session["email"] = user.email
session["profile_picture"] = "/static/images/userAccount.jpg"
return redirect('/home')
@app.route("/spectate")
def spectate():
return render_template("spectate.html")
@app.route("/logout")
def logout():
session.clear()
return redirect("/")
@app.route("/inbox")
def inbox():
return render_template("inbox.html")
@app.route("/profile")
def profile():
global users
user = users.get_user_by_username(session.get("username"))
print(session.get("username"))
# if user.get_wins(session.get("username")) is None:
# users.add_win(session.get("username"))
# users.add_loss(session.get("username"))
# # wins = 0
# # losses = 0
# else:
# wins = users.get_wins(session.get("username"))
# losses = users.get_losses(session.get("username"))
user_info = {
"name": session.get("given_name"),
"full_name": session.get("name"),
"email": session.get("email"),
"profile_picture": session.get("profile_picture"),
"wins": users.get_wins(session.get("username")),
"losses": users.get_losses(session.get("username")),
}
return render_template("profile.html", user_info=user_info)
@app.route("/host")
def host():
return render_template("host.html")
@app.route("/join")
def join():
return render_template("join.html")
@app.route("/game")
def game():
lobby_name = request.args['lobby']
# spectate = request.args['spectate']
user_info = {
"name": session.get("given_name"),
"full_name": session.get("name"),
"email": session.get("email"),
"profile_picture": session.get("profile_picture"),
"wins": users.get_wins(session.get("username")),
"losses": users.get_losses(session.get("username")),
}
user_info["name"] = session.get("given_name")
user_info["profile_picture"] = "static/images/userAccount.jpg"
# print(user_info)
return render_template("game.html", user_info=user_info, lobby_name=lobby_name)
@app.route("/")
def index():
if session.get("name") is not None:
return redirect("/home")
return render_template("index.html")
@app.route("/home")
@login_is_required
def home():
user_name = session.get("given_name")
return render_template("home.html", user_name=user_name)
@app.route("/1player")
def onePlayer():
return render_template("player1.html")
@app.route("/leaderboard")
def leaderboard():
global users
top_users = users.get_top_users(5)
# for user in top_users:
# print(user.username, user.elo)
return render_template("leaderboard.html", top_users=top_users, length=len(top_users))
@app.route("/settings")
def settings():
return render_template("settings.html")
if __name__ == "__main__":
socketio.run(app, debug=True, allow_unsafe_werkzeug=True)
|
SeanDaBlack/checkmasters
|
app.py
|
app.py
|
py
| 8,145 |
python
|
en
|
code
| 0 |
github-code
|
6
|
86625733247
|
#! /usr/bin/env python
import os
import sys
import time
import numpy as np
from multiprocess import Pool
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
from metadata import *
from data_manager import *
from learning_utilities import *
###################################
import json
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Apply classifiers')
parser.add_argument("stack", type=str, help="stack")
parser.add_argument("filenames", type=str, help="Filenames")
parser.add_argument("classifier_id", type=int, help="classifier id")
args = parser.parse_args()
stack = args.stack
filenames = json.loads(args.filenames)
classifier_id = args.classifier_id
classifier_properties = classifier_settings.loc[classifier_id]
input_img_version = classifier_properties['input_img_version']
cnn_model = dataset_settings.loc[int(classifier_settings.loc[classifier_id]['train_set_id'].split('/')[0])]['network_model']
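# train_set_id is assumed to look like "<dataset id>/<suffix>"; the first
# component (split off above) indexes dataset_settings to recover the CNN
# used for feature extraction.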
svm_id = int(classifier_properties['svm_id'])
############################
# if classifier_id == 12:
# available_classifiers = {2: DataManager.load_classifiers(classifier_id=2),
# 10: DataManager.load_classifiers(classifier_id=10)}
# else:
available_classifiers = {svm_id: DataManager.load_classifiers(classifier_id=svm_id)}
def clf_predict(stack, fn, model_name):
if is_invalid(stack=stack, fn=fn):
return
try:
features = DataManager.load_dnn_features(stack=stack, model_name=model_name, fn=fn, input_img_version=input_img_version)
except Exception as e:
sys.stderr.write('%s\n' % e.message)
return
# actual_setting = resolve_actual_setting(setting=classifier_id, stack=stack, fn=fn)
# clf_allClasses_ = available_classifiers[actual_setting]
clf_allClasses_ = available_classifiers[svm_id]
for structure, clf in clf_allClasses_.iteritems():
probs = clf.predict_proba(features)[:, clf.classes_.tolist().index(1.)]
# output_fn = DataManager.get_sparse_scores_filepath(stack=stack, structure=structure,
# classifier_id=actual_setting, fn=fn)
output_fn = DataManager.get_sparse_scores_filepath(stack=stack, structure=structure,
classifier_id=classifier_id, fn=fn)
create_parent_dir_if_not_exists(output_fn)
bp.pack_ndarray_file(probs, output_fn)
upload_to_s3(output_fn)
t = time.time()
pool = Pool(NUM_CORES/2)
pool.map(lambda fn: clf_predict(stack=stack, fn=fn, model_name=cnn_model), filenames)
pool.close()
pool.join()
sys.stderr.write('Classifier predict: %.2f\n' % (time.time()-t))
|
mistycheney/MouseBrainAtlas
|
deprecated/learning/apply_classifiers_v4.py
|
apply_classifiers_v4.py
|
py
| 2,806 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71862936509
|
MEM_SIZE = 100
reg = {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'sp': 0, 'acc': 0, 'pc': 0, 'ivec': 0, 'int': 0, 'timer': 0,
'halt': False}
memory = [0] * MEM_SIZE
# copy one register's value into another
def mov(opr):
reg[opr[0]] = reg[opr[1]]
reg['pc'] = reg['pc'] + 1
# load an immediate (literal) value into a register
def movv(opr):
reg[opr[0]] = int(opr[1])
reg['pc'] = reg['pc'] + 1
# addition
def add(opr):
reg['acc'] = reg[opr[0]] + reg[opr[1]]
reg['pc'] = reg['pc'] + 1
# subtraction
def sub(opr):
reg['acc'] = reg[opr[0]] - reg[opr[1]]
reg['pc'] = reg['pc'] + 1
# modulo (remainder)
def mod(opr):
reg['acc'] = reg[opr[0]] % reg[opr[1]]
reg['pc'] = reg['pc'] + 1
# print a register's value
def out(opr):
print(reg[opr[0]])
reg['pc'] = reg['pc'] + 1
# stop the machine by setting the halt flag
def halt(opr):
reg['halt'] = True
reg['pc'] = reg['pc'] + 1
f = open('ass-1.asm', 'r')
def runm():
while reg['halt'] == False:
i = reg['pc']
op = globals()[memory[i][0]]
op(memory[i][1:])
        reg['timer'] = reg['timer'] - 1
        if reg['int'] == 1 and reg['timer'] == 0:
            reg['sp'] = reg['sp'] + 1
            memory[reg['sp']] = reg['pc']  # save the return address before jumping
            reg['pc'] = reg['ivec']  # jump to the interrupt handler
            reg['int'] = 0
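# The loader below expects one instruction per line in "ass-1.asm", prefixed
# with its memory address. A hypothetical program in that format:
#   0 movv a 5
#   1 movv b 7
#   2 add a b
#   3 out acc
#   4 halt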
for l in f:
if l.startswith("#"):
continue
comm = l.split()
print(comm)
if comm:
memory[int(comm[0])] = comm[1:]
runm()
print(reg)
print(memory)
|
LearnCsWithDIR/Create-Virtual-Machine
|
vm-1.py
|
vm-1.py
|
py
| 1,479 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43634206373
|
# pylint: disable=no-self-use,invalid-name,no-value-for-parameter
from __future__ import division
from __future__ import absolute_import
import torch
from allennlp.common.testing.model_test_case import ModelTestCase
from allennlp.nn.decoding.chu_liu_edmonds import decode_mst
class BiaffineDependencyParserTest(ModelTestCase):
def setUp(self):
super(BiaffineDependencyParserTest, self).setUp()
self.set_up_model(self.FIXTURES_ROOT / u"biaffine_dependency_parser" / u"experiment.json",
self.FIXTURES_ROOT / u"data" / u"dependencies.conllu")
def test_dependency_parser_can_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_mst_decoding_can_run_forward(self):
self.model.use_mst_decoding_for_validation = True
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_decode_runs(self):
self.model.eval()
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
decode_output_dict = self.model.decode(output_dict)
assert set(decode_output_dict.keys()) == set([u'arc_loss', u'tag_loss', u'loss',
u'predicted_dependencies', u'predicted_heads',
u'words', u'pos'])
def test_mst_respects_no_outgoing_root_edges_constraint(self):
# This energy tensor expresses the following relation:
# energy[i,j] = "Score that i is the head of j". In this
# case, we have heads pointing to their children.
# We want to construct a case that has 2 children for the ROOT node,
# because in a typical dependency parse there should only be one
        # word which has the ROOT as its head.
energy = torch.Tensor([[0, 9, 5],
[2, 0, 4],
[3, 1, 0]])
length = torch.LongTensor([3])
heads, _ = decode_mst(energy.numpy(), length.item(), has_labels=False)
# This is the correct MST, but not desirable for dependency parsing.
assert list(heads) == [-1, 0, 0]
# If we run the decoding with the model, it should enforce
# the constraint.
heads_model, _ = self.model._run_mst_decoding(energy.view(1, 1, 3, 3), length) # pylint: disable=protected-access
assert heads_model.tolist()[0] == [0, 0, 1]
def test_mst_decodes_arc_labels_with_respect_to_unconstrained_scores(self):
energy = torch.Tensor([[0, 2, 1],
[10, 0, 0.5],
[9, 0.2, 0]]).view(1, 1, 3, 3).expand(1, 2, 3, 3).contiguous()
# Make the score for the root label for arcs to the root token be higher - it
# will be masked for the MST, but we want to make sure that the tags are with
# respect to the unmasked tensor. If the masking was incorrect, we would decode all
# zeros as the labels, because torch takes the first index in the case that all the
# values are equal, which would be the case if the labels were calculated from
# the masked score.
energy[:, 1, 0, :] = 3
length = torch.LongTensor([3])
heads, tags = self.model._run_mst_decoding(energy, length) # pylint: disable=protected-access
assert heads.tolist()[0] == [0, 0, 1]
assert tags.tolist()[0] == [0, 1, 0]
|
plasticityai/magnitude
|
pymagnitude/third_party/allennlp/tests/models/biaffine_dependency_parser_test.py
|
biaffine_dependency_parser_test.py
|
py
| 3,576 |
python
|
en
|
code
| 1,607 |
github-code
|
6
|
21884431887
|
"""Simulate a number of large CHIME populations."""
from frbpoppy import CosmicPopulation, lognormal, Survey
from frbpoppy import SurveyPopulation, pprint
N_SRCS = [3e4, 3.5e4, 4e4]
N_DAYS = 100
RATE = [8, 9, 10] # per day
# Chime started in Aug 2018. Assuming 2/day for one-offs.
# Total of 9 repeaters published on 9 Aug 2019. = ~year
N_CHIME = {'rep': 9, 'one-offs': 365*2, 'time': 365}
for n in N_SRCS:
for ra in RATE:
print(f'# sources: {n}')
print(f'rate: {ra}')
r = CosmicPopulation(n, n_days=N_DAYS, repeaters=True)
r.set_dist(model='vol_co', z_max=1.0)
r.set_dm_host(model='gauss', mean=100, std=200)
r.set_dm_igm(model='ioka', slope=1000, std=None)
r.set_dm(mw=True, igm=True, host=True)
r.set_emission_range(low=100e6, high=10e9)
r.set_lum(model='powerlaw', per_source='different', low=1e40,
high=1e45, power=0)
r.set_si(model='gauss', mean=-1.4, std=1)
r.set_w(model='lognormal', per_source='different', mean=0.1, std=1)
rate = lognormal(ra, 1, int(n))
r.set_time(model='poisson', rate=rate)
# Set up survey
s = Survey('chime-frb', n_days=N_DAYS)
s.set_beam(model='chime-frb')
# Only generate FRBs in CHIME's survey region
r.set_direction(model='uniform',
min_ra=s.ra_min,
max_ra=s.ra_max,
min_dec=s.dec_min,
max_dec=s.dec_max)
r.generate()
surv_pop = SurveyPopulation(r, s)
surv_pop.name = 'cosmic_chime'
print(surv_pop.source_rate)
print(surv_pop.burst_rate)
pprint(f'# one-offs: {surv_pop.n_one_offs()}')
pprint(f'# repeaters: {surv_pop.n_repeaters()}')
|
TRASAL/frbpoppy
|
tests/chime/sim_runs.py
|
sim_runs.py
|
py
| 1,794 |
python
|
en
|
code
| 26 |
github-code
|
6
|
15634510217
|
'''
height = raw_input('pls input your height(cm)')
weight = raw_input('pls input your weight(kg)')
height = float(height)/100
weight = float(weight)
bmi = weight/height**2
print(bmi)
if bmi > 30:
print('you are heavy')
elif 30 >= bmi > 18:
print('you are healthy')
else:
print('you are thin')
'''
username = raw_input('pls type username: ')
age = raw_input('pls type your age: ')
age = int(age)  # convert 'str' to 'int'
if username == 'Mary' and age == 60:
print('I hate you')
print('who')
'''
elif username == 'Lucy' and age == 20:
print('I love you')
else:
print('Who are you?')
'''
|
greatlqp/python_lesson
|
test_0308_2.py
|
test_0308_2.py
|
py
| 604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37158488723
|
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
from datetime import datetime
eboladata=pd.read_csv('datavis/ebola.csv')
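# The CSV is assumed to have at least these columns (inferred from the
# filtering below): 'Indicator', 'Country', 'Date' (YYYY-MM-DD) and 'value'.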
filtered = eboladata[eboladata['value']>0]
filtereddata = filtered[filtered['Indicator'].str.contains('death')]
Guineadata = filtereddata[filtereddata['Country']=='Guinea']
Guineadata = Guineadata[Guineadata['Indicator']=='Cumulative number of confirmed Ebola deaths']
Sierradata = filtereddata[filtereddata['Country']=='Sierra Leone']
Sierradata = Sierradata[Sierradata['Indicator']=='Cumulative number of confirmed Ebola deaths']
Liberiadata = filtereddata[filtereddata['Country'].str.contains('Liberia')] #some named as Liberia 2
Liberiadata = Liberiadata[Liberiadata['Indicator']=='Cumulative number of confirmed Ebola deaths']
Guineadata = Guineadata.sort_values(by='Date')
Sierradata = Sierradata.sort_values(by='Date')
Liberiadata = Liberiadata.sort_values(by='Date')
g_x=[datetime.strptime(date, '%Y-%m-%d').date() for date in Guineadata['Date']]
g_y = Guineadata['value']
s_x=[datetime.strptime(date, '%Y-%m-%d').date() for date in Sierradata['Date']]
s_y = Sierradata['value']
l_x=[datetime.strptime(date, '%Y-%m-%d').date() for date in Liberiadata['Date']]
l_y = Liberiadata['value']
plt.figure(figsize=(10,10))
plt.plot(g_x, g_y, color='red', linewidth=2, label='Guinea')
plt.plot(s_x, s_y, color='orange', linewidth=2, label='Sierra Leone')
plt.plot(l_x, l_y, color='blue', linewidth=2, label='Liberia')
plt.xlabel('Date', fontsize=18)
plt.ylabel('Number of Ebola Deaths', fontsize=18)
plt.legend()
plt.show()  # display the figure when run as a script
|
QiliWu/Python-datavis
|
datavis/ebola comfirmed death.py
|
ebola comfirmed death.py
|
py
| 1,578 |
python
|
en
|
code
| 2 |
github-code
|
6
|
8372223063
|
import os
from unittest.mock import patch, Mock, PropertyMock
import pytest
from m1l0_services.imagebuilder.v1.imagebuilder_service_pb2 import BuildRequest, BuildConfig
from builder.core.imagebuilder import ImageBuilder
@patch("shutil.rmtree")
def test_cleanup_code_path(mock_shutil):
mock_shutil.return_value = Mock()
config = {
"source": "dir:///tmp/123",
"service": "dockerhub",
"repository": "m1l0/myproject",
"revision": "latest"
}
request = BuildRequest(
id="123",
config=BuildConfig(**config)
)
imagebuilder = ImageBuilder(request, code_copy_path="/tmp/code/123")
imagebuilder.cleanup_code_path()
mock_shutil.assert_called_with("/tmp/code/123")
@patch("docker.APIClient.remove_image")
def test_cleanup_repository(mock_remove_image):
mock_remove_image.return_value = Mock()
config = {
"source": "dir:///tmp/123",
"service": "dockerhub",
"repository": "m1l0/myproject",
"revision": "latest"
}
request = BuildRequest(
id="123",
config=BuildConfig(**config)
)
imagebuilder = ImageBuilder(request, code_copy_path="/tmp/code/123")
imagebuilder._repository = "m1l0/myproject"
imagebuilder.cleanup_repository()
mock_remove_image.assert_called_with("m1l0/myproject", force=True)
def test_imagename_property():
config = {
"source": "dir:///tmp/123",
"service": "dockerhub",
"repository": "m1l0/myproject",
"revision": "latest"
}
request = BuildRequest(
id="123",
config=BuildConfig(**config)
)
imagebuilder = ImageBuilder(request, code_copy_path="/tmp/code/123")
imagebuilder._imagename = "m1l0/myproject"
imagebuilder._repository = "m1l0/myproject"
assert imagebuilder.imagename == "m1l0/myproject"
assert imagebuilder.repository == "m1l0/myproject"
@patch("builder.core.imagebuilder.build_docker_image")
@patch("builder.core.imagebuilder.prepare_archive")
@patch("builder.core.imagebuilder.create_dockerfile")
@patch("os.listdir")
def test_build(mock_listdir, mock_docker, mock_archive, mock_builder):
mock_listdir.return_value = iter(["main.py"])
mock_docker.return_value = "DOCKERFILE CONTENTS"
mock_archive.return_value = "test.tar.gz"
mock_builder.return_value = iter(["80%", "90%", "100%", "imagename: m1l0/myproject"])
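    # The mocked build stream yields progress strings and ends with an
    # "imagename: ..." line; the assertions below expect ImageBuilder to pop
    # that trailing line off and expose it via the imagename property.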
config = {
"source": "dir:///tmp/123",
"service": "dockerhub",
"repository": "m1l0/myproject",
"revision": "latest"
}
request = BuildRequest(
id="123",
config=BuildConfig(**config)
)
imagebuilder = ImageBuilder(request, code_copy_path="/tmp/code/123")
res = imagebuilder.build()
res = list(res)
assert res == ["80%", "90%", "100%"]
assert imagebuilder.imagename == 'imagename: m1l0/myproject'
@patch("builder.core.imagebuilder.push_docker_image")
def test_push(mock_push):
mock_push.return_value = iter(["80%", "90%", "100%", "repository: m1l0/myproject"])
config = {
"source": "dir:///tmp/123",
"service": "dockerhub",
"repository": "m1l0/myproject",
"revision": "latest"
}
request = BuildRequest(
id="123",
config=BuildConfig(**config)
)
imagebuilder = ImageBuilder(request, code_copy_path="/tmp/code/123")
res = imagebuilder.push()
res = list(res)
assert res == ["80%", "90%", "100%"]
assert imagebuilder.repository == "repository: m1l0/myproject"
|
m1l0ai/m1l0_image_builder
|
tests/test_imagebuilder.py
|
test_imagebuilder.py
|
py
| 3,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6246496296
|
from urllib.request import urlretrieve
import os
def get_dataset_file(url, savepath):
    if not os.path.exists(savepath):
        # makedirs(..., exist_ok=True) tolerates a directory that already exists
        os.makedirs(os.path.dirname(savepath), exist_ok=True)
        urlretrieve(url, savepath)
if __name__ == "__main__":
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv"
savepath = "wine_dataset/winequality-white.csv"
get_dataset_file(url, savepath)
|
cheesecat47/ML_DL_Jan2020
|
get_dataset.py
|
get_dataset.py
|
py
| 424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12309608299
|
from setuptools import find_packages, setup
import os
version = "0.0.1"
readme = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
req_file = os.path.join(os.path.dirname(__file__), 'requirements.txt')
requirements = [i.strip() for i in open(req_file).readlines()]
setup_params = dict(
name="pyexcel",
version=version,
description="Excel DBAPI Driver",
author="mclovinxie",
author_email="[email protected]",
long_description=readme,
classifiers=[
"Development Status :: 3 - Alpha",
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Database :: Front-Ends',
],
keywords='Excel SQLAlchemy Dialect',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
entry_points={
"sqlalchemy.dialects":
["pyexcel = pyexcel.dialect:ExcelDialect"]
},
install_requires=requirements
)
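# Registering under the "sqlalchemy.dialects" entry point lets SQLAlchemy
# resolve connection URLs of the form "pyexcel://..." to ExcelDialect.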
if __name__ == '__main__':
setup(**setup_params)
|
mclovinxie/dialect-pyexcel
|
setup.py
|
setup.py
|
py
| 1,222 |
python
|
en
|
code
| 3 |
github-code
|
6
|
69809838269
|
import tkinter
import tkinter.messagebox
import customtkinter
import requests
import webbrowser
from PIL import Image, ImageTk
import spotify
customtkinter.set_appearance_mode("system") # Modes: "System" (standard), "Dark", "Light"
customtkinter.set_default_color_theme("green") # Themes: "blue" (standard), "green", "dark-blue"
class App(customtkinter.CTk):
WIDTH = 960
HEIGHT = 540
URLS = []
def __init__(self):
super().__init__()
self.title("KPOP Dictionary")
self.geometry(f"{App.WIDTH}x{App.HEIGHT}")
self.protocol("WM_DELETE_WINDOW", self.on_closing)
image = Image.open("spotify-hex-colors-gradient-background.png").resize((self.WIDTH, self.HEIGHT))
self.bg_image = ImageTk.PhotoImage(image)
self.image_label = tkinter.Label(master=self, image=self.bg_image)
self.image_label.place(relx=0.5, rely=0.5, anchor=tkinter.CENTER)
# two frames -> grid 2x1
self.grid_columnconfigure(1, weight=1)
self.grid_rowconfigure(1, weight=1)
self.frame_left = customtkinter.CTkFrame(master=self, width=320, corner_radius=2)
self.frame_left.grid(row=0, column=0, sticky="nswe", padx=10, pady=10)
self.frame_right = customtkinter.CTkFrame(master=self)
self.frame_right.grid(row=0, column=1, sticky="nswe", padx=10, pady=10)
# left frame -> grid 1x11
self.frame_left.grid_rowconfigure(0, minsize=10) # empty row with minsize as spacing
self.frame_left.grid_rowconfigure(5, weight=1) # empty row as spacing
self.frame_left.grid_rowconfigure(9, minsize=20) # empty row with minsize as spacing
self.frame_left.grid_rowconfigure(11, minsize=10) # empty row with minsize as spacing
self.title_label = customtkinter.CTkLabel(master=self.frame_left,
text="KPOP Dictionary",
text_font=("Roboto Medium", -36))
self.title_label.grid(row=1, column=0, padx=20, pady=20)
self.search_label = customtkinter.CTkLabel(master=self.frame_left,
text="Type in search term",
text_font=("Roboto Medium", -24))
self.search_label.grid(row=2, column=0, padx=20, pady=20)
self.entrybox = customtkinter.CTkEntry(master=self.frame_left,
width=300,
placeholder_text="e.g. Next Level",
text_font=("Roboto Medium", -22))
self.entrybox.grid(row=3, column=0, padx=20, pady=20)
self.type_label = customtkinter.CTkLabel(master=self.frame_left,
text="Choose term type",
text_font=("Roboto Medium", -24))
self.type_label.grid(row=4, column=0, padx=20, pady=20)
self.radio_var = tkinter.IntVar(value=0)
self.radio_button_1 = customtkinter.CTkRadioButton(master=self.frame_left,
variable=self.radio_var,
value=0,
text="Song",
text_font=("Roboto Medium", -22),
command=self.radiobutton_event)
self.radio_button_1.grid(row=6, column=0, padx=20, pady=10)
self.radio_button_2 = customtkinter.CTkRadioButton(master=self.frame_left,
variable=self.radio_var,
value=1,
text="Album",
text_font=("Roboto Medium", -22),
command=self.radiobutton_event)
self.radio_button_2.grid(row=7, column=0, padx=20, pady=10)
self.radio_button_3 = customtkinter.CTkRadioButton(master=self.frame_left,
variable=self.radio_var,
value=2,
text="Artist",
text_font=("Roboto Medium", -22),
command=self.radiobutton_event)
self.radio_button_3.grid(row=8, column=0, padx=20, pady=10)
self.button = customtkinter.CTkButton(master=self.frame_left,
text="Search term",
text_font=("Roboto Medium", -22),
command=self.button_event)
self.button.grid(row=9, column=0, padx=20, pady=10)
def button_event(self):
print(self.entrybox.get())
if self.entrybox.get() == "":
return
self.frame_right = customtkinter.CTkFrame(master=self)
self.frame_right.grid(row=0, column=1, sticky="nswe", padx=10, pady=10)
        # search_spotify is assumed to return two parallel lists: artwork
        # image URLs and the matching Spotify page URLs
        image_urls, App.URLS = spotify.search_spotify(self.entrybox.get(), self.radio_var.get())
count = len(image_urls) if len(image_urls) <= 9 else 9
        for i in range(0, count):
            image = Image.open(requests.get(image_urls[i], stream=True).raw).resize((150, 150))
            photo = ImageTk.PhotoImage(image)
            button = customtkinter.CTkButton(self.frame_right, image=photo, text="")
            button.image = photo  # keep a reference so Tk does not garbage-collect the image
if i == 0:
button.configure(command = self.button_1)
elif i == 1:
button.configure(command = self.button_2)
elif i == 2:
button.configure(command = self.button_3)
elif i == 3:
button.configure(command = self.button_4)
elif i == 4:
button.configure(command = self.button_5)
elif i == 5:
button.configure(command = self.button_6)
elif i == 6:
button.configure(command = self.button_7)
elif i == 7:
button.configure(command = self.button_8)
else:
button.configure(command = self.button_9)
r = int(i / 3)
c = int(i % 3)
button.grid(row=r,column=c, padx=20, pady=10)
def button_1(self):
webbrowser.open(App.URLS[0])
def button_2(self):
webbrowser.open(App.URLS[1])
def button_3(self):
webbrowser.open(App.URLS[2])
def button_4(self):
webbrowser.open(App.URLS[3])
def button_5(self):
webbrowser.open(App.URLS[4])
def button_6(self):
webbrowser.open(App.URLS[5])
def button_7(self):
webbrowser.open(App.URLS[6])
def button_8(self):
webbrowser.open(App.URLS[7])
def button_9(self):
webbrowser.open(App.URLS[8])
def radiobutton_event(self):
print("radiobutton toggled, current value:", self.radio_var.get())
def on_closing(self, event=0):
self.destroy()
if __name__ == "__main__":
app = App()
app.mainloop()
|
algebrabender/Spotify-API-Project
|
gui.py
|
gui.py
|
py
| 7,464 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19334470717
|
#!/usr/bin/python3
with open('index0', 'r') as file0, open('index', 'r') as file:
    indexs = file.read().split()
    index0s = file0.read().split()
for index0 in index0s:
    if index0 not in indexs:
indexs.append(index0)
for index in indexs:
print(index)
|
zhangfeiyang/finance-tmp
|
update_index.py
|
update_index.py
|
py
| 255 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8879382170
|
# -*- coding: utf-8 -*-
import unittest
import sys
sys.path.append('../src/')
import mac_tran_dao as dao
class TestSearchWordFromChunk(unittest.TestCase):
def test_chunk_srch_en(self):
dao.delete_word_tbl()
en_input = "human"
jp_input = u"人間" #need the u prefix to declare utf-8
dao.insert_word_pair(en_input,jp_input)
result = dao.word_having_chunk("hu")[0]
retrieved_pair= dao.get_word_pair_by_id(int(result))
self.assertEqual(en_input, retrieved_pair[0])
self.assertEqual(jp_input,retrieved_pair[1])
def test_chunk_srch_jp(self):
dao.delete_word_tbl()
en_input = "human"
jp_input = u"人間" #need the u prefix to declare utf-8
dao.insert_word_pair(en_input,jp_input)
result = dao.word_having_chunk("人")[0]
retrieved_pair= dao.get_word_pair_by_id(int(result))
self.assertEqual(en_input, retrieved_pair[0])
self.assertEqual(jp_input,retrieved_pair[1])
"""
Test for multiple words containing the input chunk.
Ensure that what gets put in, is retrieved by verifying
the number of elements.
"""
def test_chunk_srch_multiple_en(self):
dao.delete_word_tbl()
pairs = dict()
pairs["living" ]=u"生活する"
pairs["invited" ]=u"招待される"
pairs["involved" ]=u"関わる"
pairs["indeed" ]=u"そのとおり"
pairs["intention" ]=u"想定"
pairs["initiation"]=u"開始"
pairs["nicotine" ]=u"ニコチン"
pairs["bin" ]=u"ゴミ箱"
for en_word in pairs.keys():
dao.insert_word_pair(en_word,pairs[en_word])
list_pairs = list()
result = dao.word_having_chunk("in")
for wid in result:
retrieved_pair=dao.get_word_pair_by_id(int(wid))
list_pairs.append(retrieved_pair)
#print list_stuff
#first validate that the element counts match.
self.assertEqual(len(list_pairs),8)
#then validate that the mapping between
#the pairs are still in tact.
for en_jp_pair in list_pairs:
self.assertEqual (en_jp_pair[1],pairs[en_jp_pair[0]])
if __name__=='__main__':
unittest.main()
|
iku000888/Machine-Translation-JP-EN
|
tests/chunk_search_test.py
|
chunk_search_test.py
|
py
| 2,164 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37342054211
|
import pyaml
from github import Github
import requests
import datetime
import time
def open_json(fileUrl):
import json
import requests
if fileUrl[0:4] == "http":
# es URL
try:
pointer = requests.get(fileUrl)
return json.loads(pointer.content.decode('utf-8'))
except:
return None
else:
# es file
try:
file = open(fileUrl, "r")
return json.loads(file.read())
except:
return None
def open_jsonref(fileUrl):
import jsonref
import requests
if fileUrl[0:4] == "http":
# es URL
pointer = requests.get(fileUrl)
return jsonref.loads(pointer.content.decode('utf-8'))
else:
# es file
file = open(fileUrl, "r")
return jsonref.loads(file.read())
def echo(concept, variable):
print("*** " + concept + " ***")
print(variable)
print("--- " + concept + " ---")
def updated_raw_version_github(original_file_content, repository, path, timeout = 1000):
uploaded = datetime.datetime.now()
remotepath = "https://raw.githubusercontent.com/smart-data-models/" + repository + "/master/" + path
frequency = 5 # seconds
counter = 0
difference = True
try:
        while difference:
            counter += frequency
            if counter > timeout:
                return False
            text = requests.get(remotepath).text
            print("retrieved text: " + text)
if str(text) == str(original_file_content):
difference = False
available = datetime.datetime.now()
print("uploaded at : " + str(uploaded))
print("available at : " + str(available))
return True
else:
print("______________________________________________")
print(original_file_content)
print("uploaded at : " + str(uploaded))
print("**********************************************")
print(text)
print("not matched at :" + str(datetime.datetime.now()))
time.sleep(frequency)
except (FileNotFoundError, IOError):
print("file not available at : ")
print("not matched at :" + str(datetime.datetime.now()))
return False
def parse_description(schemaPayload):
output = {}
purgedDescription = str(schemaPayload["description"]).replace(chr(34), "")
    separatedDescription = purgedDescription.split(". ")
copiedDescription = list.copy(separatedDescription)
for descriptionPiece in separatedDescription:
if descriptionPiece in propertyTypes:
output["type"] = descriptionPiece
copiedDescription.remove(descriptionPiece)
elif descriptionPiece.find("Model:") > -1:
copiedDescription.remove(descriptionPiece)
output["model"] = descriptionPiece.replace("'", "").replace(
"Model:", "")
if descriptionPiece.find("Units:") > -1:
copiedDescription.remove(descriptionPiece)
output["units"] = descriptionPiece.replace("'", "").replace(
"Units:", "")
description = ". ".join(copiedDescription)
return output, description
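# Illustrative (hypothetical) input/output for parse_description:
#   parse_description({"description": "Property. Model:'https://schema.org/Text'. Units:'m'. Distance to the thing"})
#   -> ({'type': 'Property', 'model': 'https://schema.org/Text', 'units': 'm'},
#       'Distance to the thing')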
def parse_payload(schemaPayload, level):
output = {}
if level == 1:
if "allOf" in schemaPayload:
for index in range(len(schemaPayload["allOf"])):
echo("passing to next level this payload=", str(schemaPayload["allOf"][index]))
if "definitions" in schemaPayload["allOf"][index]:
partialOutput = parse_payload(schemaPayload["allOf"][index]["definitions"], level + 1)
output = dict(output, **partialOutput)
elif "properties" in schemaPayload["allOf"][index]:
partialOutput = parse_payload(schemaPayload["allOf"][index], level + 1)
output = dict(output, **partialOutput["properties"])
else:
partialOutput = parse_payload(schemaPayload["allOf"][index], level + 1)
output = dict(output, **partialOutput)
if "anyOf" in schemaPayload:
for index in range(len(schemaPayload["anyOf"])):
echo("original output", output)
if "definitions" in schemaPayload["anyOf"][index]:
partialOutput = parse_payload(schemaPayload["anyOf"][index]["definitions"], level + 1)
output = dict(output, **partialOutput)
elif "properties" in schemaPayload["anyOf"][index]:
partialOutput = parse_payload(schemaPayload["anyOf"][index], level + 1)
output = dict(output, **partialOutput["properties"])
else:
partialOutput = parse_payload(schemaPayload["anyOf"][index], level + 1)
output = dict(output, **partialOutput)
if "oneOf" in schemaPayload:
for index in range(len(schemaPayload["oneOf"])):
echo("original output", output)
if "definitions" in schemaPayload["oneOf"][index]:
partialOutput = parse_payload(schemaPayload["oneOf"][index]["definitions"], level + 1)
output = dict(output, **partialOutput)
elif "properties" in schemaPayload["oneOf"][index]:
partialOutput = parse_payload(schemaPayload["oneOf"][index], level + 1)
output = dict(output, **partialOutput["properties"])
else:
partialOutput = parse_payload(schemaPayload["oneOf"][index], level + 1)
output = dict(output, **partialOutput)
if "properties" in schemaPayload:
output = parse_payload(schemaPayload["properties"], level + 1)
elif level < 8:
if isinstance(schemaPayload, dict):
for subschema in schemaPayload:
if subschema in ["allOf", "anyOf", "oneOf"]:
output[subschema] = []
for index in range(len(schemaPayload[subschema])):
if "properties" in schemaPayload[subschema][index]:
partialOutput = parse_payload(schemaPayload[subschema][index], level + 1)
output[subschema].append(partialOutput["properties"])
else:
partialOutput = parse_payload(schemaPayload[subschema][index], level + 1)
output[subschema].append(partialOutput)
elif subschema == "properties":
echo("properties level", level)
output[subschema] = {}
for prop in schemaPayload["properties"]:
echo(" dealing at level " + str(level) + " with prop=", prop)
echo("parsing this payload at " + str(level) + " from prop =" + prop, schemaPayload["properties"][prop])
try:
output[subschema][prop]
except:
output[subschema][prop] = {}
for item in list(schemaPayload["properties"][prop]):
echo("parsing at level " + str(level) + " item= ", item)
if item in ["allOf", "anyOf", "oneOf"]:
output[subschema][prop][item] = []
for index in range(len(schemaPayload[subschema][prop][item])):
output[subschema][prop][item].append(parse_payload(schemaPayload[subschema][prop][item][index], level + 1))
elif item == "description":
print("Detectada la descripcion de la propiedad=" + prop)
x_ngsi, description = parse_description(schemaPayload[subschema][prop])
output[subschema][prop][item] = description
if x_ngsi:
output[subschema][prop]["x-ngsi"] = x_ngsi
elif item == "items":
output[subschema][prop][item] = parse_payload(schemaPayload[subschema][prop][item], level + 1)
elif item == "properties":
output[subschema][prop][item] = parse_payload(schemaPayload[subschema][prop][item], level + 1)
elif item == "type":
if schemaPayload[subschema][prop][item] == "integer":
output[subschema][prop][item] = "number"
else:
output[subschema][prop][item] = schemaPayload[subschema][prop][item]
else:
output[subschema][prop][item] = schemaPayload[subschema][prop][item]
elif isinstance(schemaPayload[subschema], dict):
output[subschema] = parse_payload(schemaPayload[subschema], level + 1)
else:
if subschema == "description":
x_ngsi, description = parse_description(schemaPayload)
output[subschema] = description
if x_ngsi:
output["x-ngsi"] = x_ngsi
else:
output[subschema] = schemaPayload[subschema]
elif isinstance(schemaPayload, list):
for index in range(len(schemaPayload)):
partialOutput = parse_payload(schemaPayload[index], level + 1)
output = dict(output, **partialOutput)
else:
return None
return output
def github_push_from_variable(contentVariable, repoName, fileTargetPath, message, globalUser, token):
from github import Github
g = Github(token)
repo = g.get_organization(globalUser).get_repo(repoName)
try:
file = repo.get_contents("/" + fileTargetPath)
update = True
except:
update = False
if update:
repo.update_file(fileTargetPath, message, contentVariable, file.sha)
else:
repo.create_file(fileTargetPath, message, contentVariable, "master")
baseModelFileName = "model.yaml"
#credentialsFile = "/home/aabella/transparentia/CLIENTES/EU/FIWARE/credentials.json"
credentialsFile = "/home/fiware/credentials.json"
credentials = open_jsonref(credentialsFile)
token = credentials["token"]
globalUser = credentials["globalUser"]
g = Github(token)
propertyTypes = ["Property", "Relationship", "GeoProperty"]
configFile = "datamodels_to_publish.json"
dataModelsToPublish = open_jsonref(configFile)
print(dataModelsToPublish)
print(type(dataModelsToPublish))
echo("subject", dataModelsToPublish["subject"])
echo("dataModels", dataModelsToPublish["dataModels"])
echo("filter or no ", dataModelsToPublish["filterDataModels"])
repoName = dataModelsToPublish["subject"]
dataModels = dataModelsToPublish["dataModels"]
if isinstance(dataModels, str):
dataModels = [dataModels]
enableDataModelFilter = dataModelsToPublish["filterDataModels"]
for dataModel in dataModels:
# have to be removed if the data model is fixed
# if dataModel in ["WoodworkingMachine"]: continue
echo("repoName", repoName)
result = {}
result[dataModel] = {}
echo("dataModel=", dataModel)
schemaUrl = "https://raw.githubusercontent.com/smart-data-models/" + repoName + "/master/" + dataModel + "/schema.json"
echo("urlschema", schemaUrl)
schemaExpanded = open_jsonref(schemaUrl)
echo("schemaExpanded", schemaExpanded)
result[dataModel]["properties"] = parse_payload(schemaExpanded, 1)
try: # the required clause is optional
required = schemaExpanded["required"]
except:
required = []
try:
entityDescription = schemaExpanded["description"].replace(chr(34),"")
except:
entityDescription = "No description available"
try:
version = schemaExpanded["$schemaVersion"]
except:
version = ""
try:
tags = schemaExpanded["modelTags"]
except:
tags = ""
try:
modelSchema = schemaExpanded["$id"]
except:
modelSchema = ""
try:
licenseUrl = schemaExpanded["licenseUrl"]
except:
licenseUrl = "https://github.com/smart-data-models/" + repoName + "/blob/master/" + dataModel + "/LICENSE.md"
try:
disclaimer = schemaExpanded["disclaimer"]
except:
disclaimer = "Redistribution and use in source and binary forms, with or without modification, are permitted provided that the license conditions are met. Copyleft (c) 2022 Contributors to Smart Data Models Program"
try:
derivedFrom = schemaExpanded["derivedFrom"]
except:
derivedFrom = ""
result[dataModel]["type"] = "object"
result[dataModel]["description"] = entityDescription
result[dataModel]["required"] = required
result[dataModel]["x-version"] = version
result[dataModel]["x-model-tags"] = tags
result[dataModel]["x-model-schema"] = modelSchema
result[dataModel]["x-license-url"] = licenseUrl
result[dataModel]["x-disclaimer"] = disclaimer
result[dataModel]["x-derived-from"] = derivedFrom
echo("result", result)
path = dataModel + "/" + baseModelFileName
message = "updated " + baseModelFileName + " - support subproperties"
# keep the original references when there are $ref clauses
schema = open_json(schemaUrl)
if "allOf" in schema:
for cursor in range(len(schema["allOf"])):
if "properties" in schema["allOf"][cursor]:
for element in schema["allOf"][cursor]["properties"]:
if element in result[dataModel]["properties"]:
if "description" in schema["allOf"][cursor]["properties"][element] and "description" in result[dataModel]["properties"][element]:
_, description = parse_description(schema["allOf"][cursor]["properties"][element])
result[dataModel]["properties"][element]["description"] = description
print("replaced descripton in " + element + " to " + schema["allOf"][cursor]["properties"][element]["description"])
else:
print("Nothing to expand")
content_variable = pyaml.dumps(result, width=4096, force_embed=True).decode("utf-8")
github_push_from_variable(content_variable, repoName, path, message, globalUser, token)
available = False
while not available:
available = updated_raw_version_github(content_variable, repoName, path)
|
smart-data-models/data-models
|
utils/10_model.yaml_v13.py
|
10_model.yaml_v13.py
|
py
| 15,076 |
python
|
en
|
code
| 94 |
github-code
|
6
|
23448354960
|
import socket
from socket import AF_INET6, SOCK_STREAM
from threading import Thread
localIPv6 = "fe80::c10c:de5e:2cbf:132c%9"
globalIPv6 = "2001:14ba:a0bd:dd00:c10c:de5e:2cbf:132c"
# Remember to Disable firewall for clients in other networks to be able to connect to the server
portIPv6 = 36000
buffer = 1024
backlog = 5
def receive(clientSocketIPv6):
while True:
message = clientSocketIPv6.recv(buffer).decode("utf8")
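        # Assumed wire format for file transfers, inferred from the parsing
        # below: "/downloaded\n<file contents>\n<file name>"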
if message.startswith("/downloaded"):
fileData = message.split("\n")[1]
fileName = message.split("\n")[2]
newFile = open(fileName, "a")
newFile.write(fileData)
newFile.close()
else:
print(message)
def send(clientSocketIPv6):
message = input(">>> ")
while message != "/quit":
clientSocketIPv6.send(bytes(message, "utf8"))
message = input(">>> ")
print("You have quitted from the server. See you again")
clientSocketIPv6.send(bytes(message, "utf8"))
def main():
clientSocketIPv6 = socket.socket(AF_INET6, SOCK_STREAM)
addressIPv6 = (globalIPv6, portIPv6)
clientSocketIPv6.connect(addressIPv6)
receive_thread = Thread(target=receive, args=(clientSocketIPv6,))
send_thread = Thread(target=send, args=(clientSocketIPv6,))
receive_thread.start()
send_thread.start()
if __name__ == "__main__":
main()
|
SpringNuance/chat_application_command-line-version
|
client_IPv6.py
|
client_IPv6.py
|
py
| 1,436 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39165330944
|
#
# @lc app=leetcode.cn id=20 lang=python3
#
# [20] 有效的括号
#
# @lc code=start
class Solution:
    # Push left brackets onto the stack; match right brackets against the top
def isValid(self, s: str) -> bool:
sLen = len(s)
if sLen % 2 !=0: return False
stack = list()
rightMap = {
')': '(',
'}': '{',
']': '[',
}
        for ch in s:
            if ch in rightMap:
                if len(stack) == 0 or rightMap[ch] != stack[-1]:
                    return False
                stack.pop()
            else:
                stack.append(ch)
return len(stack) == 0
# @lc code=end
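# Illustrative usage (not part of the LeetCode harness):
#   Solution().isValid("()[]{}")  # -> True
#   Solution().isValid("(]")      # -> False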
|
cl6222877/leetcode_gogo
|
20.有效的括号.py
|
20.有效的括号.py
|
py
| 635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4510504745
|
from tkinter import *
import tkinter as tk
import sqlite3
import sys
print("Imported")
con = sqlite3.connect("project.db")
print("Connected")
root = tk.Tk()
v = tk.IntVar()
v1 = tk.IntVar()
v2 = tk.IntVar()
v3 = tk.IntVar()
def createtable():
create = ("CREATE TABLE IF NOT EXISTS vehicle(NAME VARCHAR(200),"+
"CONTACT VARCHAR(200),"+
"VEHICLE VARCHAR(100),"+
"DATEOFBOOKING VARCHAR(100),"+
"DURATION VARCHAR(100),"+
"RENT VARCHAR(100))")
con.execute(create)
print("CREATED SUCCESSFULLY")
def insertt(aname, acon, aveh, adob, adur, arent):
aname = aname.get()
acon = acon.get()
aveh = aveh.get()
adob = adob.get()
adur = adur.get()
arent = arent.get()
print(aname, acon, aveh, adob, adur, arent)
print("Value is", aname)
insert = str("INSERT INTO vehicle(NAME,CONTACT,VEHICLE,DATEOFBOOKING,DURATION,RENT)"+" VALUES(?,?,?,?,?,?)")
con.execute(insert,(aname, acon, aveh, adob, adur, arent))
con.commit()
print("INSERTED SUCCESSFULLY")
def btnClickdis():
winddis = tk.Toplevel(root)
winddis.geometry('500x500+50+50')
winddis.title("BOOKING RECORD")
curs = con.cursor()
query = 'SELECT * FROM vehicle'
curs.execute(query)
Label(winddis, text="NAME", font=('arial',10,'bold')).grid(row=0, column=0)
Label(winddis, text="CONTACT", font=('arial',10,'bold')).grid(row=0, column=1)
Label(winddis, text="VEHICLE", font=('arial',10,'bold')).grid(row=0, column=2)
Label(winddis, text="DATE OF BOOKING", font=('arial',10,'bold')).grid(row=0, column=3)
Label(winddis, text="DURATION", font=('arial',10,'bold')).grid(row=0, column=4)
Label(winddis, text="RENT", font=('arial',10,'bold')).grid(row=0, column=5)
data=curs.fetchall()
for i, row in enumerate(data) :
Label(winddis, text=row[0]).grid(row=i+1, column=0)
Label(winddis, text=row[1]).grid(row=i+1, column=1)
Label(winddis, text=row[2]).grid(row=i+1, column=2)
Label(winddis, text=row[3]).grid(row=i+1, column=3)
Label(winddis, text=row[4]).grid(row=i+1, column=4)
Label(winddis, text=row[5]).grid(row=i+1, column=5)
def searching(e):
windsearch = tk.Toplevel(root,width=500,height=500)
windsearch.title("SEARCH")
windsearch.geometry('500x500+50+50')
sname = e.get()
print("Record of", sname, "is to be deleted")
data = con.execute('SELECT NAME,CONTACT,VEHICLE,DATEOFBOOKING,DURATION,RENT FROM vehicle where NAME=?;', (sname,))
for row in data:
Label(windsearch, text=row[0], font=('Verdana',12,'bold')).grid(row=1, column=4)
Label(windsearch, text=row[1], font=('Verdana',12,'bold')).grid(row=2, column=4)
Label(windsearch, text=row[2], font=('Verdana',12,'bold')).grid(row=3, column=4)
Label(windsearch, text=row[3], font=('Verdana',12,'bold')).grid(row=4, column=4)
Label(windsearch, text=row[4], font=('Verdana',12,'bold')).grid(row=5, column=4)
Label(windsearch, text=row[5], font=('Verdana',12,'bold')).grid(row=6, column=4)
Label(windsearch, text="NAME", font=('Verdana',15,'bold')).grid(row=1, column=1)
Label(windsearch, text="CONTACT", font=('Verdana',15,'bold')).grid(row=2, column=1)
Label(windsearch, text="VEHICLE", font=('Verdana',15,'bold')).grid(row=3, column=1)
Label(windsearch, text="DATE OF BOOKING", font=('Verdana',15,'bold')).grid(row=4, column=1)
Label(windsearch, text="DURATION", font=('Verdana',15,'bold')).grid(row=5, column=1)
Label(windsearch, text="RENT", font=('Verdana',15,'bold')).grid(row=6, column=1)
def delcnfrm(e):
winddelcnfrm = tk.Toplevel(root)
winddelcnfrm.geometry('250x250+50+50')
winddelcnfrm.title("DELETE?")
print(e)
delname = e.get()
print(delname)
l = Label(winddelcnfrm,font=('timesnewroman',10,'bold'),text="CONFIRM?",fg="black").grid(columnspan=2)
btndelete = Button(winddelcnfrm,fg="black",font=('arial',10,'bold'),text="YES",command=lambda:deleterec(delname),relief="raise",width=10,height=3,bg="cyan").grid(row=2,column=0)
btndelete = Button(winddelcnfrm,fg="black",font=('arial',10,'bold'),text="NO",command=btnClickdel,relief="raise",width=10,height=3,bg="cyan").grid(row=2,column=1)
def deleterec(delname):
con.execute("DELETE from vehicle where NAME=?;",(delname,))
con.commit()
print("DELETED SUCCESSFULLY")
def btnLogin():
pass
def btnClickdel():
winddel = tk.Toplevel(root)
winddel.geometry('700x500+50+50')
winddel.title("DELETE A RECORD")
l = Label(winddel,font=('timesnewroman',10,'bold'),text="Enter the name whose vehicle booking details you want to delete.",fg="black").grid(row=0,column=0)
e = Entry(winddel,font=(20),bd=6)
e.place(x=75,y=75)
Button(winddel,text="DELETE",font=(20),bg="aquamarine",relief="raise",command=lambda:delcnfrm(e),width=10,height=1).place(x=150,y=150)
def btnClickLoginRegister():
windlgrg = tk.Toplevel(root)
windlgrg.geometry('500x500+500+150')
windlgrg.title("LOGIN OR REGISTER")
Button(windlgrg, text="LOGIN", width=25, height=2, command=btnLogin, bg="gold", bd=7, relief="raise", font=(30)).place(x=110,y=100)
Button(windlgrg, text="REGISTER", width=25, height=2, command=btnRegister, bg="gold", bd=7, relief="raise", font=(30)).place(x=110,y=230)
def buttonClickA():
winda = tk.Toplevel(root, width=500, height=500)
winda.geometry('1000x1000+50+50')
winda.title("I'M ADMIN")
l = tk.Label(winda, text="CHOOSE YOUR OPTION :", font=('Verdana',15,'bold')).place(x=350,y=50)
Button(winda, text="VIEW THE BOOKINGS!", width=40, height=3, command=btnClickdis, bg="gold", bd=7, relief="raise", font=(30)).place(x=350,y=100)
Button(winda, text="SEARCH A RECORD!", width=40, height=3, command=buttonClick, bg="gold", bd=7, relief="raise", font=(30)).place(x=350,y=230)
Button(winda, text="DELETE A RECORD!", width=40, height=3, command=btnClickdel, bg="gold", bd=7, relief="raise", font=(30)).place(x=350,y=360)
def buttonClickB():
windb = tk.Toplevel(root)
windb.geometry('1000x1000+50+50')
windb.title("I'M CUSTOMER")
l = tk.Label(windb, text="CHOOSE YOUR OPTION :", font=('Verdana',15,'bold')).place(x=350,y=100)
Button(windb, text="BOOK A VEHICLE!", width=40, height=3, command=buttonClick1, bg="maroon1", bd=7, relief="raise", font=(30)).place(x=350,y=150)
Button(windb, text="SEARCH YOUR RECORD!", width=40, height=3, command=buttonClick, bg="maroon1", bd=7, relief="raise", font=(30)).place(x=350,y=280)
Button(windb, text="GIVE YOUR REVIEWS!", width=40, height=3, command=buttonClick2, bg="maroon1", bd=7, relief="raise", font=(30)).place(x=350,y=410)
def buttonClick():
winds = tk.Toplevel(root,width=500,height=500)
winds.title("SEARCH WINDOW")
winds.geometry('1000x700+50+50')
l = tk.Label(winds, font=('timesnewroman',10,'bold'), text="Enter the name whose vehicle booking details you are looking for!", fg="black").place(x=100,y=75)
e= Entry(winds, font=(20), bd=6)
e.place(x=100, y=125)
Button(winds, text="SEARCH", font=(20), bg="tomato", relief="raise", command=lambda:searching(e)).place(x=450,y=200)
def btnRegister():
windc = tk.Toplevel(root)
windc.geometry('1200x800+50+50')
windc.title("PERSONAL DETAILS")
pd = tk.Label(windc, text="PERSONAL DETAILS", font=('arial',40,'bold'), bd=6, fg="magenta2", anchor='center').grid(row=0, column=1, columnspan=4, pady=5)
l1 = tk.Label(windc, text="NAME:", font=(20)).grid(row=1, column=0, pady=5)
l2 = tk.Label(windc, text="CONTACT_NO:", font=(20)).grid(row=2, column=0, pady=5)
l3 = tk.Label(windc, text="ADDRESS:", font=(20)).grid(row=3, column=0, pady=5)
l4 = tk.Label(windc, text="EMAIL_ID:", font=(20)).grid(row=4, column=0, pady=5)
l5 = tk.Label(windc, text="GENDER:", font=(20)).grid(row=5, column=0, pady=5)
Button(windc, text="REGISTER", font=(20), command=btnClickLoginRegister, bg="yellow", relief="raise").grid(row=7, column=1, rowspan=2)
aname = Entry(windc, width=80, font=(20), bd=6)
aname.place(x=150, y=90)
acon = Entry(windc, width=80, font=(20), bd=6)
acon.place(x=150, y=125)
e3 = Text(windc, font=(20), height=5, bd=6).grid(row=3, column=1, pady=5)
e4 = Text(windc, font=(20), height=0, bd=6).grid(row=4, column=1, pady=5)
tk.Radiobutton(windc, text="Male", variable=v, value=1, font=(20)).grid(row=5, column=1, sticky=W)
tk.Radiobutton(windc, text="Female", variable=v, value=2, font=(20)).grid(row=6, column=1, sticky=W)
def buttonClick2():
windr = tk.Toplevel(root)
windr.geometry('1000x1000+50+50')
windr.title("REVIEW")
re = tk.Label(windr, text="WELCOME TO THE REVIEW SECTION", font=('arial',40,'bold'), bd=6, fg="magenta2", anchor='center').place(x=30,y=5)
l = tk.Label(windr, text="Your Name here.", font=('System',15)).place(x=30,y=80)
l1 = tk.Label(windr, text="Give your reviews here.", font=('System',15)).place(x=30,y=125)
l2 = tk.Label(windr, text="If you have any complaints regarding our rental agency ,enter them here.", font=('System',15)).place(x=30,y=250)
l3 = tk.Label(windr, text="Enter your suggestions ,if any.", font=('System',15)).place(x=30,y=375)
e = Text(windr, height=0, width=60, font=(20), bd=6).place(x=200,y=80)
e1 = Text(windr, height=2, font=(20), bd=6).place(x=30,y=175)
e2 = Text(windr, height=2, font=(20), bd=6).place(x=30,y=300)
e3 = Text(windr, height=2, font=(20), bd=6).place(x=30,y=425)
Button(windr, text="DONE", font=(20), bg="yellow", relief="raise").place(x=425,y=500)
def buttonClick3(aname,acon):
windn = tk.Toplevel(root)
windn.geometry('1200x700+50+50')
windn.title("VEHICLE BOOKING DETAILS")
vd = tk.Label(windn, text="VEHICLE BOOKING DETAILS", font=('arial',40,'bold'), bd=6, fg="magenta2", anchor='center').place(x=200,y=5)
Button(windn, text="VEHICLES FOR RENT", font=(20), command=buttonClick4, bg="orange", relief="raise").place(x=250,y=75)
Button(windn, text="VIEW RATES", font=(20), command=buttonClick5, bg="orange", relief="raise").place(x=700,y=75)
l6 = tk.Label(windn ,text="ENTER THE NAME OF THE VEHICLE YOU WANT ON RENT :", font=(20)).place(x=30,y=125)
la = tk.Label(windn, text="* BIKE",font=(20)).place(x=850,y=125)
lb = tk.Label(windn, text="* CAR",font=(20)).place(x=850,y=150)
lc = tk.Label(windn, text="* JEEP",font=(20)).place(x=850,y=175)
ld = tk.Label(windn, text="* BUS",font=(20)).place(x=850,y=200)
le = tk.Label(windn, text="* TRUCK",font=(20)).place(x=850,y=225)
l7 = tk.Label(windn, text="DATE OF BOOKING:",font=(20)).place(x=30,y=300)
l8 = tk.Label(windn, text="DURATION:",font=(20)).place(x=30,y=350)
l9 = tk.Label(windn, text="RENT A DRIVER?",font=(20)).place(x=30,y=400)
Button(windn, text="NEXT", font=(20), command=lambda:buttonClick6(aname, acon, aveh, adob, adur), bg="orange", relief="raise").place(x=515,y=570)
aveh = Entry(windn, width=80, font=(20), bd=6)
aveh.place(x=30, y=250)
adob = Entry(windn, width=80, font=(20), bd=6)
adob.place(x=225, y=300)
adur = Entry(windn, width=80, font=(20), bd=6)
adur.place(x=225, y=350)
tk.Radiobutton(windn, text="Yes", variable=v2, value=1, font=(20)).place(x=225,y=400)
tk.Radiobutton(windn, text="No", variable=v2, value=2, font=(20)).place(x=225,y=450)
def buttonClick4():
windv = tk.Toplevel(root)
windv.geometry('700x500+50+50')
windv.title("VEHICLES FOR RENT")
l = tk.Label(windv, text="THE VEHICLES WHICH ARE AVAILABLE FOR RENT WITH US ARE:", font=(20)).grid()
l1 = tk.Label(windv, text="BIKES", font=(20)).grid()
l2 = tk.Label(windv, text="CARS ", font=(20)).grid()
l3 = tk.Label(windv, text="JEEPS", font=(20)).grid()
l4 = tk.Label(windv, text="BUSES", font=(20)).grid()
l5 = tk.Label(windv, text="TRUCKS", font=(20)).grid()
def buttonClick5():
windr = tk.Toplevel(root)
windr.geometry('500x500+50+50')
windr.title("RENTS OF VEHICLES")
l1 = tk.Label(windr, text="RENT OF BIKE IS RS. 300/DAY", font=(20)).grid()
l2 = tk.Label(windr, text="RENT OF CAR IS RS. 1500/DAY", font=(20)).grid()
l3 = tk.Label(windr, text="RENT OF JEEP IS RS. 2000/DAY", font=(20)).grid()
l4 = tk.Label(windr, text="RENT OF BUS IS RS. 9000/DAY", font=(20)).grid()
l5 = tk.Label(windr, text="RENT OF TRUCK IS RS. 10000/DAY", font=(20)).grid()
def buttonClick6(aname, acon, aveh, adob, adur):
windp = tk.Toplevel(root)
windp.geometry('1200x700+50+50')
windp.title("PAYMENT DETAILS")
pay = tk.Label(windp, text="PAYMENT DETAILS", font=('arial',40,'bold'), bd=6, fg="magenta2", anchor='center').place(x=300,y=5)
l1 = tk.Label(windp, text="TOTAL RENT:", font=(20)).place(x=30,y=100)
l = tk.Label(windp, text="Enter the rent as per the vehicle chosen and the number of days for which the vehcile is rented", font=(15)).place(x=225,y=140)
arent = Entry(windp, font=(20),bd=6)
arent.place(x=200, y=100)
l2 = tk.Label(windp, text="PAYMENT VIA:", font=(20)).place(x=30,y=175)
l3 = tk.Label(windp, text="*IN CASE OF ANY DAMAGE DONE TO THE VEHICLE,DAMAGE FINE WILL BE CHARGED.", font=('Verdana',15,'bold'), fg="firebrick1").place(x=30,y=275)
l4 = tk.Label(windp, text="Damage amount is 50% of the rent!!!", font=('Verdana',15,'bold'), fg="firebrick1").place(x=30,y=300)
l5 = tk.Label(windp, text="*IF VEHICLE IS NOT RETURNED BACK ON TIME,LATE FINE WILL BE CHARGED.", font=('Verdana',15,'bold'), fg="firebrick1").place(x=30,y=350)
l6 = tk.Label(windp, text="The late fine is 25% of the rent(if late by a day)!!!", font=('Verdana',15,'bold'), fg="firebrick1").place(x=30,y=375)
tk.Radiobutton(windp, text="Credit Card", variable=v3, value=1, font=(20)).place(x=200,y=175)
tk.Radiobutton(windp, text="Cash", variable=v3, value=2, font=(20)).place(x=200,y=225)
ok = Button(windp, text="SUBMIT", font=('arial',20,'bold'), fg="black", bg="cyan2", relief="raise", command=lambda:insertt(aname, acon, aveh, adob, adur, arent)).place(x=525,y=500)
createtable()
root.title('Vehicle Rental Agency')
root.geometry('1350x700+100+50')
root.config(bg="sky blue")
backgrnd = Frame(root, width=1600, height=300, relief="raise", bg="sky blue")
backgrnd.pack(side = TOP)
backgrnd1 = Frame(root, width=1600, height=400, relief="raise", bg="sky blue")
backgrnd1.pack(side = TOP)
label1 = Label(backgrnd, font=('times',35,'bold'), text="***VEHICLE RENTAL AGENCY***", fg='black', bd=10, bg="plum1").grid()
Button(backgrnd1, text="ADMIN", width=40, height=3, command=buttonClickA, bg="blue", bd=7, relief="raise", font=(30)).place(x=450,y=150)
Button(backgrnd1, text="CUSTOMER", width=40, height=3, command=btnClickLoginRegister, bg="blue", bd=7, relief="raise", font=(30)).place(x=450,y=280)
root.mainloop()
|
karankhat/Vehicle_Rental_Agency
|
python.py
|
python.py
|
py
| 14,870 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20899251668
|
import os  # used below for path handling; the star import may not provide it
from utilities import *
def main():
# Sampling
print("Sampling the room...")
frames = sample_noise()
name, _ = write_noise("sample.wav", frames)
new_frames = create_noise(name)
name,_ = write_noise("whitenoise.wav", new_frames)
player = WavePlayerLoop("whitenoise.wav",True)
player.play()
# Play
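    # Alternate between two buffer files so a fresh room sample can be
    # written while the previous noise file keeps playing.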
um = True
try:
while True:
if um:
filename = "whitenoise0.wav"
else:
filename = "whitenoise1.wav"
frames = sample_noise()
print("Sampled")
name, _ = write_noise(filename, frames)
new_frames = create_noise(name)
name,_ = write_noise(filename, new_frames)
player.filepath = os.path.abspath(filename)
print("Changed")
os.remove(filename)
um = not um
except KeyboardInterrupt:
print("Thanks!")
return
main()
|
mx60s/tamuhack2019
|
main.py
|
main.py
|
py
| 944 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24447759905
|
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.state import ctx_parameters as inputs
@operation
def set_floating_ip_on_port(**_):
"""
Use this operation when connecting a host to a floating IP. This operation
will set the `public_ip` runtime property on the host instance
"""
floating_ip = ctx.target.instance.runtime_properties['floating_ip_address']
ctx.source.instance.runtime_properties['floating_ip_address'] = floating_ip
ctx.source.instance.update()
ctx.logger.info('Setting floating IP {0} for `{1}`'.format(
floating_ip,
ctx.source.instance.id
))
def _get_ip_address_and_hostname():
"""
Get IP address and hostname from their respective resource pools and
update the resource_pool object's runtime properties
:return: A tuple (ip_address, hostname)
"""
resource_pool = ctx.target.instance.runtime_properties['resource_pool']
resource = resource_pool.pop(0)
ctx.target.instance.runtime_properties['resource_pool'] = resource_pool
ctx.target.instance.update()
return resource['ip_address'], resource['hostname']
@operation
def get_resources_from_resource_pool(**_):
"""
Get one of each IP address and hostname from the resource pool and keep
them in the `resource` object's runtime props
This operation runs in a relationship where `resource` is the source
and `resource_pool` is the target
"""
ip_address, fixed_hostname = _get_ip_address_and_hostname()
ctx.logger.info('Setting IP `{0}` for instance `{1}`'.format(
ip_address, ctx.source.instance.id
))
ctx.logger.info('Setting hostname `{0}` for instance `{1}`'.format(
fixed_hostname, ctx.source.instance.id
))
ctx.source.instance.runtime_properties['fixed_ip'] = ip_address
ctx.source.instance.runtime_properties['fixed_hostname'] = fixed_hostname
ctx.source.instance.update()
@operation
def setup_resource_pool(**_):
""" Create the resource pool from the user's inputs """
ctx.instance.runtime_properties['resource_pool'] = inputs['resource_pool']
ctx.instance.update()
ctx.logger.info(
'Setting resource pool: {0}'.format(inputs['resource_pool'])
)
@operation
def set_ip_from_port(**_):
"""
Use this operation to pass an IP from a port object to the host. This
operation will set the `public_ip` runtime property on the host instance
"""
port_runtime_props = ctx.target.instance.runtime_properties
private_ip = port_runtime_props['fixed_ip_address']
# If the port has a floating IP attached to it, we should use as the
# public IP. If not, just use the internal fixed_ip_address
public_ip = port_runtime_props.get('floating_ip_address', private_ip)
ctx.source.instance.runtime_properties['public_ip'] = public_ip
ctx.source.instance.update()
ctx.logger.info('Setting IP {0} from port for `{1}`'.format(
public_ip,
ctx.source.instance.id
))
|
Cloudify-PS/manager-of-managers
|
plugins/cmom/cmom/misc/ip.py
|
ip.py
|
py
| 3,018 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25632521939
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'CwT'
from queue import Queue, Empty
import logging
import traceback
from selenium.common.exceptions import TimeoutException
from . import Global
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Scheduler(object):
def __init__(self):
self.FIFOqueue = Queue()
def wait(self):
logger.debug("start to exit, remaining tasks %d" % self.FIFOqueue.qsize())
self.FIFOqueue.join()
def add_task(self, target, depth, data=None):
# print("Add one target to scheduler", target)
self.FIFOqueue.put((target, data, depth))
def get_task(self, block=False):
return self.FIFOqueue.get(block=block)
def run(self, browser, scanner, setting):
try:
while True:
# print("Get one", self.FIFOqueue.qsize())
target, data, depth = self.get_task()
# print("Target: ", target)
options = {
"url": target,
"batch": True,
"level": setting.level,
"threads": setting.threads,
"timeout": setting.timeout
}
if data:
post_data = '&'.join(["%s=%s" % (k, v) for k, v in data.items()])
options["data"] = post_data
if setting.test:
logger.debug("options: %s" % options)
if not setting.test:
scanner.add_and_start(**options)
try:
                    # chained comparison: stop descending once the depth limit
                    # is reached, unless the limit is -1 (unlimited)
                    if depth >= setting.depth != -1:
                        continue
# record the depth we are dealing with before we actually get the page
Global.CURRENT_DEPTH = depth
if data:
browser.post(target, data)
else:
browser.get(target)
except TimeoutException:
pass
finally:
self.FIFOqueue.task_done()
except Empty:
logger.debug("Empty queue, ready to quit")
pass
except Exception as e:
logger.error("something wrong happened!! %s" % e.message)
logger.error(type(e))
traceback.print_exc()
while not self.FIFOqueue.empty():
self.get_task()
self.FIFOqueue.task_done()
raise
|
futurelighthouse/crawler_sqlmap
|
crawler/util/scheduler.py
|
scheduler.py
|
py
| 2,516 |
python
|
en
|
code
| null |
github-code
|
6
|
9439114675
|
import shutil
import os
import random
import argparse
import sys
import traceback
parser = argparse.ArgumentParser(description="Choose a random number of individual files from a data repository")
parser.add_argument("-fs", "--files", help="Set the path to the directory with the XML files", required=True)
parser.add_argument("-p", "--population", help="Set the number of files that will be selected", type=int, required=True)
parser.add_argument("-s", "--seed", help="Set the seed to obtain previous results", default=None)
parser.add_argument("-f", "--filter", help="Specify keywords to filter out specific files; the first element is the field to filter, all following elements are the keywords; keywords are separated by comma")
parser.add_argument("-d", "--delete", help="Delete the output folder 'selected_files' if it already exists", action="store_true")
args = parser.parse_args()
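# Example invocation (paths and keywords are hypothetical):
#   python random_file_selector.py -fs ./xml_repo -p 100 -f keywords,bird,insect -d
# selects 100 random XML files whose <keywords> field contains "bird" or "insect".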
try:
filters = None
if(args.filter != None):
filters = args.filter.split(",")
if(filters != None and len(filters) < 2):
raise Exception("The '-f/--filter' option needs at least two elements")
if(os.path.exists(args.files + "/selected_files")):
if(args.delete):
shutil.rmtree(args.files + "/selected_files")
else:
raise Exception("The output folder 'selected_files' in the directory '" + args.files + "' already exists. Delete it manually or use the '-d/--delete' option.")
file_list = []
print("\rLoading files...", end="")
number_of_files = 0
for dirpath, dirnames, filenames in os.walk(args.files):
for file in filenames:
if(file.endswith(".xml")):
if(filters != None):
                    with open(os.path.join(dirpath, file), "r") as xml_reader:
content = xml_reader.read().strip()
if("<" + filters[0] + ">" in content):
items = content.split("<" + filters[0] + ">")[-1].split("</" + filters[0] + ">")[0].split("|")
for item in items:
if(item.strip() in filters[1:]):
                                file_list.append(os.path.join(dirpath, file))
number_of_files += 1
print("\rLoaded " + str(number_of_files) + " file(s)", end="")
break
else:
                    file_list.append(os.path.join(dirpath, file))
number_of_files += 1
print("\rLoaded " + str(number_of_files) + " file(s)", end="")
print("\rLoading files -> done")
if(not len(file_list)):
raise Exception("No XML file found in path '" + args.files + "' or all files were filtered out.")
if(args.population > len(file_list)):
raise Exception("The population size cannot be larger than the number of files.")
if(args.seed == None):
args.seed = str(random.randrange(sys.maxsize))
random.seed(args.seed)
print("\rSelecting randomly " + str(args.population) + " files...", end="")
selected_files = random.sample(file_list, args.population)
print("\rSelecting randomly " + str(args.population) + " files -> done")
os.mkdir(args.files + "/selected_files")
progress = 0
for file in selected_files:
        shutil.copyfile(file, args.files + "/selected_files/" + os.path.basename(file))
progress += 1
print("\rCopy progress: " + str(int((progress/len(selected_files))*100)) + "%", end="")
print("\rCopy progress: finished")
with open(args.files + "/selected_files/seed.txt", "w") as seedWriter:
print("Seed: " + args.seed)
seedWriter.write(str(args.seed))
except Exception as ex:
print(ex)
print(traceback.format_exc())
|
fusion-jena/QuestionsMetadataBiodiv
|
data_repositories/random_file_selector.py
|
random_file_selector.py
|
py
| 3,800 |
python
|
en
|
code
| 4 |
github-code
|
6
|
42287839856
|
import argparse
import os.path
import glob
from snakePipes import __version__
def ListGenomes():
"""
Return a list of all genome yaml files (sans the .yaml suffix)
"""
dName = os.path.dirname(__file__)
genomes = [os.path.basename(f)[:-5] for f in glob.glob(os.path.join(dName, "shared/organisms/*.yaml"))]
return genomes
def mainArguments(defaults, workingDir=False, createIndices=False, preprocessing=False):
"""
Return a parser with the general and required args. This will include EITHER
a -d option OR -i and -o, depending on the workingDir setting
defaults is a dictionary of default values
A number of standard arguments are eliminated in the createIndices workflow.
"""
# Set up some defaults for the sake of readthedocs
if 'smtpServer' not in defaults:
defaults['smtpServer'] = None
if 'smtpPort' not in defaults:
defaults['smtpPort'] = 0
if 'onlySSL' not in defaults:
defaults['onlySSL'] = False
if 'emailSender' not in defaults:
defaults['emailSender'] = None
parser = argparse.ArgumentParser(add_help=False)
if not createIndices and not preprocessing:
genomes = ListGenomes()
parser.add_argument("genome", metavar="GENOME", help="Genome acronym of the target organism. Either a yaml file or one of: {}".format(", ".join(genomes)))
required = parser.add_argument_group('Required Arguments')
if workingDir:
required.add_argument("-d", "--working-dir",
dest="workingdir",
help="working directory is output directory and must contain DNA-mapping pipeline output files",
required=True)
else:
if not createIndices:
required.add_argument("-i", "--input-dir",
dest="indir",
required=True,
help="input directory containing the FASTQ files, either paired-end OR single-end data")
required.add_argument("-o", "--output-dir",
dest="outdir",
required=True,
help="output directory")
general = parser.add_argument_group('General Arguments')
general.add_argument("-h", "--help",
action="help",
help="show this help message and exit")
general.add_argument("-v", "--verbose",
dest="verbose",
action="store_true",
help="verbose output (default: '%(default)s')",
default=defaults["verbose"])
if not workingDir and not createIndices:
general.add_argument("--ext",
help="Suffix used by input fastq files (default: '%(default)s').",
default=defaults["ext"])
general.add_argument("--reads",
nargs=2,
help="Suffix used to denote reads 1 and 2 for paired-end data. This should typically be either '_1' '_2' or '_R1' '_R2' (default: '%(default)s). "
"Note that you should NOT separate the values by a comma (use a space) or enclose them in brackets.",
default=defaults["reads"])
general.add_argument("-c", "--configFile",
help="configuration file: config.yaml (default: '%(default)s')",
default=defaults["configFile"])
general.add_argument("--clusterConfigFile",
help="configuration file for cluster usage. In absence, the default options "
"specified in defaults.yaml and workflows/[workflow]/cluster.yaml would be selected (default: '%(default)s')",
default=defaults["clusterConfigFile"])
general.add_argument("-j", "--jobs",
dest="maxJobs",
metavar="INT",
help="maximum number of concurrently submitted Slurm jobs / cores if workflow is run locally (default: '%(default)s')",
type=int, default=defaults["maxJobs"])
general.add_argument("--local",
dest="local",
action="store_true",
default=False,
help="run workflow locally; default: jobs are submitted to Slurm queue (default: '%(default)s')")
general.add_argument("--keepTemp",
action="store_true",
help="Prevent snakemake from removing files marked as being temporary (typically intermediate files that are rarely needed by end users). This is mostly useful for debugging problems.")
general.add_argument("--snakemakeOptions",
action="append",
help="Snakemake options to be passed directly to snakemake, e.g. use --snakemakeOptions='--dryrun --rerun-incomplete --unlock --forceall'. WARNING! ONLY EXPERT USERS SHOULD CHANGE THIS! THE DEFAULT VALUE WILL BE APPENDED RATHER THAN OVERWRITTEN! (default: '%(default)s')",
default=[defaults["snakemakeOptions"]])
general.add_argument("--DAG",
dest="createDAG",
action="store_true",
help="If specified, a file ending in _pipeline.pdf is produced in the output directory that shows the rules used and their relationship to each other.")
general.add_argument("--version",
action="version",
version="%(prog)s {}".format(__version__))
emailArgs = parser.add_argument_group('Email Arguments')
emailArgs.add_argument("--emailAddress",
help="If specified, send an email upon completion to the given email address")
emailArgs.add_argument("--smtpServer",
default=defaults["smtpServer"],
help="If specified, the email server to use.")
emailArgs.add_argument("--smtpPort",
type=int,
default=defaults["smtpPort"],
help="The port on the SMTP server to connect to. A value of 0 specifies the default port.")
emailArgs.add_argument("--onlySSL",
action="store_true",
default=defaults["onlySSL"],
help="The SMTP server requires an SSL connection from the beginning.")
emailArgs.add_argument("--emailSender",
default=defaults["emailSender"],
help="The address of the email sender. If not specified, it will be the address indicated by `--emailAddress`")
emailArgs.add_argument("--smtpUsername",
help="If your SMTP server requires authentication, this is the username to use.")
emailArgs.add_argument("--smtpPassword",
help="If your SMTP server requires authentication, this is the password to use.")
return parser
def snpArguments(defaults):
"""
Arguments related to allele-specific pipelines
"""
parser = argparse.ArgumentParser(add_help=False)
snpargs = parser.add_argument_group('Allele-specific mapping arguments')
snpargs.add_argument("--VCFfile",
default='',
help="VCF file to create N-masked genomes (default: 'None')")
snpargs.add_argument("--strains",
default='',
help="Name or ID of SNP strains separated by comma (default: 'None')")
snpargs.add_argument("--SNPfile",
default='',
help="File containing SNP locations (default: 'None')")
snpargs.add_argument("--NMaskedIndex",
default='',
help="N-masked index of the reference genome (default: 'None')")
return parser
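# A minimal sketch (the defaults dict below is hypothetical) of how these
# parser fragments are typically combined via argparse's `parents` mechanism:
#   defaults = {"verbose": False, "configFile": None, "clusterConfigFile": None,
#               "maxJobs": 5, "snakemakeOptions": "", "ext": ".fastq.gz",
#               "reads": ["_R1", "_R2"]}
#   parser = argparse.ArgumentParser(parents=[mainArguments(defaults), snpArguments(defaults)])
#   args = parser.parse_args()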
# DNA-mapping options added
def commonOptions(grp, defaults, bw=True, plots=True, preprocessing=False):
"""
Common options found in many workflows
grp is an argument group that's simply appended to
"""
if not preprocessing:
grp.add_argument("--downsample",
dest="downsample",
metavar="INT",
help="Downsample the given number of reads randomly from of each FASTQ file (default: '%(default)s')",
type=int,
default=defaults["downsample"])
grp.add_argument("--trim",
dest="trim",
action="store_true",
help="Activate fastq read trimming. If activated, Illumina adaptors are trimmed by default. "
"Additional parameters can be specified under --trimmerOptions. (default: '%(default)s')",
default=defaults["trim"])
grp.add_argument("--trimmer",
dest="trimmer",
choices=['cutadapt', 'trimgalore', 'fastp'],
help="Trimming program to use: Cutadapt, TrimGalore, or fastp. Note that if you change this you may "
"need to change --trimmerOptions to match! (default: '%(default)s')",
default=defaults["trimmer"])
grp.add_argument("--trimmerOptions",
dest="trimmerOptions",
help="Additional option string for trimming program of choice. (default: '%(default)s')",
default=defaults["trimmerOptions"])
grp.add_argument("--fastqc",
dest="fastqc",
action="store_true",
help="Run FastQC read quality control (default: '%(default)s')",
default=defaults["fastqc"])
grp.add_argument("--bcExtract",
dest="UMIBarcode",
action="store_true",
help="To extract umi barcode from fastq file via UMI-tools and add it to the read name "
"(default: '%(default)s')",
default=defaults["UMIBarcode"])
grp.add_argument("--bcPattern",
help="The pattern to be considered for the barcode. 'N' = UMI position (required) 'C' = barcode position (optional) "
"(default: '%(default)s')",
default=defaults["bcPattern"])
if not preprocessing:
grp.add_argument("--UMIDedup",
action="store_true",
help="Deduplicate bam file based on UMIs via `umi_tools dedup` that are present in the read name. "
"(default: '%(default)s')",
default=defaults["UMIDedup"])
grp.add_argument("--UMIDedupSep",
help="umi separation character "
"that will be passed to umi_tools."
"(default: '%(default)s')",
default=defaults["UMIDedupSep"])
grp.add_argument("--UMIDedupOpts",
help="Additional options that will be passed to umi_tools."
"(default: '%(default)s')",
default=defaults["UMIDedupOpts"])
if bw and not preprocessing:
grp.add_argument("--bwBinSize",
dest="bwBinSize",
help="Bin size of output files in bigWig format (default: '%(default)s')",
type=int,
default=defaults["bwBinSize"])
if plots and not preprocessing:
grp.add_argument("--plotFormat",
choices=['png', 'pdf', 'None'],
metavar="STR",
type=str,
help="Format of the output plots from deepTools. Select 'none' for no plots (default: '%(default)s')",
default=defaults["plotFormat"])
|
maxplanck-ie/snakepipes
|
snakePipes/parserCommon.py
|
parserCommon.py
|
py
| 12,233 |
python
|
en
|
code
| 355 |
github-code
|
6
|
29579806350
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 12:41:01 2018
@author: Akitaka
"""
# 1: Import libraries --------------------------------
import numpy as np                 # numpy for matrices and arrays
import pandas as pd                # pandas for data analysis
import matplotlib.pyplot as plt    # plotting
from sklearn import preprocessing, manifold  # ML utilities (the deprecated, unused cross_validation and decomposition imports were dropped)
from sklearn import datasets       # the dataset used below
# 2: Load the moon-shaped dataset --------------------------------
X,Y = datasets.make_moons(n_samples=200, noise=0.05, random_state=0)
# 3: Standardize the data -------------------------------------------------------
sc=preprocessing.StandardScaler()
sc.fit(X)
X_norm=sc.transform(X)
# 4: Run Isomap -------------------------------
isomap = manifold.Isomap(n_neighbors=10, n_components=2)
X_isomap = isomap.fit_transform(X)
# 5: Run LLE (Locally Linear Embedding) -------------------------------
lle = manifold.LocallyLinearEmbedding(n_neighbors=10, n_components=2)
X_lle = lle.fit_transform(X)
# 6: Plot the results -----------------------------
#%matplotlib inline
plt.figure(figsize=(10,10))
plt.subplot(3, 1, 1)
plt.scatter(X[:,0],X[:,1], c=Y)
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(3, 1, 2)
plt.scatter(X_isomap[:,0],X_isomap[:,1], c=Y)
plt.xlabel('IM-1')
plt.ylabel('IM-2')
plt.subplot(3, 1, 3)
plt.scatter(X_lle[:,0],X_lle[:,1], c=Y)
plt.xlabel('LLE-1')
plt.ylabel('LLE-2')
plt.show()  # fixed: the call parentheses were missing
|
nakanishi-akitaka/python2018_backup
|
1207/ml25.py
|
ml25.py
|
py
| 1,632 |
python
|
ja
|
code
| 5 |
github-code
|
6
|
27857361755
|
# Name: Diccionario.py
# Purpose: Demonstrates how dictionaries work
# Author: Rafael Ochoa
# Date: 02/07/2019
lista = []
materias = {"Algoritmos": "100",
"Inteligencia Artificial": "69",
"Base de Datos": "100"}
lista.append(materias)
print(lista)
|
Rafa8a/Automatas2
|
Diccionario.py
|
Diccionario.py
|
py
| 287 |
python
|
es
|
code
| 0 |
github-code
|
6
|
21353904775
|
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: [str]) -> int:
wordList.append(beginWord)
wordList = list(set(wordList))
s_len = len(beginWord)
n = len(wordList)
wd_ids = [i for i in range(n)]
wd2id = dict(zip(wordList, wd_ids))
if not endWord in wordList:
return 0
aj_mat = [[0]*n for _ in range(n)]
        # build the adjacency matrix: two words are connected if they differ by exactly one letter
for i, wd in enumerate(wordList):
wd = list(wd)
for j in range(s_len):
old_c = wd[j]
for c in range(97, 123):
c = chr(c)
if c == old_c:
continue
wd[j] = c
new_s = ''.join(wd)
if new_s in wd2id:
aj_mat[i][wd2id[new_s]] = 1
wd[j] = old_c
# BFS
b_idx = wordList.index(beginWord)
que = [b_idx]
been = [0]*n
been[b_idx] = 1
while len(que) != 0:
# print(been)
curr = que[0]
del que[0]
if wordList[curr] == endWord:
break
for i in range(n):
if aj_mat[curr][i] and not been[i]:
been[i] = been[curr] + 1
que.append(i)
e_idx = wordList.index(endWord)
return been[e_idx]
s = Solution()
r = s.ladderLength(
'hit', 'cog', ["hot","dot","dog","lot","log","cog"]
)
print(r)
|
Alex-Beng/ojs
|
FuckLeetcode/127. 单词接龙.py
|
127. 单词接龙.py
|
py
| 1,603 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73573190267
|
import pymysql
from dbutils.pooled_db import PooledDB
class MysqlPool:
config = {
'creator': pymysql,
'host': "127.0.0.1",
'port': 3306,
'user': "tron",
'password': "123456",
'db': "vecrv_sun_airdrop_claimed",
'charset': 'utf8',
'maxconnections': 70,
'cursorclass': pymysql.cursors.DictCursor
}
pool = PooledDB(**config)
def __enter__(self):
self.conn = MysqlPool.pool.connection()
self.cursor = self.conn.cursor()
return self
def __exit__(self, type, value, trace):
if type == None or type == 0:
self.conn.commit()
else:
print(f"mysql exec failed: \"{self.cursor._last_executed}\"\n"
f"{type.__name__}{value}\n"
f"{trace}")
self.conn.rollback()
self.cursor.close()
self.conn.close()
def db_conn(func):
def wrapper(*args, **kw):
with MysqlPool() as db:
result = func(db, *args, **kw)
return result
return wrapper
class Mysql:
@staticmethod
@db_conn
def getAll(db, sql, param=None):
"""
        @summary: Execute a query and fetch the entire result set
        @param sql: the query SQL; for conditions use placeholders and pass the values via [param]
        @param param: optional, the condition values (tuple/list)
        @return: result list (of dicts) / boolean - the fetched result set
"""
if param is None:
count = db.cursor.execute(sql)
else:
count = db.cursor.execute(sql, param)
if count>0:
result = db.cursor.fetchall()
else:
result = False
return result
@staticmethod
@db_conn
def getOne(db, sql, param=None):
"""
        @summary: Execute a query and fetch the first row
        @param sql: the query SQL; for conditions use placeholders and pass the values via [param]
        @param param: optional, the condition values (tuple/list)
        @return: result list/boolean - the fetched row
"""
if param is None:
count = db.cursor.execute(sql)
else:
count = db.cursor.execute(sql, param)
if count>0:
result = db.cursor.fetchone()
else:
result = False
return result
@staticmethod
@db_conn
def getMany(db, sql, num, param=None):
"""
        @summary: Execute a query and fetch num rows
        @param sql: the query SQL; for conditions use placeholders and pass the values via [param]
        @param num: the number of rows to fetch
        @param param: optional, the condition values (tuple/list)
        @return: result list/boolean - the fetched result set
"""
if param is None:
count = db.cursor.execute(sql)
else:
count = db.cursor.execute(sql, param)
if count>0:
result = db.cursor.fetchmany(num)
else:
result = False
return result
@staticmethod
@db_conn
def insertOne(db, sql, value):
"""
        @summary: Insert one record into a table
        @param sql: the INSERT SQL template
        @param value: the record values, tuple/list
        @return: insertId - the id generated by the insert (the original comment said "affected rows", but the code returns the last insert id)
"""
db.cursor.execute(sql, value)
return Mysql.__getInsertId(db)
@staticmethod
@db_conn
def insertMany(db, sql, values):
"""
        @summary: Insert multiple records into a table
        @param sql: the INSERT SQL template
        @param values: the record values, tuple(tuple)/list[list]
        @return: count - the number of affected rows
"""
count = db.cursor.executemany(sql,values)
return count
@staticmethod
def __getInsertId(db):
"""
        Get the id generated by the last insert on the current connection, 0 if none
"""
db.cursor.execute("SELECT @@IDENTITY AS id")
result = db.cursor.fetchall()
return result[0]['id']
@staticmethod
def __query(db, sql, param=None):
if param is None:
count = db.cursor.execute(sql)
else:
count = db.cursor.execute(sql, param)
return count
@staticmethod
@db_conn
def update(db, sql, param=None):
"""
        @summary: Update table records
        @param sql: the SQL template and conditions, using (%s,%s) placeholders
        @param param: the values to update, tuple/list
        @return: count - the number of affected rows
"""
return Mysql.__query(db, sql, param)
@staticmethod
@db_conn
def delete(db, sql, param=None):
"""
        @summary: Delete table records
        @param sql: the SQL template and conditions, using (%s,%s) placeholders
        @param param: the condition values for deletion, tuple/list
        @return: count - the number of affected rows
"""
return Mysql.__query(db, sql, param)
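# A short usage sketch (table and column names are hypothetical):
#   rows = Mysql.getAll("SELECT * FROM users WHERE age > %s", (18,))
#   new_id = Mysql.insertOne("INSERT INTO users (name) VALUES (%s)", ("bob",))
#   Mysql.update("UPDATE users SET name = %s WHERE id = %s", ("alice", new_id))
# Each call checks a connection out of the pool via @db_conn and commits
# (or rolls back on error) when the `with MysqlPool()` block exits.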
|
dpneko/pyutil
|
mysql_client.py
|
mysql_client.py
|
py
| 5,065 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
36562104917
|
import sys
from scipy.sparse import csr_matrix
import numpy
import re
from collections import Counter
number = '[0-9]+'
isNumber = re.compile(number)
FREQ_THRESH = 5
def normalize_word(word):
if isNumber.search(word):
return '---$$$---'
else:
return word
def trim_vocab(vocab):
new_index = 0
for word, freq in vocab.items():
if freq <= FREQ_THRESH:
del vocab[word]
else:
vocab[word] = new_index
new_index += 1
return vocab
def get_vocab(fileName, lang1Vocab=Counter(), lang2Vocab=Counter()):
numLines = 0
for line in open(fileName, 'r'):
numLines += 1
lang1, lang2 = line.split('|||')
lang1 = unicode(lang1.strip().lower(), 'utf-8')
lang2 = unicode(lang2.strip().lower(), 'utf-8')
for word in lang1.split():
word = normalize_word(word)
lang1Vocab[word] += 1
for word in lang2.split():
word = normalize_word(word)
lang2Vocab[word] += 1
#trim the vocab by frequency and replace frequency by unique number
return numLines, trim_vocab(lang1Vocab), trim_vocab(lang2Vocab)
def convert_dict_to_csr_matrix(matrixDict, sizeData, langVocab):
row = numpy.zeros(len(matrixDict), dtype=int)
col = numpy.zeros(len(matrixDict), dtype=int)
values = numpy.zeros(len(matrixDict), dtype=int)
index = 0
for (r, c), val in matrixDict.iteritems():
row[index] = r
col[index] = c
values[index] = val
index += 1
matrixLang = csr_matrix((values,(row,col)), shape=(sizeData,len(langVocab)))
return matrixLang
def get_parallel_cooccurence_arrays(fileName, lang1Vocab, lang2Vocab, sizeData):
matrixDict1 = Counter()
numLine = 0
for line in open(fileName, 'r'):
lang1, lang2 = line.split('|||')
lang1 = unicode(lang1.strip().lower(), 'utf-8')
lang2 = unicode(lang2.strip().lower(), 'utf-8')
for word in lang1.split():
word = normalize_word(word)
if word in lang1Vocab:
# we want count of the words on the input
matrixDict1[(numLine,lang1Vocab[word])] += 1
numLine += 1
matrixLang1 = convert_dict_to_csr_matrix(matrixDict1, sizeData, lang1Vocab)
del matrixDict1
matrixDict2 = Counter()
numLine = 0
for line in open(fileName, 'r'):
lang1, lang2 = line.split('|||')
lang1 = unicode(lang1.strip().lower(), 'utf-8')
lang2 = unicode(lang2.strip().lower(), 'utf-8')
for word in lang2.split():
word = normalize_word(word)
if word in lang2Vocab:
# we want probability of occurrence on the output
matrixDict2[(numLine,lang2Vocab[word])] = 1
numLine += 1
matrixLang2 = convert_dict_to_csr_matrix(matrixDict2, sizeData, lang2Vocab)
del matrixDict2
return (matrixLang1, matrixLang2)
def get_datasets(trFile, valFile):
sizeTrData, lang1Vocab, lang2Vocab = get_vocab(trFile)
sizeValData, lang1Vocab, lang2Vocab = get_vocab(valFile, lang1Vocab, lang2Vocab)
sys.stderr.write("\nFiles read...\n")
sys.stderr.write("Total vocab sizes: lang1 = {0}, lang2 = {1}\n".format(len(lang1Vocab), len(lang2Vocab)))
sys.stderr.write("Size of files: Train = {0}, Val = {1}\n".format(sizeTrData, sizeValData))
datasets = []
datasets.append(get_parallel_cooccurence_arrays(trFile, lang1Vocab, lang2Vocab, sizeTrData))
datasets.append(get_parallel_cooccurence_arrays(valFile, lang1Vocab, lang2Vocab, sizeValData))
return datasets
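# A minimal usage sketch (file names are hypothetical; note this is Python 2
# code - it relies on unicode() and dict.iteritems()). Each input line must
# look like "source sentence ||| target sentence":
#   train_set, val_set = get_datasets("train.txt", "val.txt")
#   trainX, trainY = train_set  # sparse CSR matrices of shape (lines, vocab)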
|
mfaruqui/vector-semantics
|
src/nn/process_parallel_data.py
|
process_parallel_data.py
|
py
| 3,788 |
python
|
en
|
code
| 5 |
github-code
|
6
|
34181193922
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 8 14:11:28 2020
@author: Kollarlab
"""
from Instruments.HDAWG import HDAWG
from Instruments.SGS import RFgen
import numpy
import time
import sys
import scipy
import pylab
import scipy.optimize
from mplcursors import cursor as datacursor
import threading
from userfuncs import freeze
import userfuncs as uf
from Acqiris_development.Acqiris import Acqiris
hardwareAddress = "PXI23::0::0::INSTR"
IVIbinPath = "C:\\Program Files\\IVI Foundation\\IVI\\Bin\\"
if not IVIbinPath in sys.path:
sys.path.append(IVIbinPath)
######################
#measurement parameters
measDur = 5e-6
numFreqs = 20
freqs = numpy.linspace(4e9,10e9, numFreqs)
freqs = numpy.flipud(freqs)
numPoints = 15
#phases = numpy.linspace(0, numpy.pi,numPoints)
phases = numpy.linspace(0, 360,numPoints)
#setup the digitizer
#card = Acqiris(hardwareAddress)  # assumed to exist already in the interactive session
card.triggerSlope = 'Rising'
card.triggerLevel = 0.1
card.averages = 1 #on-board averages
card.segments = 1
card.triggerDelay = 0
card.activeChannels = [1,2]
card.verbose = False
card.sampleRate = 2e9
card.clockSource = 'External'
card.channelRange = 0.5
card.samples = numpy.ceil(measDur*card.sampleRate)
card.SetParams() # warning: this may round the number of samples to a multiple of 1024
##set up the HDAWG.
##in this case, we just need channels 3,4 for our fake clock
#### Connect to HDAWG and initialize it
#hdawg = HDAWG('dev8163') #HDAWG device name
##hdawg.AWGs[0].samplerate = '2.4GHz'
##hdawg.channelgrouping = '1x4'
##hdawg.Channels[0].configureChannel(amp=1.0,marker_out='Marker', hold='True')
##hdawg.Channels[1].configureChannel(marker_out='Trigger', hold='True')
##hdawg.AWGs[0].Triggers[0].configureTrigger(slope='rising',channel='Trigger in 1')
###hdawg.daq.setInt('/dev8163/awgs/0/outputs/0/hold',1)
###hdawg.daq.setInt('/dev8163/awgs/0/outputs/1/hold',1)
#hdawg.OSCs[1].freq = 10e6
#hdawg.Channels[2].analog_outs = [0.5,0]
#hdawg.Channels[3].analog_outs = [0,0.5]
#hdawg.Channels[2].configureChannel(amp=1.0)
#hdawg.Channels[3].configureChannel(amp=1.0)
#lo generator
#(upper, 110738)
#freq = 8 GHz
#level = 12 dBm
#rf on
#mod off
#ext ref on (for good phase), or ext ref off for random phase
logen = RFgen('TCPIP0::rssgs100a110738::inst0::INSTR')
logen.set_Freq(8)
logen.set_Amp(12)
logen.mod_Off()
#logen.set_Internal_Reference()
logen.set_External_Reference()
logen.power_On()
#rf generator
#(lower, 110739)
#freq = 8 GHz
#level = 0 dBm
#rf on
#mod off
#ext ref on
rfgen = RFgen('TCPIP0::rssgs100a110739::inst0::INSTR')
rfgen.set_Freq(8)
rfgen.set_Amp(-4)
rfgen.mod_Off()
rfgen.set_External_Reference()
rfgen.power_On()
def plot_fig1():
fig = pylab.figure(1)
pylab.clf()
ax = pylab.subplot(1,1,1)
pylab.plot(Is, Qs, linestyle = '', marker = 'o', markersize = 5, color = 'mediumblue')
pylab.plot(xx, yy, color = 'firebrick')
    # Move left y-axis and bottom x-axis to centre, passing through (0,0)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_aspect('equal')
titleStr = 'Mixer performance at ' + str(numpy.round(freq_GHz, 3)) + ' GHz'
pylab.title(titleStr)
# pylab.show(block = False)
datacursor()
fig.canvas.draw()
fig.canvas.flush_events()
return
def plot_main_fig(fig):
fig.clf()
ax = pylab.subplot(1,1,1)
pylab.plot(Is, Qs, linestyle = '', marker = 'o', markersize = 5, color = 'mediumblue')
pylab.plot(xx, yy, color = 'firebrick')
    # Move left y-axis and bottom x-axis to centre, passing through (0,0)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_aspect('equal')
titleStr = 'Mixer performance at ' + str(numpy.round(freq_GHz, 3)) + ' GHz'
pylab.title(titleStr)
# pylab.show(block = False)
datacursor()
pylab.title('thread test figure')
fig.canvas.draw()
fig.canvas.flush_events()
return
def thread_test():
print(1)
time.sleep(1)
print(2)
time.sleep(1)
print(3)
time.sleep(1)
return
def thread_fig(fig):
ax= pylab.subplot(1,1,1)
xs = numpy.linspace(-5,5,50)
ys = xs**2
pylab.plot(xs, ys)
datacursor()
fig.canvas.draw()
fig.canvas.flush_events()
return
stigVec = numpy.zeros(len(freqs))
phiVec = numpy.zeros(len(freqs))
for find in range(0, len(freqs)):
freq = freqs[find]
freq_GHz = freq/1e9
rfgen.set_Freq(freq_GHz)
logen.set_Freq(freq_GHz)
time.sleep(0.05)
Idata = numpy.zeros(card.samples)
Qdata = numpy.zeros(card.samples)
Amps = numpy.zeros(numPoints)
Angles = numpy.zeros(numPoints)
Is = numpy.zeros(numPoints)
Qs = numpy.zeros(numPoints)
for tind in range(0, numPoints):
rfgen.set_Phase(phases[tind])
time.sleep(0.05)
card.ArmAndWait()
Idata, Qdata = card.ReadAllData()
Iav = numpy.mean(Idata)
Qav = numpy.mean(Qdata)
Amp = numpy.sqrt(Iav**2 + Qav**2)
Angle = numpy.arctan2(Iav, Qav)*180/numpy.pi
Amps[tind] = Amp
Angles[tind] = Angle
Is[tind] = Iav
Qs[tind] = Qav
mixerAxes, mixerCenter, mixerPhi = uf.fitEllipse(Is,Qs, verbose = True)
xx, yy = uf.make_elipse(mixerAxes, mixerCenter, mixerPhi, 150)
stig = (mixerAxes[1]-mixerAxes[0])/numpy.mean(mixerAxes)
stigVec[find] = stig
phiVec[find] = mixerPhi
# fig = pylab.figure(1)
# pylab.clf()
# ax = pylab.subplot(1,1,1)
# pylab.plot(Is, Qs, linestyle = '', marker = 'o', markersize = 5, color = 'mediumblue')
# pylab.plot(xx, yy, color = 'firebrick')
#
#
# # Move left y-axis and bottim x-axis to centre, passing through (0,0)
# ax.spines['left'].set_position('center')
# ax.spines['bottom'].set_position('center')
#
# # Eliminate upper and right axes
# ax.spines['right'].set_color('none')
# ax.spines['top'].set_color('none')
#
# # Show ticks in the left and lower axes only
# ax.xaxis.set_ticks_position('bottom')
# ax.yaxis.set_ticks_position('left')
#
#
# ax.set_aspect('equal')
# titleStr = 'Mixer performance at ' + str(numpy.round(freq_GHz, 3)) + ' GHz'
# pylab.title(titleStr)
## pylab.show(block = False)
#
# datacursor()
#
# fig.canvas.draw()
# fig.canvas.flush_events()
plot_fig1()
# thr = threading.Thread(target=thread_test)
# if numpy.mod(find,4) == 0:
if find == 0:
fig8 = pylab.figure(8)
ax = pylab.subplot(1,1,1)
pylab.plot([1,2], [3,4])
pylab.show()
# thr = threading.Thread(target=thread_fig, kwargs = {'fig': fig8})
thr = threading.Thread(target=plot_main_fig, kwargs = {'fig': fig8})
thr.start()
stigVec_dB = numpy.log10(stigVec+1)*10
fig2 = pylab.figure(2)
pylab.clf()
ax = pylab.subplot(2,2,1)
pylab.plot(freqs/1e9, stigVec, 'b.')
pylab.xlabel('Frequency (GHz)')
pylab.ylabel('Astigmatism (linear)')
pylab.title('Linear Astigmatism')
ax = pylab.subplot(2,2,2)
pylab.plot(freqs/1e9, stigVec_dB, 'r.')
pylab.xlabel('Frequency (GHz)')
pylab.ylabel('Astigmatism (dB)')
pylab.title('Log Astigmatism')
ax = pylab.subplot(2,2,3)
pylab.plot(freqs/1e9, 180*phiVec/numpy.pi, 'b.')
pylab.xlabel('Frequency (GHz)')
pylab.ylabel('Astigmatism Angle (degrees)')
pylab.title('Absolute Astigmatism Angle')
ax = pylab.subplot(2,2,4)
pylab.plot(freqs/1e9, 180*phiVec/numpy.pi - 45, 'r.')
pylab.xlabel('Frequency (GHz)')
pylab.ylabel('Astigmatism Angle (degrees) - 45')
pylab.title('IQ Angle Imbalance')
pylab.suptitle('Mixer Calibration')
pylab.tight_layout()
pylab.show()
rfgen.power_Off()
logen.power_Off()
|
MRitter95/Kollar-Lab
|
Old_scripts_delete_20220804/Control/DataFigureExample.py
|
DataFigureExample.py
|
py
| 8,375 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24532771859
|
import os
raw_img_src = "../../Data/Input_Data/raw_img_data/"
section_ids = ["r1","r2","r3","r4"]
img_type = "xpl"
for i in range(10):
for j in range(10):
if j != 0:
print(j)
break
break
|
JonasLewe/thesis_codebase
|
Code/testing/merged_img_generator.py
|
merged_img_generator.py
|
py
| 245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19855461730
|
import tkinter as tk
class TabFrameTemplate(tk.Frame):
def __init__(self,parent):
self.parent = parent
super().__init__(self.parent)
self["width"] = 1000
self["height"] = 500
self["bg"] = "green"
self.canvas = tk.Canvas(self,bg="#F3BFB3",width=800,height=500)
self.view_port = tk.Frame(self.canvas)
self.vsb = tk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.grid(row=0, column=1, sticky="ns")
self.canvas.grid(row=0, column=0,ipadx=10,ipady=10)
self.canvas.grid_columnconfigure(0, weight=1)
self.canvas_window = self.canvas.create_window((0, 0),
window=self.view_port,
anchor='nw',
tags="self.view_port")
self.view_port.bind("<Configure>", self.onFrameConfigure)
self.canvas.bind("<Configure>", self.onCanvasConfigure)
self.parent.bind("<MouseWheel>", self._on_mousewheel)
self.onFrameConfigure(None)
self.view_port.grid_columnconfigure(0, weight=1)
def _on_mousewheel(self,event):
        try:
            self.canvas.yview_scroll(int(-1*(event.delta/120)), "units")
        except tk.TclError:
            # was a bare except; ignore wheel events that arrive after the canvas is gone
            pass
def onFrameConfigure(self, event):
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def onCanvasConfigure(self, event):
canvas_width = event.width
self.canvas.itemconfig(self.canvas_window, width=canvas_width)
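# A minimal usage sketch (assumes a standalone run):
#   root = tk.Tk()
#   tab = TabFrameTemplate(root)
#   tab.pack(fill="both", expand=True)
#   for i in range(50):  # enough rows to make the canvas scroll
#       tk.Label(tab.view_port, text="row %d" % i).grid(row=i, column=0)
#   root.mainloop()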
|
wrrayos/InventoryCustodianSlip
|
templates/tabFrameTemplate.py
|
tabFrameTemplate.py
|
py
| 1,656 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39045297717
|
import copy
import coordinates as cor
import ctypes
import os
import Email_Sender_Machine as esm
import PyGameSource as pgs
import TreeCalc as tc
import pygame, sys
import math
os.system('cls')
PI = math.pi
pygame.init()
os.system('cls')
windowSize = pygame.display.get_desktop_sizes()
print(windowSize)
window = pygame.display.set_mode(*windowSize)
boardW = 700
boardH = 600
window.fill((255,255,255))
board = pgs.game_board(window,(windowSize[0][0]-boardW)/2,(windowSize[0][1]-boardH)/2,boardW,boardH,7,6)
board.set_color(0,23,0)
board.draw_board(window)
board.circle(window)
RED = (255,0,0)
BLUE = (0,0,255)
col_continue = 1
color_bead = RED
def show_game():
    # these module-level variables are reassigned below, so declare them
    # global to avoid an UnboundLocalError
    global col_continue, color_bead
while True:
board.col_transparency(window)
if board.selected_col != None and col_continue%30==0:
board.beads(window,color_bead)
board.selected_col = None
color_bead = BLUE if color_bead is RED else RED
pgs.start = False
if col_continue>=1*30: break
col_continue+=1
col_continue += 1 if pgs.start else 0
print(col_continue)
for event in pygame.event.get():
if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
pygame.display.update()
def start_game():
global board, RED, BLUE, col_continue , color_bead , window
game_board = [['-' for i in range(7)]for j in range(6)]
game_board_copy=copy.deepcopy(game_board)
column=None
game_finished = False
rows=[5,5,5,5,5,5,5]
process = open('Process.txt','w')
while not game_finished:
tree=tc.Node(game_board_copy)
tc.MAX(tree,tc.turn,rows)
#for shift in range(turn,stop_turn):
if tc.turn%2==0:
maximum=-1000000
for i,index in zip(tree.leaves,range(len(tree.leaves))):
if i==None:
continue
if i.value>maximum:
maximum=i.value
index_max=index
process = open('Process.txt','a',buffering=1)
process.write('index_max '+str(index_max)+'\n')
process.write(str(tree.leaves[index_max].row)+ ' ' +str(tree.leaves[index_max].col)+'\n')
for i in tree.leaves[index_max].rows:
process.write(str(i)+' ')
process.write('\n')
for i in tree.leaves[index_max].map:
for j in i:
process.write(str(j)+' ')
process.write('\n')
process.write('\n')
print()
print(index_max)
print(tree.row,tree.col)
print(*tree.leaves[index_max].rows)
tc.print2d(tree.leaves[index_max].map)
if tree.leaves[index_max].status==1:
print("you lose")
process.close()
esm.send_email('Process.txt','Process.txt',1)
game_finished = True
break
print()
process.write('\n')
tree.printTree_bfs(process)
print()
process.write('\n')
tree.leaves[index_max].printTree_bfs(process)
process.write('\n'+'#'*165+'\n')
print()
board.selected_col = index_max
board.beads(window,color_bead)
color_bead = BLUE
tree=tree.leaves[index_max]
else:
cor.gotoxy(0,0)
print('select a column: ',end='')
while True:
board.col_transparency(window)
if board.selected_col != None and col_continue%30==0:
board.beads(window,color_bead)
color_bead = BLUE if color_bead is RED else RED
pgs.start = False
if col_continue>=1*30: break
col_continue+=1
col_continue += 1 if pgs.start else 0
pgs.exit_from_game()
pygame.display.update()
if tree.leaves[board.selected_col].status == -1:
print('you win')
process.close()
esm.send_email('Process.txt','Process.txt',0)
game_finished = True
break
tree=tree.leaves[board.selected_col]
board.selected_col = None
game_board_copy=copy.deepcopy(tree.map)
rows=tree.rows
tc.turn+=1
tc.stop_turn=5+tc.turn
start_game()
#show_game()
|
Matin-Modarresi/connect-four
|
connect four/connect_four.py
|
connect_four.py
|
py
| 3,851 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15896435397
|
"""
RED NEURONAL CONVOLUCIONAL,
Dataset con fotos de Humanos y Caballos
"""
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator # Genera las imagenes
# Preprocesado
# Rescala las imagenes del Train
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
# Rescale the test images
test_datagen = ImageDataGenerator(rescale = 1./255)
# Create the training set
training_set = train_datagen.flow_from_directory('C:/Users/USUARIO/Desktop/CursoML/Data/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
# Create the test set
test_set = test_datagen.flow_from_directory('C:/Users/USUARIO/Desktop/CursoML/Data/test_set',
target_size = (64, 64),
batch_size = 10,
class_mode = 'binary')
# Build the CNN: Convolution --> Pooling --> Flattening --> Fully connected
RNC = tf.keras.models.Sequential()
# 1st layer: 2D convolution
RNC.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu", input_shape=[64, 64, 3]))
# 2nd layer: pooling, simplifies the problem and reduces the number of operations
RNC.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
# 3rd: a second convolution + pooling block
RNC.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation="relu"))
RNC.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
# 4th layer: flattening, reshapes the feature maps into a single column vector
RNC.add(tf.keras.layers.Flatten())
# Full connection: add the fully connected network
RNC.add(tf.keras.layers.Dense(units=128, activation='relu'))
# Output layer
RNC.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) # sigmoid activation
# Compile the model with the Adam optimizer and binary cross-entropy
RNC.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Train the model
RNC.fit_generator(training_set,
steps_per_epoch = 40,
epochs = 25,
validation_data = test_set,
validation_steps = 20)
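# A prediction sketch for a single image (the file path is hypothetical; assumes
# the same 64x64 size and 1./255 rescaling used during training). Note that in
# newer Keras versions fit_generator is deprecated and model.fit accepts
# generators directly.
#   from keras.preprocessing.image import load_img, img_to_array
#   img = load_img('C:/Users/USUARIO/Desktop/CursoML/Data/sample.jpg', target_size=(64, 64))
#   x = img_to_array(img).reshape(1, 64, 64, 3) / 255.0
#   print(training_set.class_indices)  # shows which class maps to output 1
#   print(RNC.predict(x)[0][0])        # > 0.5 means the class mapped to 1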
# The model learns to distinguish the two image classes; for better results feed it
# more images, since the test sample is small. The same model could handle several
# classes, but the loss function would have to change to loss = 'CategoricalCrossentropy'
|
karlosmir/ML-Projects
|
ML/RNC01.py
|
RNC01.py
|
py
| 2,915 |
python
|
es
|
code
| 0 |
github-code
|
6
|
4369691360
|
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_text as text
import pickle
import argparse
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import load_model
from sklearn.metrics import cohen_kappa_score
# Load the test data and split data from labels
test_data_file = '../../data/dataset/test_data.xlsx'
test_df = pd.read_excel(test_data_file)
y_test = test_df['domain1_score']
def calc_test_performance_glove(test_df, y_test):
"""
Calculates and prints out the Quadratic Weighted Kappa Score for the model using GloVe
:param test_df: The test data read into a DataFrame
:param y_test: All the essay targets
:return: None
"""
max_len = 275
test_df['essay'] = test_df['essay'].str.lower()
with open('model_glove/tokenizer_glove.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
sequences = tokenizer.texts_to_sequences(test_df['essay'])
padded_seq = pad_sequences(sequences, maxlen=max_len, padding='post')
model = load_model('model_glove/model_glove.h5')
preds = np.around(model.predict(padded_seq))
kappa_score = cohen_kappa_score(preds, y_test, weights='quadratic')
print(f"Quadratic Kappa Score on Test Data with GloVe: {kappa_score}\n")
def calc_test_performance_bert(test_df, y_test, small=True):
"""
Calculates and prints out the Quadratic Weighted Kappa Score for the model using BERT or small BERT
:param test_df: The test data read into a DataFrame
:param y_test: All the essay targets
:param small: A Boolean to calculate kappa score for either model using BERT or small BERT
:return: None
"""
if small:
model = tf.saved_model.load('model_bert_small')
else:
model = tf.saved_model.load('model_bert')
test_prediction_tensors = tf.nn.relu(model(tf.constant(test_df['essay'])))
preds = []
for values in test_prediction_tensors:
preds.append(values.numpy()[0])
preds = np.asarray(preds)
preds = np.around(preds)
kappa_score = cohen_kappa_score(preds, y_test, weights='quadratic')
print(f"Quadratic Kappa Score on Test Data with BERT: {kappa_score}\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--glove', action='store_true')
parser.add_argument('-b', '--bert', action='store_true')
parser.add_argument('-s', '--small', action='store_true')
config = parser.parse_args()
if not (config.glove or config.bert):
parser.error('No model type requested for getting test performance, add -b/--bert or -g/--glove')
if config.glove:
calc_test_performance_glove(test_df, y_test)
if config.bert:
calc_test_performance_bert(test_df, y_test, config.small)
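# Example invocations (flags as defined in the argparse config above):
#   python calculate_test_performance.py --glove
#   python calculate_test_performance.py --bert --small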
|
chennychenchen99/AutoScorer
|
models/trained_model_files/calculate_test_performance.py
|
calculate_test_performance.py
|
py
| 2,875 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8273446218
|
from django.http import HttpResponse
from django.core.cache import cache
from custom_cache_page.utils import hash_key
class TestCache:
def test_cache_page(self, request_factory, mock_cached_view):
request = request_factory.get('/bo')
mock_cached_view(request)
cached_response = cache.get(hash_key('prefix:cached_views:0:/bo'))
assert cached_response
assert type(cached_response) == HttpResponse
assert cached_response.content == HttpResponse('hi').content
|
kishan-character/django-custom-cache-page
|
tests/test_cache.py
|
test_cache.py
|
py
| 510 |
python
|
en
|
code
| null |
github-code
|
6
|
32400182580
|
#!/usr/bin/env python
# coding=utf-8
import pylirc
class Buttons:
    # Initialize: `app` must match the name used by the calling program, and `conf`
    # must match the irexec config path from the earlier experiment, "/etc/lirc/irexec.conf"
def __init__(self, app, conf):
if not pylirc.init(app, conf, 1):
raise Exception("Unable to init pylirc")
        # turn off blocking mode
pylirc.blocking(0)
def readbutton(self):
        # a key press yields the matching config value; None when no key matches
btn = pylirc.nextcode()
if btn:
return btn[0]
else:
return None
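# A minimal usage sketch ("radio" is an assumed program name; it must match a
# prog entry in the irexec config):
#   buttons = Buttons("radio", "/etc/lirc/irexec.conf")
#   while True:
#       key = buttons.readbutton()
#       if key:
#           print("pressed:", key)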
|
chronosmaker/RPiRadio
|
Buttons.py
|
Buttons.py
|
py
| 623 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11769609560
|
#Lets check anagrams.
"""
Anagram is a word, phrase or name formed by rearranging the
letters of another word. Like spar from rasp
"""
def anagram(st1: str, st2: str) -> bool:
    # Sorting both strings yields identical sequences exactly when one word is a
    # rearrangement of the other. The previous membership check ignored letter
    # counts, so e.g. "aab" and "abb" were wrongly accepted.
    return sorted(st1) == sorted(st2)
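# Quick sanity checks:
#   anagram("spar", "rasp") -> True
#   anagram("aab", "abb")   -> False (same letters, different counts)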
while True:
st1 = input("Enter the first string: ")
st2 = input("Enter the second string: ")
if len(st1) != len(st2):
print("Please enter words of same length")
else:
break
if anagram(st1, st2):
print(f"{st1} and {st2} are Anagrams")
else:
print(f"{st1} and {st2} are not Anagrams")
|
Kamalabot/Programmers57Challenges
|
exe24_anagram.py
|
exe24_anagram.py
|
py
| 719 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10220457455
|
from typing import List
from nazurin.models import Illust, Image, Ugoira
from nazurin.utils import Request
from nazurin.utils.decorators import network_retry
from nazurin.utils.exceptions import NazurinError
from nazurin.utils.logging import logger
from .base import BaseAPI
class SyndicationAPI(BaseAPI):
"""Public API from publish.twitter.com"""
@network_retry
async def get_tweet(self, status_id: int):
"""Get a tweet from API."""
logger.info("Fetching tweet {} from syndication API", status_id)
API_URL = "https://cdn.syndication.twimg.com/tweet-result"
params = {
"features": "tfw_tweet_edit_backend:on",
"id": str(status_id),
"lang": "en",
}
async with Request() as request:
async with request.get(API_URL, params=params) as response:
if response.status == 404:
raise NazurinError("Tweet not found or unavailable.")
response.raise_for_status()
tweet = await response.json()
del tweet["__typename"]
return tweet
async def fetch(self, status_id: int) -> Illust:
"""Fetch & return tweet images and information."""
tweet = await self.get_tweet(status_id)
if "video" in tweet:
return await self.get_video(tweet)
imgs = self.get_images(tweet)
caption = self.build_caption(tweet)
return Illust(imgs, caption, tweet)
def get_images(self, tweet) -> List[Image]:
"""Get all images in a tweet."""
if "photos" not in tweet:
raise NazurinError("No photo found.")
photos = tweet["photos"]
imgs = []
for index, photo in enumerate(photos):
imgs.append(BaseAPI.parse_photo(tweet, photo, index))
return imgs
async def get_video(self, tweet) -> Ugoira:
variants = tweet["mediaDetails"][0]["video_info"]["variants"]
return await self.get_best_video(tweet, variants)
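# A minimal usage sketch (the status id is hypothetical; Illust's fields follow
# the constructor call in fetch() above):
#   import asyncio
#   api = SyndicationAPI()
#   illust = asyncio.run(api.fetch(1234567890))
#   print(illust.caption)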
|
y-young/nazurin
|
nazurin/sites/twitter/api/syndication.py
|
syndication.py
|
py
| 2,028 |
python
|
en
|
code
| 239 |
github-code
|
6
|
14512503096
|
psw = input('Введите пароль: ')  # ask for the password
msg = 'Ваш пароль состоит только из цифр'  # default message printed after input ("your password is digits only")
psw_len = len(psw)  # password length
try:
    result_1 = 2/psw_len  # empty-password check (division by zero)
    result_2 = int(psw)  # digits-only check
except ZeroDivisionError:  # division by zero means the password was empty
    msg = 'Вы ввели пустой пароль'  # "you entered an empty password"
except ValueError:  # non-digit characters are present, so the requirement is met
    msg = 'Требования к паролю соблюдены'  # "the password requirements are met"
print(msg)  # print the resulting message
|
zarubb/ps-pb-psw_vrf
|
app.py
|
app.py
|
py
| 958 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
27022192120
|
import cv2
import numpy as np
kernel = np.ones((5,5),np.uint8)
# Take input from webcam
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
    # Gaussian blur to reduce noise
frame = cv2.GaussianBlur(frame,(5,5),0)
#bgr to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#split hsv
h, s, v = cv2.split(hsv)
    # HSV bounds for green (note: the inRange calls below hard-code these same values)
    greenLower = np.array([29, 86, 6])
    greenUpper = np.array([64, 255, 255])
# Apply thresholding
hthresh = cv2.inRange(np.array(h),np.array([29]),np.array([64]))
sthresh = cv2.inRange(np.array(s),np.array([86]),np.array([255]))
vthresh = cv2.inRange(np.array(v),np.array([6]),np.array([255]))
# AND h s and v
tracking = cv2.bitwise_and(hthresh,cv2.bitwise_and(sthresh,vthresh))
    # Gaussian blur again (after dilation and morphological closing)
dilation = cv2.dilate(tracking,kernel,iterations = 1)
closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)
res = cv2.GaussianBlur(closing,(5,5),0)
# Detect circles using HoughCircles
circles = cv2.HoughCircles(res,cv2.HOUGH_GRADIENT,2,120,param1=120,param2=50,minRadius=10,maxRadius=0)
#Draw Circles
if circles is not None:
for i in circles[0,:]:
# If the ball is far, draw it in green
if int(round(i[2])) < 30:
cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),5)
cv2.circle(frame,(i[0],i[1]),2,(0,255,0),10)
# else draw it in red
elif int(round(i[2])) > 35:
cv2.circle(frame,(i[0],i[1]),i[2],(0,0,255),5)
cv2.circle(frame,(i[0],i[1]),2,(0,0,255),10)
#circles = np.round(circles[0, :]).astype("int")
#X = circles
#print the coordinates of the center
print('x=,y=',i[0],i[1])
#Show the result in frames
cv2.imshow('HueComp',hthresh)
cv2.imshow('SatComp',sthresh)
cv2.imshow('ValComp',vthresh)
cv2.imshow('res',res)
cv2.imshow('tracking',frame)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
ashwin876/Ball_Tracking_Python
|
Green_ball_Tracking.py
|
Green_ball_Tracking.py
|
py
| 2,342 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14560619174
|
import os
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time
from astropy.coordinates import solar_system_ephemeris # , EarthLocation
from astropy.coordinates import get_body_barycentric
solar_system_ephemeris.set('de432s')
def get_planet_coord(timestamp, planet_list):
"""
指定の時刻と惑星の座標を取得
Return: dict
key: planet name
value: dict(x, y, x)
座標値(km)
"""
def _get_planet_coord_list(timestamp, planet_list):
"""
指定時刻の指定惑星の座標情報インスタンスのリストを取得
"""
# astropyのTimeタイプへ変換
timestamp = Time(timestamp)
# 指定惑星の座標を取得
planet_coord_list = [get_body_barycentric(
_planet, timestamp) for _planet in planet_list]
return planet_coord_list
_planet_coord_list = _get_planet_coord_list(timestamp, planet_list)
dict_planet_coord = {}
for _planet, _coord in zip(planet_list, _planet_coord_list):
        # x, y, z in km
        x, y, z = _coord.x, _coord.y, _coord.z  # fixed: z previously copied _coord.x
# dict_planet_coord[_planet] = [lon, lat, radius]
dict_planet_coord[_planet] = {'x': x, 'y': y, 'z': z}
return dict_planet_coord
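# Example (a sketch of the return shape):
#   get_planet_coord('2022-01-01', ['earth'])
#   -> {'earth': {'x': <Quantity ... km>, 'y': <Quantity ... km>, 'z': <Quantity ... km>}}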
def get_planet_coord_timeseries(timeseries, planet_list):
"""
指定時系列の指定惑星の座標を取得
"""
# 初期化
dict_planet_coord_timeseries = {}
for _planet in planet_list:
dict_planet_coord_timeseries[_planet] = {'x': [], 'y': [], 'z': []}
    # collect each planet's coordinates at every timestamp
for _timestamp in timeseries:
"""
指定時刻の指定惑星の座標
key: planet name
value: dict(x, y, x)
座標値(km)
"""
dict_planet_coord = get_planet_coord(_timestamp, planet_list)
for _planet in planet_list:
for _key in ['x', 'y', 'z']:
dict_planet_coord_timeseries[_planet][_key].append(
np.array(dict_planet_coord[_planet][_key]))
# Convert list into ndarray
for _planet in planet_list:
for _key in ['x', 'y', 'z']:
dict_planet_coord_timeseries[_planet][_key] = np.array(
dict_planet_coord_timeseries[_planet][_key])
return dict_planet_coord_timeseries
if __name__ == "__main__":
    # current working directory
    CWD_PATH = Path(os.path.dirname(__file__))
    # output folder: create it if it does not exist
OUTPUT_PATH = CWD_PATH / 'output'
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH)
    # define the period and build a daily time series
    start, end = '2022-01-01', '2022-08-01'
    timeseries = pd.date_range(start, end, freq='D')
    delta_t = 24*60*60
    # planets to plot
    planet_list = ['venus', 'earth', 'mars']
    # get each planet's coordinate time series as a dict
dict_planet_coord_timeseries = get_planet_coord_timeseries(timeseries, planet_list)
time_list = np.arange(0, delta_t*len(timeseries), len(timeseries)).reshape(-1, 1)
    # plot the planetary orbits over the specified period
fig = plt.figure(figsize=(8, 8))
ax = plt.subplot(1, 1, 1)
plt.scatter(0, 0, color='orange', s=200, label='Sun')
for _planet in dict_planet_coord_timeseries.keys():
x = dict_planet_coord_timeseries[_planet]['x']
y = dict_planet_coord_timeseries[_planet]['y']
plt.plot(x, y, label=_planet, linewidth=2)
plt.scatter(x[0], y[0], color='black', s=40) # initial point
plt.scatter(x[-1], y[-1], color='red', s=40) # final point
plt.legend()
plt.grid()
    plt.gca().set_aspect('equal')  # keep the plot's aspect ratio equal
plt.savefig(OUTPUT_PATH / 'test_planet_orbit.png')
plt.show()
plt.close(fig)
|
caron14/swingby_challenge
|
planet_position.py
|
planet_position.py
|
py
| 3,877 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39270259657
|
import datetime as dt
import re
import time
import requests
import html5lib
from bs4 import BeautifulSoup
import googleapiclient.discovery
import google.auth
def get_calendar_html(year, month):
CALURL = "https://syllabus.naist.jp/schedules/preview_monthly"
text = requests.get(f"{CALURL}/{str(year)}/{str(month)}").text
return text
def construct_data(html_text, year, month):
soup = BeautifulSoup(html_text, "html5lib")
    # extract the class information according to the page's HTML layout
shedule_table = soup.find("table", attrs={"class": "tbl_m_schedule"})
tr_classes = shedule_table.find_all("td", id=re.compile('^\d+-\d+-\d+$'))
tr_class_note_dict = {
c["id"].rstrip("_note"): c.text.strip()
for c
in shedule_table.find_all("td", id=re.compile('^\d+-\d+-\d+_note$'))
}
    # tuple of period start times
period_starttime = (
dt.time(9, 20),
dt.time(11, 0),
dt.time(13, 30),
dt.time(15, 10),
dt.time(16, 50),
dt.time(18, 30)
)
    # structure the extracted data
data = []
for c in tr_classes:
event_id = c["id"].split("-")
lines = c.get_text("[!tag]").strip().split("[!tag]") # 区切り文字列を"[!tag]"にして衝突防止
teachers = ""
nth = ""
        # extract the class name, classroom and teacher names; this is ad-hoc pattern matching and leaves room for improvement
for i in range(len(lines)):
if i == 0 or i == len(lines):
continue
line = lines[i]
if i == 1:
title = line
elif i == 2:
classroom = line.lstrip("\u3000").strip("[]")
elif line.startswith("\u3000"):
line = line.lstrip("\u3000")
teachers += line
elif line.startswith("<第"):
nth = line
teachers_list = [t.replace("\u3000", " ").strip(" ") for t in teachers.split("、")]
        # build the start and end datetimes
date_start = dt.datetime.combine(
dt.date(year, month, int(event_id[0])),
period_starttime[int(event_id[1])]
)
date_end = date_start + dt.timedelta(hours=1, minutes=30)
        # pack into a dict
        event = {
            "class": title,
            "period": int(event_id[1]),  # period of the day (0-based)
            "starttime": date_start.strftime("%Y-%m-%dT%H:%M:%S"),
            "endtime": date_end.strftime("%Y-%m-%dT%H:%M:%S"),
            "class_number": int(event_id[2]),  # which lecture in the series (separate from the ID)
            "classroom": classroom,
            "teachers": teachers_list,
            "note": tr_class_note_dict[c["id"]]
        }
        if nth:
            event["nth"] = nth
        # store
data.append(event)
return data
def send_events(calendarid_path, key_filename, event_data):
SCOPES = ['https://www.googleapis.com/auth/calendar']
with open(calendarid_path, "r") as f:
calender_id = f.read()
    # load the Google credentials from file
    gapi_creds = google.auth.load_credentials_from_file(key_filename, SCOPES)[0]
    # build the Resource object used to talk to the API
    service = googleapiclient.discovery.build('calendar', 'v3', credentials=gapi_creds)
    # write the events
    # prepare the event payload for each class
for _ in event_data:
_teachers = "\n".join(_["teachers"])
        # build the description text
dsc = f'{_["period"] + 1}限' + "\n"
if "nth" in _:
if _["nth"]:
dsc += _["nth"] + "\n"
dsc += f'担当教員:' + "\n" + _teachers
if _["note"]:
dsc += "\n\n" + _["note"]
        # assemble the request body
body = {
'summary': _["class"],
'location': _["classroom"],
'description': dsc,
'start': {
'dateTime': _["starttime"],
'timeZone': 'Japan'
},
'end': {
'dateTime': _["endtime"],
'timeZone': 'Japan'
}
}
        # register the prepared event
event = service.events().insert(calendarId=calender_id, body=body).execute()
time.sleep(1.25)
def main():
import sys
args_ = sys.argv[1:]
YEAR, MONTH = int(args_[0]), int(args_[1])
CALID_PATH, KEYFILE = args_[2:]
html_text = get_calendar_html(YEAR, MONTH)
data = construct_data(html_text, YEAR, MONTH)
send_events(CALID_PATH, KEYFILE, data)
if __name__ == '__main__':
main()
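# Usage sketch (the file names are assumptions):
#   python naist-calendar.py 2023 4 calendar_id.txt service_account_key.json
# where calendar_id.txt holds the target Google Calendar id and the JSON file
# is a Google service-account key authorized for the Calendar scope.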
|
Masahiro-Kobayashi-NAIST/NAIST-Class-to-Google-Calander
|
naist-calendar.py
|
naist-calendar.py
|
py
| 4,663 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30217414474
|
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,4], color = '#21c4ed', linestyle='dashed', marker='o')
# first list = X values, second list = Y values
# color via HEX - find a color with a color picker (google)
# general info: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
# linestyle: https://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D.set_linestyle
# marker: https://matplotlib.org/api/markers_api.html#module-matplotlib.markers
plt.show()  # start the display
# different chart types
plt.pie([1, 2, 3])
plt.show()
plt.bar([1, 2, 4], [5, 6, 5])
plt.show()
plt.scatter([1, 2, 4], [5, 6, 5])
plt.show()
plt.scatter([1, 2, 4], [5, 6, 5], color = "#ff0000", marker = "x")
plt.show()
# Object-oriented creation of a figure with an inset plot
import numpy as np
x = np.linspace(0,5,11)
y = x**2
af = plt.figure()  # create the figure (an empty canvas)
axes1 = af.add_axes([0.1, 0.1, 0.8, 0.8])  # position of the main plot
axes2 = af.add_axes([0.2, 0.5, 0.4, 0.3])  # position of the inset plot
# Main plot
axes1.plot(x, y, 'b')
axes1.set_xlabel('X')  # x-axis label
axes1.set_ylabel('Y')  # y-axis label
axes1.set_title('big diagramm')  # plot title
# Inset plot on axes2
axes2.plot(y, x, 'r')
axes2.set_xlabel('X')  # x-axis label
axes2.set_ylabel('Y')  # y-axis label
axes2.set_title('small diagramm')  # plot title
plt.show()
### Creating 2 or more plots in one output
diagramm, axes = plt.subplots(nrows=1, ncols=2)  # diagramm is the figure variable;
# the number of subplots is defined via axes.
# Plot 1
axes[0].plot(x,y)
axes[0].set_xlabel('X')
axes[0].set_ylabel('Y')
# Plot 2
axes[1].plot(y,x)
axes[1].set_ylabel('Y')
axes[1].set_xlabel('X')
diagramm
plt.tight_layout()
plt.show()
diag = plt.figure(figsize=(8,4), dpi=150)  # DPI sets the resolution and hence the size.
ax = diag.add_axes([0,0,1,1])
ax.plot(x,y)
### Creating and saving a figure as a PNG file
diag, axes = plt.subplots(figsize=(12,3), dpi=100)  # DPI sets the resolution and hence the size.
axes.plot(x,y)
diag.savefig('dateiname.png', dpi=200)  # saving a matplotlib figure
### Creating a legend and positioning it
diag = plt.figure()
ax=diag.add_axes([0,0,1,1])
ax.plot(x,x**2, label = 'x**2')
ax.plot(x,x**3, label = 'x**3')
ax.legend(loc=5)  # loc values 1-10 set the position of the legend
### Figure formatting (colors, shapes)
# Overview of the available settings:
diag, ax = plt.subplots()
ax.plot(x, x**2, color='#F4A460'  # hex RGB code for the color, syntax: #code
        , alpha=0.9  # transparency
        , lw=1.5  # line width
        , ls='--'  # line style (dashed, solid, ...)
        , marker='o'  # place markers on the line
        , markersize=10  # marker size
        , markerfacecolor='yellow'  # marker fill color
        , markeredgewidth=3  # marker edge width
        , markeredgecolor='green')  # marker edge color
ax.set_xlim([0, 4.5])  # visible range of the x-axis
ax.set_ylim([0, 20])  # visible range of the y-axis
# Examples of different line formats
diag, ax = plt.subplots(figsize=(12,6))
ax.plot(x, x+1, color="red", linewidth=0.25)
ax.plot(x, x+2, color="red", linewidth=0.50)
ax.plot(x, x+3, color="red", linewidth=1.00)
ax.plot(x, x+4, color="red", linewidth=2.00)
# Possible line styles: '-', '--', '-.', ':', 'steps'
ax.plot(x, x+5, color="green", lw=3, linestyle='-')
ax.plot(x, x+6, color="green", lw=3, ls='-.')
ax.plot(x, x+7, color="green", lw=3, ls=':')
# Custom dash pattern
line, = ax.plot(x, x+8, color="black", lw=1.50)
line.set_dashes([5, 10, 15, 10])  # format: dash length, gap length, ...
# Possible markers: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ...
ax.plot(x, x+ 9, color="blue", lw=3, ls='-', marker='+')
ax.plot(x, x+10, color="blue", lw=3, ls='--', marker='o')
ax.plot(x, x+11, color="blue", lw=3, ls='-', marker='s')
ax.plot(x, x+12, color="blue", lw=3, ls='--', marker='1')
# Marker size and color
ax.plot(x, x+13, color="purple", lw=1, ls='-', marker='o', markersize=2)
ax.plot(x, x+14, color="purple", lw=1, ls='-', marker='o', markersize=4)
ax.plot(x, x+15, color="purple", lw=1, ls='-', marker='o', markersize=8, markerfacecolor="red")
ax.plot(x, x+16, color="purple", lw=1, ls='-', marker='s', markersize=8,
markerfacecolor="yellow", markeredgewidth=3, markeredgecolor="green");
plt.show()
# http://www.matplotlib.org - the matplotlib website.
# https://github.com/matplotlib/matplotlib - the matplotlib source code.
# http://matplotlib.org/gallery.html - a large gallery showing the many kinds of plots matplotlib can create.
|
ThePeziBear/MyPythonLibrary
|
Visualizing_Python/Matplotlib/1_General_Matplotlib_settings.py
|
1_General_Matplotlib_settings.py
|
py
| 4,912 |
python
|
de
|
code
| 0 |
github-code
|
6
|
32543172289
|
import aioredis
import pytest
from aiorate_limiter import RateLimiterOpts
from aiorate_limiter.storage.redis import RedisRateLimiter, REDIS_SCRIPT_HASH
@pytest.fixture
async def redis():
redis = await aioredis.create_redis("redis://localhost:6379")
yield redis
redis.close()
await redis.wait_closed()
@pytest.mark.asyncio
async def test_consume(redis):
key, duration, points = "test_key", 5000, 10
opts = RateLimiterOpts(points=points, duration=duration)
redis_limiter = RedisRateLimiter(opts, redis)
await redis_limiter.init()
res = await redis_limiter.consume(key, 0)
assert res.is_allowed and res.remaining_points == points
# Reduce points
res = await redis_limiter.consume(key)
assert res.is_allowed and res.remaining_points == points - 1
# Reduce token
res = await redis_limiter.consume(key)
assert res.is_allowed and res.remaining_points == points - 2
# Reduce all tokens
res = await redis_limiter.consume(key, points * 10)
assert res.is_allowed is False
@pytest.mark.asyncio
async def test_script_load(redis):
key, duration, points = "test_key", 5000, 5
opts = RateLimiterOpts(points=points, duration=duration)
redis_limiter = RedisRateLimiter(opts, redis)
await redis_limiter.init()
assert (await redis.script_exists(REDIS_SCRIPT_HASH))[0]
# Check success loading script
await redis_limiter.consume(key, 0)
# Remove script
await redis.script_flush()
assert not (await redis.script_exists(REDIS_SCRIPT_HASH))[0]
with pytest.raises(Exception):
await redis_limiter.consume(key, 0)
|
theruziev/aiorate_limiter
|
tests/storages/test_redis_rl.py
|
test_redis_rl.py
|
py
| 1,623 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7002248991
|
from ...flaskapp.utils.db_utils import conn
from ...common.constants import CURRENT_TERM
from ...flaskapp.utils.utils import previous_term
from ..utils import student_utils as student
def transcript_is_outdated(user_id):
    cur = conn.cursor()  # assumes `conn` is a DB-API connection exposing cursor()
    cur.execute("""SELECT term_year, term_month
                   FROM students_completed_courses scc
                   JOIN courses ON courses.id = scc.course_id
                   WHERE student_id = %s
                   ORDER BY term_year DESC, term_month DESC
                   LIMIT 1""",
                (user_id,))
    latest_transcript_term = cur.fetchone()
    return ((not latest_transcript_term)
            or
            (latest_transcript_term < previous_term(*CURRENT_TERM)))
# add flag transcript_outdated to students table
# on new quarter start, reset all students to False
# prompt student "Did you take classes in the Spring? Yes/No"
# No -> transcript_outdated = False
# Yes -> Transcript upload -> transcript_outdated = True
def update_student(user_id, transcript, programs):
student.set_student_programs(user_id, programs)
student.handle_transcript(user_id, transcript)
|
minupalaniappan/gradfire
|
daviscoursesearch/flaskapp/service/user.py
|
user.py
|
py
| 1,113 |
python
|
en
|
code
| 12 |
github-code
|
6
|
40677398663
|
from magma.configuration_controller.request_consumer.request_db_consumer import (
RequestDBConsumer,
)
from magma.db_service.config import TestConfig
from magma.db_service.models import (
DBCbsd,
DBCbsdState,
DBRequest,
DBRequestType,
)
from magma.db_service.session_manager import Session
from magma.db_service.tests.local_db_test_case import LocalDBTestCase
from parameterized import parameterized
REQUEST_PROCESSING_LIMIT = 10
class RegistrationDBConsumerTestCase(LocalDBTestCase):
def test_get_pending_requests_retrieves_empty_list_of_requests_when_no_pending_requests_in_db(self):
# Given
consumer = RequestDBConsumer(
"someRequest", request_processing_limit=REQUEST_PROCESSING_LIMIT,
)
# When
reqs = consumer.get_pending_requests(self.session)
# Then
self.assertEqual(0, len(list(reqs.values())[0]))
def test_get_pending_requests_retrieves_pending_requests_only(self):
# Given
consumer = RequestDBConsumer(
"someRequest", request_processing_limit=REQUEST_PROCESSING_LIMIT,
)
self._prepare_two_pending_requests()
# When
reqs = consumer.get_pending_requests(self.session)
# Then
self.assertEqual(2, len(list(reqs.values())[0]))
@parameterized.expand([
(1, 1, 1),
(2, 2, 0),
(0, 2, 0),
(-1, 2, 0),
(-100, 2, 0),
])
def test_different_processes_dont_pick_up_each_others_requests(self, max_batch_size, req_count_1, req_count_2):
"""
This is a test for horizontal scaling functionality of the Configuration Controller.
It tests if two processes (in this case associated with different Session instances) only pick those requests
that have no lock on them.
"""
# Given
config = TestConfig()
config.REQUEST_PROCESSING_LIMIT = max_batch_size
session1 = Session(bind=self.engine)
session2 = Session(bind=self.engine)
consumer = RequestDBConsumer(
"someRequest", request_processing_limit=config.REQUEST_PROCESSING_LIMIT,
)
self._prepare_two_pending_requests()
# When
reqs1 = consumer.get_pending_requests(session1)
reqs2 = consumer.get_pending_requests(session2)
reqs1_list = list(reqs1.values())[0]
reqs2_list = list(reqs2.values())[0]
session1.commit()
session2.commit()
# Then
self.assertEqual(req_count_1, len(reqs1_list))
self.assertEqual(req_count_2, len(reqs2_list))
if reqs1_list and reqs2_list:
# Making sure we're not getting the same requests in both sessions
self.assertNotEqual(reqs1_list[0].cbsd_id, reqs2_list[0].cbsd_id)
session1.close()
session2.close()
def _prepare_two_pending_requests(self):
test_state = DBCbsdState(name="test_state")
cbsds = []
for i in range(1, 3):
cbsds.append(
DBCbsd(
id=int(i),
cbsd_id=f"foo{i}",
state=test_state,
desired_state=test_state,
user_id="test_user",
fcc_id=f"test_fcc_id{i}",
cbsd_serial_number=f"test_serial_nr{i}",
),
)
req_type = DBRequestType(name="someRequest")
req1 = DBRequest(
cbsd=cbsds[0], type=req_type, payload={
"some": "payload1",
},
)
req2 = DBRequest(
cbsd=cbsds[1], type=req_type, payload={
"some": "payload2",
},
)
self.session.add_all([req1, req2])
self.session.commit()
|
magma/magma
|
dp/cloud/python/magma/configuration_controller/tests/unit/test_request_consumer.py
|
test_request_consumer.py
|
py
| 3,787 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
15598827362
|
import torch
from torch import nn
from torch.nn import init
# L2 Norm: fixes inconsistent scales across feature maps
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
        self.weight = nn.Parameter(torch.randn(self.n_channels))  # only an nn.Parameter is registered with the module (and thus trained/saved)
self.reset_parameters()
def reset_parameters(self):
        init.constant_(self.weight, self.gamma)  # in-place constant initialisation
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = torch.div(x, norm)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
return out
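# Minimal sanity-check sketch (not from the original repo): after the
# channel-wise normalization, every spatial position has roughly unit L2 norm
# across channels before the learned per-channel scale is applied.
if __name__ == "__main__":
    layer = L2Norm(n_channels=512, scale=20)
    x = torch.randn(2, 512, 38, 38)
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + layer.eps
    print(torch.div(x, norm).pow(2).sum(dim=1).mean())  # ~1.0 at every position
    print(layer(x).shape)  # torch.Size([2, 512, 38, 38])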
|
AceCoooool/detection-pytorch
|
ssd/utils_ssd/L2Norm.py
|
L2Norm.py
|
py
| 752 |
python
|
en
|
code
| 24 |
github-code
|
6
|
9781742668
|
INF=float('inf')
G = [[0, 3, INF, 5],
[2, 0, INF, 4],
[INF, 1, 0, INF],
[INF, INF, 2, 0]]
nV = 4
distance = list(map(lambda i: list(map(lambda j: j, i)), G))  # element-wise copy of G
print(distance)
for k in range(nV):
for i in range(nV):
for j in range(nV):
distance[i][j] = min(distance[i][j], distance[i][k] + distance[k][j])
print(distance)
# shorthand version
def Floyd_Warshall(G):
    n = len(G)
    for t in range(n):  # the lecture numbered vertices 1..n; here they are 0-based
        for u in range(n):
            for w in range(n):
                G[u][w] = min(G[u][w], G[u][t] + G[t][w])
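# Hedged usage sketch (not in the original file): running the helper on a copy
# of G defined above reproduces the matrix computed by the explicit triple loop.
G2 = [row[:] for row in G]
Floyd_Warshall(G2)
print(G2)  # matches `distance` printed above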
|
wiksat/AlghorithmsAndDataStructures
|
ASD/BeforeExam/egzamin_2_szablony/Floyd-Warshall.py
|
Floyd-Warshall.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17035760804
|
# Graph path (equivalent to paiza rank C)
# https://paiza.jp/works/mondai/graph_dfs_problems/graph_dfs__path_one_step3
INPUT1 = """\
3 1 2
"""
OUTPUT1 = """\
1 2 3
"""
INPUT2 = """\
5 5 3
"""
OUTPUT2 = """\
5 4 3 2
"""
def main(input_str):
    # n: number of vertices, s: start vertex, k: number of moves
    n, s, k = map(int, input_str.split())
    # adjacency list
    ad_list = {}
    for i in range(1, n + 1):
        e = [j for j in range(1, n + 1) if j != i]
        ad_list[i] = e
    # path of k moves starting from s
    path = [s]
    for _ in range(k):
        # current vertex
        cv = path[-1]
        # pick a reachable vertex and append it to the end of the path
        for nv in ad_list[cv][::-1]:
            # skip already-visited vertices
            if nv in path:
                continue
            # append to the path and leave the loop
            path.append(nv)
            break
    # output the path
return " ".join(map(str, path))
print(main(open(0).read()))
|
atsushi0919/paiza_workbook
|
graph_dfs_problems/01-03_path_one_step3.py
|
01-03_path_one_step3.py
|
py
| 997 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
5479410467
|
import itertools
from copy import deepcopy
from random import shuffle
from .type_utils import is_seq_of
def concat_seq(in_list, dtype):
assert dtype in [list, tuple]
return dtype(itertools.chain(*in_list))
def concat_list(in_list):
return concat_seq(in_list, list)
def concat_tuple(in_list):
return concat_seq(in_list, tuple)
def auto_pad_seq(a, b):
"""
Input two sequence, then output two list of objects with the same size.
"""
a = list(a) if isinstance(a, (list, tuple)) else [a]
b = list(b) if isinstance(b, (list, tuple)) else [b]
if len(a) > len(b):
for i in range(len(a) - len(b)):
b.append(a[0])
elif len(a) < len(b):
for i in range(len(b) - len(a)):
a.append(b[0])
return a, b
def flatten_seq(x, dtype=list):
if not is_seq_of(x, (tuple, list)):
return x
return dtype(concat_list([flatten_seq(_) for _ in x]))
def split_list_of_parameters(num_processes, *args, **kwargs):
    from ..math import split_num
    args = [_ for _ in args if _ is not None]
    kwargs = {_: __ for _, __ in kwargs.items() if __ is not None}
    assert len(args) > 0 or len(kwargs) > 0
    first_item = args[0] if len(args) > 0 else kwargs[list(kwargs.keys())[0]]
    n, running_steps = split_num(len(first_item), num_processes)
start_idx = 0
paras = []
for i in range(n):
slice_i = slice(start_idx, start_idx + running_steps[i])
start_idx += running_steps[i]
args_i = list([_[slice_i] for _ in args])
kwargs_i = {_: kwargs[_][slice_i] for _ in kwargs}
paras.append([args_i, kwargs_i])
return paras
def select_by_index(files, indices):
return [files[i] for i in indices]
def random_pad_clip_list(x, num):
x = deepcopy(list(x))
if len(x) > num:
shuffle(x)
return x[:num]
else:
ret = []
for i in range(num // len(x)):
shuffle(x)
ret = ret + x
ret = ret + x[: num - len(ret)]
return ret
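# Hedged usage examples (not part of the original module); because of the
# relative imports above, run this with `python -m`, not as a loose script.
if __name__ == "__main__":
    print(concat_list([[1, 2], [3]]))                # [1, 2, 3]
    print(concat_tuple([(1,), (2, 3)]))              # (1, 2, 3)
    print(flatten_seq([[[1], [2]], [[3]]]))          # [1, 2, 3]
    print(select_by_index(["a", "b", "c"], [2, 0]))  # ['c', 'a']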
|
haosulab/ManiSkill2-Learn
|
maniskill2_learn/utils/data/seq_utils.py
|
seq_utils.py
|
py
| 2,031 |
python
|
en
|
code
| 53 |
github-code
|
6
|
27937206338
|
import re
from .Enums import SelectionMode
from .Exceptions import SelectionReuseException
from . import Database
__author__ = 'Riley Flynn (nint8835)'
class DatabaseSelection:
"""
Represents a selection of items from a JSON DB.
Can have items retrieved from it, or can be modified.
Upon modification the selection object becomes unusable and must be recreated by performing the selection again.
"""
def __init__(self, data: list, db: "Database.JSONDatabase"):
"""
Creates a new instance of DatabaseSelection
:param data: A list of rows from a db.
:param db: The JSONDatabase instance that the rows came from. Used to perform modifications.
"""
self.db = db
self.rows = data
self._selection_modified = False
def __getitem__(self, item):
if not self._selection_modified:
return self.rows[item]
else:
raise SelectionReuseException()
def __len__(self):
if not self._selection_modified:
return len(self.rows)
else:
raise SelectionReuseException()
def __str__(self):
if not self._selection_modified:
return "DatabaseSelection from database at {}, containing the following rows: {}".format(self.db.path, self.rows)
else:
return "Unusable DatabaseSelection object. Perform a new selection to get a usable one."
def __repr__(self):
if not self._selection_modified:
return "DatabaseSelection({}, {})".format(self.rows, self.db)
else:
return ""
def remove(self):
"""
Removes all rows contained in this selection from the DB
"""
if not self._selection_modified:
for row in self.rows:
self.db.data.remove(row)
self.db.save_db()
self._selection_modified = True
self.rows = []
else:
raise SelectionReuseException()
def update(self, key, value):
"""
Updates the value of a certain key of all rows contained in this selection
:param key: The key you wish to update
:param value: The new value
"""
if not self._selection_modified:
for row in self.rows:
self.db.data[self.db.data.index(row)][key] = value
self.db.save_db()
self._selection_modified = True
self.rows = []
else:
raise SelectionReuseException()
def select(self, mode: SelectionMode, key="", selection_var="") -> "DatabaseSelection":
"""
Further refines this selection
:param mode: The mode of selection used to refine the selection
:param key: The key to perform the selection on
:param selection_var: The variable to be used for the selection
:return: A DatabaseSelection object containing the refined selection
"""
if not self._selection_modified:
if mode == SelectionMode.VALUE_LESS_THAN:
return DatabaseSelection([row for row in self.rows if row[key] < selection_var], self.db)
if mode == SelectionMode.VALUE_GREATER_THAN:
return DatabaseSelection([row for row in self.rows if row[key] > selection_var], self.db)
if mode == SelectionMode.VALUE_EQUALS:
return DatabaseSelection([row for row in self.rows if row[key] == selection_var], self.db)
if mode == SelectionMode.VALUE_GREATER_THAN_OR_EQUAL:
return DatabaseSelection([row for row in self.rows if row[key] >= selection_var], self.db)
if mode == SelectionMode.VALUE_LESS_THAN_OR_EQUAL:
return DatabaseSelection([row for row in self.rows if row[key] <= selection_var], self.db)
if mode == SelectionMode.VALUE_NOT_EQUAL:
return DatabaseSelection([row for row in self.rows if row[key] != selection_var], self.db)
            if mode == SelectionMode.REGEX_MATCH:
                regex = re.compile(selection_var)
                return DatabaseSelection([row for row in self.rows if regex.match(row[key])], self.db)
if mode == SelectionMode.VALUE_IN:
return DatabaseSelection([row for row in self.rows if selection_var in row[key]], self.db)
if mode == SelectionMode.ALL:
return DatabaseSelection(self.rows, self.db)
else:
raise SelectionReuseException()
|
nint8835/NintbotForDiscord
|
libraries/JSONDB/Selection.py
|
Selection.py
|
py
| 4,508 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39122705161
|
from flask import *
from database import DB,CR
teacher=Blueprint("teacher",__name__)
@teacher.route("/")
def TeacherHome():
return render_template("teacherhome.html")
@teacher.route("/answerquestion",methods=["post","get"])
def AnswerQuestion():
CR.execute("SELECT * FROM sdatabase")
qanda=CR.fetchall()
if 'submit' in request.form:
answer=request.form['ans']
id=request.form['submit']
sql="UPDATE sdatabase SET answer=%s WHERE id=%s"
val=(answer,id)
CR.execute(sql,val)
DB.commit()
flash("Answer submited")
return redirect(url_for("teacher.AnswerQuestion"))
return render_template('answerquestion.html',qanda=qanda)
@teacher.route("/deletesdatabase",methods=["post","get"])
def deletesdatabase():
CR.execute("SELECT *FROM sdatabase ")
res=CR.fetchall()
if "submit" in request.form:
id=request.form['submit']
CR.execute("DELETE FROM sdatabase WHERE id=%s",(id,))
DB.commit()
flash("Items Delete")
return redirect(url_for('teacher.deletesdatabase'))
return render_template('deletesdatabase.html',res=res)
|
ShanoliaJoseph/flask
|
teacher.py
|
teacher.py
|
py
| 1,145 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21056363812
|
import torch
import torchvision
from torchvision import models
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import time
from functools import wraps
n_classes = 100
def watcher(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
end = time.perf_counter()
print(f" ===> took {end-start} seconds")
return result
return wrapper
# function to define an old style fully connected network (multilayer perceptrons)
class old_nn(nn.Module):
def __init__(self):
super(old_nn, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 4096)
self.fc2 = nn.Linear(4096, 4096)
self.fc3 = nn.Linear(4096, n_classes) # last FC for classification
def forward(self, x):
x = x.view(x.shape[0], -1)
        x = torch.sigmoid(self.fc1(x))  # torch.sigmoid replaces the deprecated F.sigmoid
        x = torch.sigmoid(self.fc2(x))
x = self.fc3(x)
return x
# function to define the convolutional network
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
# conv2d first parameter is the number of kernels at input (you get it from the output value of the previous layer)
# conv2d second parameter is the number of kernels you wanna have in your convolution, so it will be the n. of kernels at output.
# conv2d third, fourth and fifth parameters are, as you can read, kernel_size, stride and zero padding :)
self.conv1 = nn.Conv2d(3, 128, kernel_size=5, stride=2, padding=0)
self.conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=0)
self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=0)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv_final = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=0)
self.fc1 = nn.Linear(64 * 4 * 4 * 4, 4096)
self.fc2 = nn.Linear(4096, n_classes) # last FC for classification
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.pool(self.conv_final(x)))
x = x.view(x.shape[0], -1)
x = F.relu(self.fc1(x))
# hint: dropout goes here!
x = self.fc2(x)
return x
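# Hedged aside (not part of the original file): a quick shape walk-through for
# the CNN above on 32x32x3 CIFAR inputs. conv1 (k=5, s=2) maps 32 -> 14, conv2
# and conv3 (k=3, s=1) map 14 -> 12 -> 10, conv_final maps 10 -> 8, and the 2x2
# pool maps 8 -> 4, so the flattened size is 256*4*4 = 4096 = 64*4*4*4, matching
# fc1's input size.
def _shape_check():
    x = torch.randn(1, 3, 32, 32)
    net = CNN()
    h = F.relu(net.conv1(x))                     # (1, 128, 14, 14)
    h = F.relu(net.conv3(F.relu(net.conv2(h))))  # (1, 128, 10, 10)
    h = F.relu(net.pool(net.conv_final(h)))      # (1, 256, 4, 4)
    assert h.view(1, -1).shape[1] == 64 * 4 * 4 * 4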
# function to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
def plot_kernel(model):
model_weights = model.state_dict()
fig = plt.figure()
plt.figure(figsize=(10, 10))
for idx, filt in enumerate(model_weights['conv1.weight']):
# print(filt[0, :, :])
if idx >= 32: continue
plt.subplot(4, 8, idx + 1)
plt.imshow(filt[0, :, :], cmap="gray")
plt.axis('off')
plt.show()
def plot_kernel_output(model, images):
fig1 = plt.figure()
plt.figure(figsize=(1, 1))
img_normalized = (images[0] - images[0].min()) / (images[0].max() - images[0].min())
plt.imshow(img_normalized.numpy().transpose(1, 2, 0))
plt.show()
output = model.conv1(images)
layer_1 = output[0, :, :, :]
layer_1 = layer_1.data
fig = plt.figure()
plt.figure(figsize=(10, 10))
for idx, filt in enumerate(layer_1):
if idx >= 32: continue
plt.subplot(4, 8, idx + 1)
plt.imshow(filt, cmap="gray")
plt.axis('off')
plt.show()
def test_accuracy(net, dataloader):
########TESTING PHASE###########
# check accuracy on whole test set
correct = 0
total = 0
net.eval() # important for deactivating dropout and correctly use batchnorm accumulated statistics
with torch.no_grad():
for data in dataloader:
images, labels = data
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
print('Accuracy of the network on the test set: %d %%' % (
accuracy))
return accuracy
def show_dataset(dataiter):
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
def plot_values(accuracy_values, loss_values):
fig = plt.figure(figsize=(10, 20))
ax = fig.add_subplot(211)
ax.plot(accuracy_values, '-bo', label='accuracy')
ax.set_title("Accuracy ")
ax.set_xlabel("Epochs")
ax.legend()
ax1 = fig.add_subplot(212)
ax1.plot(loss_values, '-ro', label='loss')
ax1.set_title("Loss over epochs")
ax1.set_xlabel("Epochs")
ax1.legend()
fig.show()
@watcher
def train(net, trainloader, testloader, criterion, optimizer, nepochs):
########TRAINING PHASE###########
n_loss_print = len(trainloader) # print every epoch, use smaller numbers if you wanna print loss more often!
n_epochs = nepochs
accuracy_values = []
loss_values = []
print("Starting Training")
for epoch in range(n_epochs): # loop over the dataset multiple times
net.train() # important for activating dropout and correctly train batchnorm
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs and cast them into cuda wrapper
inputs, labels = data
inputs = inputs.cuda()
labels = labels.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % n_loss_print == (n_loss_print - 1):
loss_values.append(running_loss / n_loss_print)
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / n_loss_print))
running_loss = 0.0
accuracy_values.append(test_accuracy(net, testloader))
print('Finished Training')
plot_values(accuracy_values, loss_values)
if __name__ == '__main__':
    # transforms are heavily used for simple and complex transformations and data augmentation
transform_train = transforms.Compose(
[
# transforms.Resize((40, 40)),
# transforms.RandomCrop(size=[32, 32], padding=0),
# transforms.RandomHorizontalFlip(),
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
transform_test = transforms.Compose(
[
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,
shuffle=True, num_workers=4, drop_last=True)
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=256,
shuffle=False, num_workers=4, drop_last=True)
print("Dataset loaded")
dataiter = iter(trainloader)
# show images just to understand what is inside the dataset ;)
# show_dataset(dataiter)
print("NN instantiated")
# net = old_nn()
net = CNN()
####
# for Residual Network:
# net = models.resnet18(pretrained=True)
# net.fc = nn.Linear(512, n_classes) #changing the fully connected layer of the already allocated network
####
###OPTIONAL:
# print("####plotting kernels of conv1 layer:####")
# plot_kernel(net)
####
net = net.cuda()
    criterion = nn.CrossEntropyLoss().cuda()  # it already does the softmax computation for us!
    optimizer = optim.Adam(net.parameters(), lr=0.0001)  # better convergence than plain SGD :)
print("Optimizer and criterion instantiated")
###OPTIONAL:
# print("####plotting output of conv1 layer:#####")
# plot_kernel_output(net,images)
###
train(net=net,
trainloader=trainloader,
testloader=testloader,
criterion=criterion,
optimizer=optimizer,
nepochs=20)
|
modusV/Machine-Learning-Homeworks
|
HW3/main.py
|
main.py
|
py
| 9,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484026108
|
import sys
import heapq
if sys.version[0] == '2':
range, input = xrange, raw_input
MAX_SPEED = 30
dvs = (-1, 0, 1)
while True:
N, M = map(int, input().split())
if not (N | M):
break
S, G = map(lambda x: int(x) - 1, input().split())
edge = [[] for _ in range(N)]
for _ in range(M):
x, y, d, c = map(int, input().split())
edge[x - 1].append((y - 1, d, c))
edge[y - 1].append((x - 1, d, c))
INF = 1e9
dist = [[[INF for _ in range(N)] for _ in range(MAX_SPEED + 1)] for _ in range(N)]
que = [(0.0, S, 0, S)]
while que:
cost, now, v, prev = heapq.heappop(que)
if cost > dist[now][v][prev]:
continue
if now == G and v == 1:
print("{:.20f}".format(cost))
break
dist[now][v][prev] = cost
for x, d, c in edge[now]:
if x == prev:
continue
for dv in dvs:
nv = v + dv
if 0 < nv <= c and dist[x][nv][now] > dist[now][v][prev] + d / nv:
dist[x][nv][now] = dist[now][v][prev] + d / nv
heapq.heappush(que, (dist[x][nv][now], x, nv, now))
else:
print("unreachable")
|
knuu/competitive-programming
|
aoj/11/aoj1162.py
|
aoj1162.py
|
py
| 1,221 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15447622348
|
import pyglet
class Tower:
def __init__(self, pos):
super().__init__()
self.pos = pos
class TownHall(Tower):
def __init__(self, pos):
super().__init__(pos)
self.image = pyglet.image.load('./Assets/town hall.png')
self.image.anchor_x = self.image.width // 2
self.sprite = pyglet.sprite.Sprite(self.image, x=self.pos[0], y=self.pos[1])
self.size = [3, 3]
self.tiles = [[(x + self.pos[0], y + self.pos[1]) for x in range(3)] for y in range(3)]
print(self.tiles)
|
dungcatcher/siege
|
towers.py
|
towers.py
|
py
| 543 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32285228189
|
#!/usr/bin/env python
from inv_memo import *
bin_file = './memo'
context(os = 'linux', arch = 'amd64')
# context.log_level = 'debug'
#==========
env = Environment('debug', 'local', 'remote')
env.set_item('mode', debug = 'DEBUG', local = 'PROC', remote = 'SOCKET')
env.set_item('target', debug = {'argv':[bin_file], 'aslr':False}, \
local = {'argv':[bin_file]}, \
remote = {'host':'challenge', 'port':4296})
env.set_item('libc', debug = None, \
local = None, \
remote = 'libc-2.27.so')
env.set_item('one_gadget', debug = 0xaee38c, local = 0xe4138c, remote = 0xe4138c)
env.select('remote')
#==========
binf = ELF(bin_file)
libc = ELF(env.libc) if env.libc else binf.libc
#==========
def attack(conn, **kwargs):
fake_chunk = [0]*8
fake_chunk[1] = 0x21;
fake_chunk[5] = 0x11;
fake_chunk[7] = 0x11;
iv = InvisibleMemo(conn, ''.join(map(p64, fake_chunk))[:-1])
iv.newkey(''.join(map(p64, fake_chunk))[:0x10])
for _ in range(7):
iv.add(0x18, None, 0x100)
for _ in range(7):
iv.add(0x88, None, 0x100)
iv.delete(2, -1)
iv.add(0x88, None, 0x100)
iv.newkey(p64(0)+p64(0x183))
iv.add(0x178, p32(env.one_gadget)[:3], 0x98)
conn.sendlineafter('> ', '0')
conn.sendlineafter('Bye!\n', 'id')
conn.recv(1)
#==========
if __name__=='__main__':
comn = Communicate(env.mode, **env.target)
comn.connect()
if env.check('remote'):
comn.run(attack)
comn.connection.sendline('/send_flag')
else:
comn.bruteforce(attack)
comn.connection.interactive()
#==========
|
shift-crops/CTFProblemArchive
|
2019/CODE BLUE CTF/InvisibleMemo/exploit/exploit_memo_probably_4096.py
|
exploit_memo_probably_4096.py
|
py
| 1,692 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30754540175
|
for _ in range(int(input())):
n = int(input())
candles = list(map(int, input().split(' ')))
candles = sorted(candles, reverse=True)
if len(candles) == 1:
if candles[0] > 1:
print('NO')
else:
print('YES')
else:
if candles[0] > candles[1] + 1:
print('NO')
else:
print('YES')
|
Tanguyvans/Codeforces
|
780/B.py
|
B.py
|
py
| 376 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20921242416
|
import networkx as nx
from graph_manager.graph_tools import clusters_dict2clusters_list
from graph_manager.plot_tools import *
def louvain(G, resolution=1, eps=0.001):
clusters_dict = maximize(G, resolution, eps)
n = len(clusters_dict)
k = len(set(clusters_dict.values()))
while k < n:
H = aggregate(G, clusters_dict)
new_cluster = maximize(H, resolution, eps)
clusters_dict = {u: new_cluster[clusters_dict[u]] for u in G.nodes()}
n = k
k = len(set(clusters_dict.values()))
return clusters_dict2clusters_list(clusters_dict)
def maximize(G, resolution, eps):
# node weights
node_weight = {u: 0. for u in G.nodes()}
for (u, v) in G.edges():
node_weight[u] += G[u][v]['weight']
node_weight[v] += G[u][v]['weight']
# total weight
wtot = sum(list(node_weight.values()))
# clusters
cluster = {u: u for u in G.nodes()}
# total weight of each cluster
cluster_weight = {u: node_weight[u] for u in G.nodes()}
# weights in each community to which the nodes are linked
w = {u: {v: G[u][v]['weight'] for v in G.neighbors(u) if v != u} for u in G.nodes()}
increase = True
while increase:
increase = False
for u in G.nodes():
# Compute delta for every neighbor
delta = {}
for k in w[u].keys():
delta[k] = w[u][k] - resolution * node_weight[u] * cluster_weight[k] / wtot
# Compute delta for u itself (if not already done)
k = cluster[u]
if k not in w[u].keys():
delta[k] = - resolution * node_weight[u] * cluster_weight[k] / wtot
# Compare the greatest delta to epsilon
l = max(delta, key=delta.get)
if delta[l] - delta[k] > resolution * (node_weight[u] * node_weight[u] / wtot) + eps / wtot:
increase = True
cluster[u] = l
# Update information about neighbors and the community change of u
cluster_weight[k] -= node_weight[u]
cluster_weight[l] += node_weight[u]
for v in G.neighbors(u):
if v != u:
w[v][k] -= G[u][v]['weight']
if w[v][k] == 0:
w[v].pop(k)
if l not in w[v].keys():
w[v][l] = 0
w[v][l] += G[u][v]['weight']
return cluster
def aggregate(G, clusters_dict):
H = nx.Graph()
H.add_nodes_from(list(clusters_dict.values()))
for (u,v) in G.edges():
if H.has_edge(clusters_dict[u], clusters_dict[v]):
H[clusters_dict[u]][clusters_dict[v]]['weight'] += G[u][v]['weight']
else:
H.add_edge(clusters_dict[u], clusters_dict[v])
H[clusters_dict[u]][clusters_dict[v]]['weight'] = G[u][v]['weight']
return H
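# Hedged usage sketch (not part of the original file); it assumes every edge
# carries a 'weight' attribute, which the functions above require.
if __name__ == "__main__":
    G = nx.Graph()
    G.add_edge(0, 1, weight=1.0)
    G.add_edge(1, 2, weight=1.0)
    G.add_edge(3, 4, weight=1.0)
    print(louvain(G))  # expected: two clusters, e.g. [[0, 1, 2], [3, 4]]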
|
sharpenb/Multi-Scale-Modularity-Graph-Clustering
|
Scripts/clustering_algorithms/louvain.py
|
louvain.py
|
py
| 2,921 |
python
|
en
|
code
| 2 |
github-code
|
6
|
71780099388
|
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
from django.db import models
class User(AbstractUser):
    '''User model'''
email = models.EmailField(
verbose_name='Электронная почта',
max_length=254,
unique=True,
db_index=True,
)
username = models.CharField(
verbose_name='Логин',
max_length=150,
unique=True,
db_index=True,
validators=[RegexValidator(
regex=r'^[\w.@+-]+$',
message='В имени использованы запрещенные символы'
)]
)
first_name = models.CharField(
verbose_name='Имя',
max_length=150,
)
last_name = models.CharField(
verbose_name='Фамилия',
max_length=150,
)
password = models.CharField(
verbose_name='Пароль',
max_length=254,
)
is_subscribed = models.BooleanField(
default=False,
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = 'username', 'first_name', 'last_name'
class Meta:
ordering = ['id']
verbose_name = 'Пользователь'
verbose_name_plural = 'Пользователи'
def __str__(self):
return self.email
|
GirzhuNikolay/foodgram-project-react
|
backend/users/models.py
|
models.py
|
py
| 1,353 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32724523948
|
import tornado.ioloop
import tornado.web
import hashlib
import uuid
import json
from time import mktime
from datetime import datetime
from email.utils import formatdate
up_user = ''
up_password = ''
up_method = 'PUT'
up_host = 'v1.api.upyun.com'
up_path = '/bucket/'
up_base_url = "http://bucket.b0.upaiyun.com/%s"
class MainHandler(tornado.web.RequestHandler):
def get(self):
content_md5 = self.get_argument('md5', '')
content_len = self.get_argument('len', '')
content_type = self.get_argument('type', '')
stamp = mktime(datetime.now().timetuple())
date = formatdate(timeval = stamp, localtime = False, usegmt = True)
filename = hashlib.md5(uuid.uuid1().hex).hexdigest()
base_string = "%s&%s&%s&%s&%s" % (
up_method,
up_path + filename,
date,
content_len,
hashlib.md5(up_password).hexdigest())
signature = hashlib.md5(base_string).hexdigest()
headers = {"Authorization": "UpYun %s:%s" % (up_user, signature),
"Content-Type": content_type,
"Content-MD5": content_md5,
"Date": date,
"Expect": ""}
self.write(json.dumps({
"headers": headers,
"method": up_method,
"host": up_host,
"path": up_path + filename,
"url": up_base_url % filename
}))
application = tornado.web.Application([
(r"/storage", MainHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
zhicheng/storage
|
main.py
|
main.py
|
py
| 1,410 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13925195329
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 17:27:59 2018
@author: du
This is neural network configuration file
"""
num_epoch = 1000
batch_size = 32
milestones = [2]
max_len = 40
hidden_size2 = 50 # hidden size for image feature
hidden_size = 50 # hidden size for superimpoed text feature
def write_log(dir_,log):
"""
write log file
Input: dir_: the directory for writing the log file
log: the string log
"""
    with open(dir_, 'a') as file:
        file.write(log)
        file.write('\n')
|
yuhaodu/TwitterMeme
|
step2_MemeClassifier/.ipynb_checkpoints/classifier_utils-checkpoint.py
|
classifier_utils-checkpoint.py
|
py
| 573 |
python
|
en
|
code
| 6 |
github-code
|
6
|
42029059098
|
import torch
import time
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import transforms
import os
from Network import FullyConvNet
from Network import train
from PIL import Image
import numpy as np
import argparse
import cv2
from serialTest.serialPackage import armCommunication
from collections import deque
import utils
import settings
ARM_RANGE_HEIGHT = settings.ARM_RANGE_HEIGHT
ARM_RANGE_WIDTH = settings.ARM_RANGE_WIDTH
BASE_X = settings.BASE_X
BASE_Y = settings.BASE_Y
RATIO = settings.RATIO
def update_points(points):
pointsOldDataFile = open('pointsOldData.csv','w')
for _point in points:
pointLineString = str(_point[0])+","+str(_point[1]) + "\n"
pointsOldDataFile.write(pointLineString)
pointsOldDataFile.close()
def read_savedPoints():
points = []
with open('pointsOldData.csv','r') as f:
for pointLineString_fromFile in f:
pointStrings = pointLineString_fromFile.split(",")
points.append([float(p) for p in pointStrings])
return points
def transform_by4(img, points, width, height):
    """ copied from https://blanktar.jp/blog/2015/07/python-opencv-crop-box.html """
    """ Crop the image by specifying its 4 corner points. """
    if len(points) != 4:  # if we did not get exactly 4 corners, fall back to the saved data
        print("Corners not found; falling back to saved points")
        points = read_savedPoints()
    else:  # with exactly 4 corners, refresh the saved data
        update_points(points)
    points = sorted(points, key=lambda x: x[1])  # sort by ascending y
    top = sorted(points[:2], key=lambda x: x[0])  # first two are the top of the quad; sorting by x separates left/right
    bottom = sorted(points[2:], key=lambda x: x[0], reverse=True)  # last two are the bottom, sorted by x in reverse
    points = np.array(top + bottom, dtype='float32')  # recombine the two halves
    dst = np.array([
        np.array([0, 0]),
        np.array([width - 1, 0]),
        np.array([width - 1, height - 1]),
        np.array([0, height - 1]),
    ], np.float32)
    trans = cv2.getPerspectiveTransform(points, dst)  # source/destination pairs yield the perspective (homography) matrix
    return cv2.warpPerspective(img, trans, (int(width), int(height)))  # warp the crop to the requested size
def np_to_PIL(image):
return Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
def crop_image_along_line(image, width, height):
blue, green, red = cv2.split(image)
diff = np.where(green >= red, green - (red.astype(np.uint16) * 10 // 10).astype(np.uint8), 0)
ret, thresh = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
kernel = np.ones((50,50),np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key=cv2.contourArea, reverse=True)
epsilon = 0.05 * cv2.arcLength(contours[0], True)
approx = cv2.approxPolyDP(contours[0], epsilon, True)
cv2.imwrite("thresh.jpg", thresh)
return transform_by4(image, approx[:, 0, :], width, height)
cam = cv2.VideoCapture(2)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
def capture():
# cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 4000)
retval, frame = cam.read()
if not retval:
print('cannnot read')
# return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
return frame
def get_max_dir(directory_path):
os.makedirs(directory_path, exist_ok=True)
return max([0] + [int(d.name) for d in os.scandir(directory_path) if d.is_dir() and d.name.isdigit()])
def get_max_file(directory_path):
os.makedirs(directory_path, exist_ok=True)
return max([0] + [int(f.name.split('.')[0]) for f in os.scandir(directory_path) if f.is_file() and f.name.split('.')[0].isdigit()])
def random_position(height, width, ratio):
from random import randrange
return randrange(height * ratio), randrange(width * ratio // 2)
def pick(y, x, arm, ratio):
x //= ratio
y //= ratio
y = ARM_RANGE_HEIGHT - y
arm.send_position(BASE_X + x, BASE_Y + y)
print(BASE_X + x, BASE_Y + y)
while True:
res = arm.read_one_byte()
print(res)
if res != 0:
return res == 11
def counter(res):
result = []
with open('day1.txt') as f:
for line in f:
result = [int(l) for l in line.split()]
with open('day1.txt', 'w') as f:
result[int(res)] += 1
print(*result, file=f)
def add_red_point(pil_image, h, w):
im = np.array(pil_image)
for i in range(3):
im[h][w][i] = 0
im[h][w][0] = 255
return Image.fromarray(im)
def main(model):
INPUT_SIZE = 129
BATCH = ARM_RANGE_WIDTH // 2
OBJECT_NUM = 3
picked_count = 0
indicator = 0
os.makedirs('entire', exist_ok=True)
arm = armCommunication('COM8', 115200, 20)
save_dirctory = './models/' + str(get_max_dir('./models') + 1)
# os.makedirs(save_dirctory, exist_ok=True)
net = FullyConvNet()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net.to(device)
if model is not None:
net.load_state_dict(torch.load(model))
net.eval()
sigmoid = nn.Sigmoid()
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(tuple([0.5] * 3), tuple([0.5] * 3))]
)
latest_positions = deque([(0, 0) for i in range(5)], maxlen=5)
for i in range(int(1e6)):
# if i != 0 and (i == 100 or i % 500 == 0):
# model_save_path = os.path.join(save_dirctory, '{}.pth'.format(i))
# train(os.path.join(model_save_path))
# net.load_state_dict(torch.load(model_save_path))
# net.eval()
if picked_count >= OBJECT_NUM:
picked_count = 0
indicator = (indicator + 1) & 1
print('cap')
image = np_to_PIL(crop_image_along_line(capture(), ARM_RANGE_WIDTH * RATIO, ARM_RANGE_HEIGHT * RATIO))
# image = Image.open('test/2539.jpg')
print(image.size)
print('done')
P = np.zeros(shape=(ARM_RANGE_HEIGHT * RATIO, ARM_RANGE_WIDTH * RATIO), dtype=np.float16)
with torch.no_grad():
P = sigmoid(net(torch.stack([transform(image)]).to(device))).cpu().numpy()[0][0]
for i, (h, w) in enumerate(latest_positions, 1):
for y in range(max(0, h - i ** 2), min(ARM_RANGE_HEIGHT * RATIO, h + i ** 2 + 1)):
for x in range(max(0, w - i ** 2), min(ARM_RANGE_WIDTH * RATIO, w + i ** 2 + 1)):
P[y][x] = 0
h, w = np.unravel_index(np.argmax(P), P.shape)
print("probability:", P[h][w])
overray = Image.fromarray(utils.probability_to_green_image_array(P))
blended = Image.blend(image, overray, alpha=0.5)
blended.show()
latest_positions.append((h, w))
time.sleep(1) # what is this?
try:
res = pick(h, w, arm, RATIO) # the position on the full image
except Exception as e:
print(e)
continue
picked_count += res
image_save_path = './images/{}/{}.jpg'.format(int(res), get_max_file('./images/{}'.format(int(res))) + 1)
utils.crop_center(image, h, w, INPUT_SIZE).save(image_save_path)
image.save('./entire/{}.jpg'.format(get_max_file('./entire') + 1))
counter(res)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', type=str, default='no_maxpool_L1/60.pth')
args = parser.parse_args()
main(args.model)
|
qLethon/bin_picking_robot
|
main.py
|
main.py
|
py
| 8,023 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30071759044
|
import pandas as pd
from tensorflow import keras
import os
import numpy as np
from sklearn.preprocessing import MinMaxScaler
class AbundanceGenerator(keras.utils.Sequence):
def __init__(self, abundance_file,species,batch_size=32, shuffle=True,to_fit=True):
'Initialization'
self.abundance_file=abundance_file
self.dim = len(species)
        self.store = pd.HDFStore(abundance_file)
self.species=species
self.n_examples=self.store.get_storer('df').shape[0]
self.batch_size = batch_size
self.n_batches=int(np.floor(self.n_examples / self.batch_size))
self.shuffle = shuffle
self.on_epoch_end()
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.n_batches)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
X=self.store.select(key="df",start=index*self.batch_size,stop=(index+1)*self.batch_size)
return X[self.species].values,X[self.species].values
def __len__(self):
'Denotes the number of batches per epoch'
return self.n_batches
|
uclchem/Chemulator
|
src/abundancegenerator.py
|
abundancegenerator.py
|
py
| 1,233 |
python
|
en
|
code
| 6 |
github-code
|
6
|
20031000434
|
import sys
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1000000000
image = Image.open("WAC_TIO2_COMBINED_MAP.png")
width, height = image.size
print("width",width,end=" ")
print("height",height,end=" ")
aspect_ratio = width/height
print("aspect_ratio",aspect_ratio)
if aspect_ratio == 2:
print("aspect ratio already matching.")
exit(0)
else:
print("adapting aspect ratio to 2")
if aspect_ratio < 2:
print("Expanding width")
print("ERROR: Not implemented.")
exit(0)
if aspect_ratio > 2:
new_height = width/2
if ((int(new_height) - height)% 2) == 0 :
new_height = int(new_height)
else:
new_height = int(new_height)+1
print("Expanding height to",new_height)
add_lines = (new_height-height)/2
print("adding",add_lines,"lines to the top and bottom")
new_im = Image.new('L', (width, new_height))
x_offset = 0
y_offset = int(add_lines)
new_im.paste(image, (x_offset,y_offset))
new_im.save('WAC_TIO2_GLOBAL_MAP.png')
#new_im.save('WAC_TIO2_GLOBAL_MAP.TIF')
print('COMPLETED.')
|
Sven-J-Steinert/DLR_Paper_2023
|
maps/preparation/TiO2/old/02_place_in_global.py
|
02_place_in_global.py
|
py
| 1,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26042346056
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from pants.bsp.spec.base import BuildTargetIdentifier
# -----------------------------------------------------------------------------------------------
# Compile Request
# See https://build-server-protocol.github.io/docs/specification.html#compile-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class CompileParams:
# A sequence of build targets to compile.
targets: tuple[BuildTargetIdentifier, ...]
# A unique identifier generated by the client to identify this request.
# The server may include this id in triggered notifications or responses.
origin_id: str | None = None
# Optional arguments to the compilation process.
arguments: tuple[str, ...] | None = ()
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(
targets=tuple(BuildTargetIdentifier.from_json_dict(x) for x in d["targets"]),
origin_id=d.get("originId"),
arguments=tuple(d["arguments"]) if "arguments" in d else None,
)
def to_json_dict(self) -> dict[str, Any]:
result: dict[str, Any] = {"targets": [tgt.to_json_dict() for tgt in self.targets]}
if self.origin_id is not None:
result["originId"] = self.origin_id
if self.arguments is not None:
result["arguments"] = self.arguments
return result
@dataclass(frozen=True)
class CompileResult:
# An optional request id to know the origin of this report.
origin_id: str | None
# A status code for the execution.
status_code: int
# Kind of data to expect in the `data` field. If this field is not set, the kind of data is not specified.
data_kind: str | None = None
# A field containing language-specific information, like products
# of compilation or compiler-specific metadata the client needs to know.
data: Any | None = None
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(
origin_id=d.get("originId"),
status_code=d["statusCode"],
data_kind=d.get("dataKind"),
data=d.get("data"),
)
def to_json_dict(self) -> dict[str, Any]:
result: dict[str, Any] = {
"statusCode": self.status_code,
}
if self.origin_id is not None:
result["originId"] = self.origin_id
if self.data_kind is not None:
result["dataKind"] = self.data_kind
if self.data is not None:
result["data"] = self.data # TODO: Enforce to_json_dict available
return result
@dataclass(frozen=True)
class CompileTask:
target: BuildTargetIdentifier
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(target=BuildTargetIdentifier.from_json_dict(d["target"]))
def to_json_dict(self) -> dict[str, Any]:
return {"target": self.target.to_json_dict()}
@dataclass(frozen=True)
class CompileReport:
# The build target that was compiled
target: BuildTargetIdentifier
# An optional request id to know the origin of this report.
origin_id: str | None
# The total number of reported errors compiling this target.
errors: int
# The total number of reported warnings compiling the target.
warnings: int
# The total number of milliseconds it took to compile the target.
time: int | None = None
# The compilation was a noOp compilation.
no_op: bool | None = None
@classmethod
def from_json_dict(cls, d: dict[str, Any]) -> Any:
return cls(
target=BuildTargetIdentifier.from_json_dict(d["target"]),
origin_id=d.get("originId"),
errors=d["errors"],
warnings=d["warnings"],
time=d.get("time"),
no_op=d.get("noOp"),
)
def to_json_dict(self) -> dict[str, Any]:
result = {
"target": self.target.to_json_dict(),
"errors": self.errors,
"warnings": self.warnings,
}
if self.origin_id is not None:
result["originId"] = self.origin_id
if self.time is not None:
result["time"] = self.time
if self.no_op is not None:
result["noOp"] = self.no_op
return result
|
pantsbuild/pants
|
src/python/pants/bsp/spec/compile.py
|
compile.py
|
py
| 4,430 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
2772686666
|
'''There are K different kinds of roses to place in N positions, and every kind
must appear at least once. How many different arrangements are there? Since the
answer can be very large, output it modulo 772235.
Input:
A single line with two integers N, K (1 <= N <= 50000, 1 <= K <= 30)
Output:
A single line with the answer
Sample input 1:
3 2
Sample output 1:
6
'''
def fun(k, n):  # k kinds of flowers, unlimited supply: enumerate how n picks are distributed over the kinds
res = [0]
t = math.factorial(n+k)%772235
help(k,0,n,res,t)
return res[0]
def help(k,s,n,res,t,ans=[]):
if s == k-1:
ans.append(n)
t_ans = t
for i in ans.copy():
t_ans = t_ans/(math.factorial(i+1)%772235)
res[0]+=int(t_ans)%772235
#print(res)
ans.pop()
return
for i in range(n+1):
ans.append(i)
help(k,s+1,n-i,res,t,ans)
ans.pop()
import math
### DP approach: couldn't work it out; skipped that route
if __name__ == "__main__":
ans = 0
n,k = [int(x) for x in input().strip().split()]
value=[i+1 for i in range(k)]
for j in range(1,k):
key=[i+1 for i in range(value[j])]
sum_=0
for i in range(j):
key[i]=math.factorial(value[j])/(math.factorial(value[j]-key[i])*math.factorial(key[i]))
temp1 = key[i]*value[i]%772235
sum_+=temp1
sum_=sum_%772235
temp2 = pow(value[j],n)%772235
value[j]=int((temp2-sum_)%772235)
print(value[k-1])
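    # Cross-check sketch (assumption: the task counts surjections of N positions
    # onto K colours); inclusion-exclusion gives the same count directly.
    # Note: math.comb needs Python 3.8+.
    def surjections(n_, k_, mod=772235):
        return sum((-1) ** i * math.comb(k_, i) * pow(k_ - i, n_, mod) for i in range(k_)) % mod
    print(surjections(n, k))  # for the sample "3 2" this prints 6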
|
queryor/algorithms
|
gatherAlgorithms/玫瑰花.py
|
玫瑰花.py
|
py
| 1,573 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
8915987730
|
# Program that reads two integers and reports whether they are equal or different
# Ask the user for the numbers and store them in the corresponding variables
n1 = int(input("Digite o primeiro número "))
n2 = int(input("Digite o segundo número "))
# Check whether the values are equal and inform the user
if n1==n2:
print("Os números são iguais")
else:
print("Os números são diferentes")
|
lucasnasc46/curso-python22
|
Desafaio 1/questao2.py
|
questao2.py
|
py
| 422 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
19705741714
|
# https://www.beecrowd.com.br/judge/pt/problems/view/1173?origem=1
lista = list(range(10))
entrada = 51
while entrada > 50:
entrada = int(input())
lista[0] = entrada
print(f"N[0] = {lista[0]}")
for i in range(1, 10):
lista[i] = lista[i - 1] * 2
print(f"N[{i}] = {lista[i]}")
|
caioopra/URI-Beecrowd
|
1173.py
|
1173.py
|
py
| 291 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
72774182909
|
class Solution(object):
# brute force
def minDistanceBrute(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
if len(word1)==0:
return len(word2)
if len(word2)==0:
return len(word1)
if word1[0]==word2[0]:
return self.minDistanceBrute(word1[1:], word2[1:])
else:
return min(self.minDistanceBrute(word1[1:], word2), self.minDistanceBrute(word1, word2[1:]), self.minDistanceBrute(word1[1:], word2[1:])) + 1
# ^ delete word1[0] ^ insert at word1 ^ replace word1[0] with word2[0]
# DP
def minDistance(self, word1, word2):
        '''
        This is just a modified version of longest common subsequence (LCS):
        moving to the next cell must take the best of replacing, deleting or
        inserting an element of w1.
        Note: here w2 runs along the top of the matrix and w1 down the left side.
        If w1[i] == w2[j] we move ahead diagonally; if not,
        we take the minimum of these three operations:
            insert:  dp[i][j-1] + 1
            delete:  dp[i-1][j] + 1
            replace: dp[i-1][j-1] + 1
        Recall that for LCS we put 0s in the first row and column of the matrix
        and started from index (1, 1). The 0th row/column stand for the empty
        string, and turning an empty string into a string of length i takes i
        insertions, so dp[i][0] = i and dp[0][j] = j.
        Example matrix between "abc" (rows) and "adc" (columns):
            0 1 2 3
            1 0 1 2
            2 1 1 2
            3 2 2 1
        '''
m = len(word1)
n = len(word2)
dp = [[0 for _ in range(n+1)] for _ in range(m+1)]
# the reason behind this is that, if we start from an empty string at [i, 0] then to change it to word2, that is the word we have kept on top, we will need to insert i elements
# thus dp[i][0] = i
for i in range(m+1):
dp[i][0] = i
for j in range(n+1):
dp[0][j] = j
for i in range(1, m+1):
for j in range(1, n+1):
if word1[i-1]==word2[j-1]:
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
return dp[m][n]
if __name__=='__main__':
word1 = "horse"
word2 = "ros"
s = Solution()
print(s.minDistanceBrute(word1, word2))
print(s.minDistance(word1, word2))
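    # Hedged extra (not part of the original solution): the same DP needs only
    # two rows, since dp[i][j] depends on rows i and i-1 alone.
    def min_distance_two_rows(w1, w2):
        prev = list(range(len(w2) + 1))
        for i, c1 in enumerate(w1, 1):
            curr = [i] + [0] * len(w2)
            for j, c2 in enumerate(w2, 1):
                curr[j] = prev[j - 1] if c1 == c2 else min(prev[j], curr[j - 1], prev[j - 1]) + 1
            prev = curr
        return prev[-1]
    print(min_distance_two_rows(word1, word2))  # 3, same as above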
|
V-nsh/DSA
|
leetcode/leetcode75/DP_mult/72_edit_distance.py
|
72_edit_distance.py
|
py
| 2,597 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72908363068
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from dateutil.relativedelta import relativedelta
from odoo.exceptions import ValidationError, UserError
from datetime import datetime, timedelta
from odoo.http import request
class OpAdmissionRegisterCustom(models.Model):
_inherit = "op.admission.register"
batch_id = fields.Many2one(
'op.batch', 'Term', required=True)
product_id = fields.Many2one(
'product.product', 'Course Fees', required=False,
domain=[('type', '=', 'service')], readonly=True,
states={'draft': [('readonly', False)]}, track_visibility='onchange')
class OpAdmission(models.Model):
_inherit = 'op.admission'
batch_id = fields.Many2one('op.batch', 'Term', domain=[], required=True, readonly=False)
name = fields.Char(
'Name', size=128, required=False, translate=False)
readonly = fields.Boolean(compute="_compute_read_only")
class_id = fields.Many2one('op.classroom', 'Class', required=False)
birth_place = fields.Many2one('res.country.state', 'Birth Place')
payment_option = fields.Selection([('normal', 'Normal'),
('exempted', 'Exempted'),
('haft_scholarship', '50% Scholarship'),
('full_scholarship', '100% Scholarship'),
('installment', 'Installment')], default='normal')
fill_application = fields.Boolean('Fill Application')
marital_status = fields.Selection([('single', 'Single'),
('married', 'Married')])
constrains = fields.Text('Special Wishes')
shoe_size_id = fields.Many2one('pm.shoe.size')
uniform_size_id = fields.Many2one('pm.uniform.size')
shoe_size = fields.Selection([
('xxs', 'XXS'),
('xs', 'XS'),
('s', 'S'),
('m', 'M'),
('l', 'L'),
        ('xl', 'XL'),
('xxl', 'XXL'),
], 'Shoe Size')
khmer_name = fields.Char('Name in Khmer')
uniform_size = fields.Selection([
('xxs', 'XXS'),
('xs', 'XS'),
('s', 'S'),
('m', 'M'),
('l', 'L'),
        ('xl', 'XL'),
('xxl', 'XXL'),
], 'Uniform Size')
nationality = fields.Many2one('res.country', 'Nationality')
primary_language = fields.Many2one('pm.student.language', string='Other Language')
other_language = fields.Many2many('pm.student.language', string='Other languages', help="Other languages")
english_score = fields.Float('English Score (%)')
high_school_id = fields.Many2one('pm.high_school', 'High School')
highest_education = fields.Selection([('HS', 'High School'),
('BA', 'Bachelor Degree'),
('MA', 'Master Degree'),
('PHD', 'Doctoral Degree')])
working_experience = fields.Text('Working Experience')
job_position = fields.Text('Job Position')
enroll_reason_id = fields.Many2one('pm.enroll_reason', string='Reason to Enroll')
not_enroll_reason_id = fields.Many2one('pm.not_enroll_reason', string='Reason not to Enroll')
current_address = fields.Char('Current Address')
hobby = fields.Char('Hobby')
family_size = fields.Integer('Family Size')
family_status = fields.Selection([('p', 'Poor'),
('n', 'Normal'),
('r', 'Rich')])
campaign_id = fields.Many2one('utm.campaign', 'Campaign')
source_id = fields.Many2one('utm.source', 'Source')
referred = fields.Char('Referred By')
passport_number = fields.Char('Passport Number')
id_card = fields.Char('ID Card')
medical_checkup = fields.Boolean('Medical Check up', default=True)
motivational_letter = fields.Boolean('Motivational Letter')
special_medical = fields.Text('Special Medical Condition')
is_scholarship = fields.Boolean('Scholarship')
lead_id = fields.Integer()
p_street = fields.Char('Street...')
p_street2 = fields.Char('Street...')
p_city = fields.Char('City', size=64)
p_zip = fields.Char('Zip', size=8)
p_state_id = fields.Many2one(
'res.country.state', 'States')
p_country_id = fields.Many2one(
'res.country', 'Country', )
application_number = fields.Char(
'Application Number', copy=False, readonly=True, store=True)
# new fields
application_date = fields.Datetime(
'Application Date', required=True, copy=False,
default=lambda self: fields.Datetime.now())
application_fee = fields.Boolean('Application Fee', required=True, default=True)
scholarship_status = fields.Many2one('pm.scholarship.status', string='Scholarship Status')
status = fields.Selection([('1st_follow_up', '1st follow-up'),
('2nd_follow_up', '2nd follow-up'),
('3rd_follow_up', '3rd follow-up'),
('visited_and_toured', 'Visited & toured academy'),
('live_student', 'Live of a student'),
('pick_up_application', 'Pick up application'),
('submitted_application', 'Submitted application incomplete'),
('schedule_for_interview', 'Schedule for interview'),
                               ('interviewed', 'Interviewed'),
('acceptance_letter', 'Acceptance letter issued')])
status_detail = fields.Char('Status detail')
lead_source = fields.Selection([('social_media', 'Social media'),
('facebook', 'Facebook'),
('website', 'Website'),
('school_visit', 'School Visit'),
('acac_student', 'By ACAC student'),
('friend', 'Friend'),
                                    ('school_councelor', 'School counselor'),
('family', 'Family'),
('open_day', 'Open day'),
('fair_exhibition', 'Fair/exhibition'),
('nea', 'NEA'),
('other', 'other')])
lead_participation = fields.One2many('pm.lead.participation',
inverse_name='admission_id',
string='Participation',
help="Participation")
additional_source = fields.Char('Additional Source Info')
parents = fields.Char('Parents')
siblings = fields.Integer('Siblings')
other_depends = fields.Char('Other dependents')
application_form = fields.Boolean('Application Form', default=True, required=True)
pictures = fields.Boolean('Pictures')
schooling_year = fields.Char('No. Schooling years')
lead_educational_achievement = fields.One2many('pm.lead.educational.achievement',
inverse_name="admission_id",
string='Educational Achievements',
help="Educational Achievements")
lead_working_experience = fields.One2many('pm.lead.working.experience',
inverse_name="admission_id",
string='Working Experience',
help="Working Experience")
contact_name = fields.Many2one('res.partner', string='Emergency Contact')
email_from = fields.Char('Email', help="Email address of the contact", tracking=40, index=True)
user_id = fields.Many2one('res.users', string='ACAC Contact', index=True, tracking=True,
default=lambda self: self.env.user)
acac_contact = fields.Char('ACAC Contact')
scholar_application = fields.Boolean('Scholar Application')
financial_status = fields.Boolean('Proof of Financial Status')
family_income = fields.Float('Source of Family income')
rank = fields.Selection([('first_contact', 'First Contact'),
('potential', 'Potential'),
('high_potential', 'High Potential')])
facebook = fields.Char('Facebook')
phone = fields.Char('Mobile 1')
admission_url = fields.Char('Link', compute="_compute_admission_url", store=True)
visa_number = fields.Char('Visa Number')
visa_expiry = fields.Date('Expiry Date')
    product_id = fields.Many2one(
        'product.product', 'Course Fees', required=False,
        domain=[('type', '=', 'service')], track_visibility='onchange')
@api.depends('state')
def _compute_read_only(self):
for rec in self:
if rec.state == 'done':
rec.readonly = True
else:
rec.readonly = False
    @api.onchange('register_id')
    def onchange_register(self):
        self.course_id = self.register_id.course_id
        self.batch_id = self.register_id.batch_id
@api.onchange('course_id')
def onchange_course(self):
# self.batch_id = False
term_id = False
if self.course_id and self.course_id.fees_term_id:
term_id = self.course_id.fees_term_id.id
self.fees_term_id = term_id
    @api.onchange('product_id')
    def onchange_product(self):
        self.fees = self.product_id.lst_price
@api.depends('name')
def _compute_admission_url(self):
for record in self:
base_url = request.env['ir.config_parameter'].get_param('web.base.url')
base_url += '/web#id=%d&view_type=form&model=op.admission' % (record.id)
record.admission_url = base_url
def submit_form(self):
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('pm_admission', 'student_admission_submission')[1]
except ValueError:
template_id = False
self.env['mail.template'].browse(template_id).send_mail(self.id, force_send=True)
self.state = 'submit'
action = self.env.ref("crm.crm_lead_all_leads").read()[0]
return action
def confirm_in_progress(self):
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('pm_admission', 'student_payment_confirm')[1]
except ValueError:
template_id = False
self.env['mail.template'].browse(template_id).send_mail(self.id, force_send=True)
self.state = 'confirm'
action = self.env.ref("crm.crm_lead_all_leads").read()[0]
return action
def admission_confirm(self):
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('pm_admission', 'student_admission_confirm')[1]
except ValueError:
template_id = False
self.env['mail.template'].browse(template_id).send_mail(self.id, force_send=True)
self.state = 'admission'
action = self.env.ref("crm.crm_lead_all_leads").read()[0]
return action
def confirm_cancel(self):
lead = self.env['crm.lead'].browse(self.lead_id)
lead.type = 'lead'
self.unlink()
action = self.env.ref("crm.crm_lead_all_leads").read()[0]
return action
@api.onchange('student_id')
def onchange_student_id(self):
student = self.env['op.student'].search(
[('id', '=', self.student_id.id)])
if self.student_id and self.is_student:
self['prev_course_id'] = student['prev_course_id']
self['high_school_id'] = student['high_school_id']
self['english_score'] = student['english_score']
# additional information
self['khmer_name'] = student['khmer_name']
self['id_card'] = student['id_card']
self['passport_number'] = student['passport_number']
self['marital_status'] = student['marital_status']
self['nationality'] = student['nationality']
self['primary_language'] = student['primary_language']
self['other_language'] = student['other_language']
self['shoe_size'] = student['shoe_size']
self['uniform_size'] = student['uniform_size']
self['job_position'] = student['job_position']
self['working_experience'] = student['working_experience']
self['constrains'] = student['constrains']
self['hobby'] = student['hobby']
self['facebook'] = student['facebook']
self['visa_number'] = student['visa_number']
self['visa_expiry'] = student['visa_expiry']
self['image'] = student['image_1920']
# family info
self['family_status'] = student['family_status']
self['family_business'] = student['family_business']
self['family_income'] = student['family_income']
self['family_size'] = student['family_size']
#
self['campaign_id'] = student['campaign_id']
self['source_id'] = student['source_id']
self['referred'] = student['referred']
# Extra
self['medical_checkup'] = student['medical_checkup']
self['special_medical'] = student['special_medical']
self['motivational_letter'] = student['motivational_letter']
@api.onchange('is_student')
def onchange_is_student(self):
if not self.is_student:
self['prev_course_id'] = False
self['high_school_id'] = False
self['english_score'] = False
# additional information
self['khmer_name'] = False
self['id_card'] = False
self['passport_number'] = False
self['marital_status'] = False
self['nationality'] = False
self['primary_language'] = False
self['other_language'] = False
self['shoe_size'] = False
self['uniform_size'] = False
self['job_position'] = False
self['working_experience'] = False
self['constrains'] = False
self['hobby'] = False
self['facebook'] = False
self['visa_number'] = False
self['visa_expiry'] = False
# family info
self['family_status'] = False
self['family_business'] = False
self['family_income'] = False
self['family_size'] = False
#
self['campaign_id'] = False
self['source_id'] = False
self['referred'] = False
# Extra
self['medical_checkup'] = False
self['special_medical'] = False
self['motivational_letter'] = False
@api.model
def create(self, val):
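        # NOTE: this create() is shadowed by the second create() defined later in
        # the class, so Python never runs this body. It also reads self.student_id,
        # which is empty inside create().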
student = self.env['op.student'].search(
[('id', '=', self.student_id.id)])
if self.student_id and self.is_student:
self['prev_course_id'] = student['prev_course_id']
self['high_school_id'] = student['high_school_id']
self['english_score'] = student['english_score']
# additional information
self['khmer_name'] = student['khmer_name']
self['id_card'] = student['id_card']
self['passport_number'] = student['passport_number']
self['marital_status'] = student['marital_status']
self['nationality'] = student['nationality']
self['primary_language'] = student['primary_language']
self['other_language'] = student['other_language']
self['shoe_size'] = student['shoe_size']
self['uniform_size'] = student['uniform_size']
self['job_position'] = student['job_position']
self['working_experience'] = student['working_experience']
self['constrains'] = student['constrains']
self['hobby'] = student['hobby']
self['facebook'] = student['facebook']
self['visa_number'] = student['visa_number']
self['visa_expiry'] = student['visa_expiry']
# family info
self['family_status'] = student['family_status']
self['family_business'] = student['family_business']
self['family_income'] = student['family_income']
self['family_size'] = student['family_size']
#
self['campaign_id'] = student['campaign_id']
self['source_id'] = student['source_id']
self['referred'] = student['referred']
# Extra
self['medical_checkup'] = student['medical_checkup']
self['special_medical'] = student['special_medical']
self['motivational_letter'] = student['motivational_letter']
@api.onchange('is_student')
def onchange_is_student(self):
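        # NOTE: this redefines onchange_is_student() from above; Python keeps this
        # later copy, which does not clear the facebook/visa fields.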
if not self.is_student:
self['prev_course_id'] = False
self['high_school_id'] = False
self['english_score'] = False
# additional information
self['khmer_name'] = False
self['id_card'] = False
self['passport_number'] = False
self['marital_status'] = False
self['nationality'] = False
self['primary_language'] = False
self['other_language'] = False
self['shoe_size'] = False
self['uniform_size'] = False
self['job_position'] = False
self['working_experience'] = False
self['constrains'] = False
self['hobby'] = False
# family info
self['family_status'] = False
self['family_business'] = False
self['family_income'] = False
self['family_size'] = False
#
self['campaign_id'] = False
self['source_id'] = False
self['referred'] = False
# Extra
self['medical_checkup'] = False
self['special_medical'] = False
self['motivational_letter'] = False
# @api.onchange('batch_id')
# def onchange_batch_id(self):
# if self.batch_id and self.batch_id.state != 'active':
# msg = 'The selected term is not active: (%s) state: (%s)' % (self.batch_id.name,
# self.batch_id.state)
# raise ValidationError(_(msg))
    @api.model
    def create(self, val):
        if val.get('batch_id'):
            batch = self.env['op.batch'].browse(val['batch_id'])
            if batch.state != 'active':
                msg = 'The selected term is not active:- (%s)' % (
                    batch.name)
                raise ValidationError(_(msg))
lead_id = val.get('lead_id')
if lead_id:
lead_ref = self.env['crm.lead'].browse(lead_id)
lead_ref.type = "admission"
res = super(OpAdmission, self).create(val)
attachment = self.env['ir.attachment'].search([('res_model', '=', 'crm.lead'), ('res_id', '=', lead_id)])
if attachment:
for att in attachment:
att.write({
'res_model': 'op.admission',
'res_id': res.id
})
return res
def enroll_student(self):
for record in self:
messages = ''
if not record.class_id:
messages += 'Class | '
if not record.contact_name:
messages += 'Emergency Contact | '
            if messages:
notification = {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': 'Please fill in the following fields:',
'message': _(messages),
'type': 'danger', # types: success,warning,danger,info
'sticky': True, # True/False will display for few seconds if false
},
}
return notification
if record.register_id.max_count:
total_admission = self.env['op.admission'].search_count(
[('register_id', '=', record.register_id.id),
('state', '=', 'done')])
                if total_admission >= record.register_id.max_count:
msg = 'Max Admission In Admission Register :- (%s)' % (
record.register_id.max_count)
raise ValidationError(_(msg))
if not record.student_id:
vals = record.get_student_vals()
record.partner_id = vals.get('partner_id')
record.student_id = student_id = self.env[
'op.student'].create(vals).id
else:
record.student_id.course_detail_ids.p_active = False
student_id = record.student_id.id
record.student_id.write({
'course_detail_ids': [[0, False, {
'course_id':
record.course_id and record.course_id.id or False,
'batch_id':
record.batch_id and record.batch_id.id or False,
'p_active': True,
}]],
})
            attachment = self.env['ir.attachment'].search([('res_model', '=', 'op.admission'), ('res_id', '=', record.id)])
            if attachment:
                for att in attachment:
                    # Carry each admission attachment over to the new student record.
                    attachment_clone = att.copy()
                    attachment_clone.write({
                        'res_model': 'op.student',
                        'res_id': student_id
                    })
if record.fees_term_id:
val = []
                product = self.env['product.product'].search([('barcode', '=', '168@168')])
                product_id = product.id
for line in record.fees_term_id.line_ids:
no_days = line.due_days
no_alert_days = no_days - 7
state = 'draft'
price = line.total
amount = price
date = (datetime.today() + relativedelta(
days=no_days)).date()
alert_date = (datetime.today() + relativedelta(
days=no_alert_days)).date()
dict_val = {
'semester': line.semester,
'fees_line_id': line.id,
'amount': amount,
'date': date,
'alert_date': alert_date,
'product_id': product_id,
'state': state,
}
val.append([0, False, dict_val])
record.student_id.write({
'fees_detail_ids': val
})
record.write({
'nbr': 1,
'state': 'done',
'admission_date': fields.Date.today(),
'student_id': student_id,
'is_student': True,
})
def get_student_vals(self):
for student in self:
langs = [[6, False, student.other_language.mapped('id')]]
educat = [[6, False, student.lead_educational_achievement.mapped('id')]]
working = [[6, False, student.lead_working_experience.mapped('id')]]
partition = [[6, False, student.lead_participation.mapped('id')]]
student_user = self.env['res.users'].with_context(no_reset_password=False).create({
'name': student.name,
'login': student.email,
'image_1920': self.image or False,
'is_student': True,
'company_id': self.env.ref('base.main_company').id,
'groups_id': [
(6, 0,
[self.env.ref('base.group_portal').id])]
})
details = {
'phone': student.phone,
'mobile': student.mobile,
'email': student.email,
'street': student.street,
'street2': student.street2,
'city': student.city,
'country_id':
student.country_id and student.country_id.id or False,
'state_id': student.state_id and student.state_id.id or False,
# 'image_1920': student.image,
'zip': student.zip
}
student_user.partner_id.write(details)
student_user.with_context(create_user=True).action_reset_password()
details.update({
'title': student.title and student.title.id or False,
'first_name': student.first_name,
'birth_place': student.birth_place.id,
'middle_name': student.middle_name,
'khmer_name': student.khmer_name,
'last_name': student.last_name,
'birth_date': student.birth_date,
'gender': student.gender,
# 'image_1920': student.image or False,
'course_detail_ids': [[0, False, {
'course_id':
student.course_id and student.course_id.id or False,
'batch_id':
student.batch_id and student.batch_id.id or False,
'class_ids': [[6, 0, [student.class_id.id]]],
}]],
'user_id': student_user.id,
'partner_id': student_user.partner_id.id,
'batch_id':
student.batch_id and student.batch_id.id or False,
            'fill_application': student.fill_application,
'marital_status': student.marital_status,
'constrains': student.constrains,
'shoe_size': student.shoe_size,
'shoe_size_id': student.shoe_size_id.id,
'uniform_size': student.uniform_size,
'uniform_size_id': student.uniform_size_id.id,
'primary_language': student.primary_language.id,
'other_language': langs,
'english_score': student.english_score,
'highest_education': student.highest_education,
'working_experience': student.working_experience,
'job_position': student.job_position,
'current_address': student.current_address,
'hobby': student.hobby,
'family_size': student.family_size,
'family_status': student.family_status,
'passport_number': student.passport_number,
'id_card': student.id_card,
'campaign_id': student.campaign_id.id,
'source_id': student.source_id.id,
'referred': student.referred,
'medical_checkup': student.medical_checkup,
'is_scholarship': student.is_scholarship,
'scholarship_status': student.scholarship_status.id,
'motivational_letter': student.motivational_letter,
'special_medical': student.special_medical,
'enroll_reason_id': student.enroll_reason_id.id,
'high_school_id': student.high_school_id.id,
'facebook': student.facebook,
'visa_number': student.visa_number,
'visa_expiry': student.visa_expiry,
'nationality': student.nationality.id,
'rank': student.rank,
'status_detail': student.status_detail,
'lead_source': student.lead_source,
'additional_source': student.additional_source,
'parents': student.parents,
'siblings': student.siblings,
'other_depends': student.other_depends,
'application_form': student.application_form,
'pictures': student.pictures,
'schooling_year': student.schooling_year,
'lead_educational_achievement': educat,
'lead_working_experience': working,
'lead_participation': partition,
'family_income': student.family_income,
'scholar_application': student.scholar_application,
'financial_status': student.financial_status,
'contact_name': student.contact_name.id,
'application_fee': student.application_fee,
'p_street': student.p_street,
'p_street2': student.p_street2,
'p_city': student.p_city,
'p_zip': student.p_zip,
'p_state_id': student.p_state_id.id,
'p_country_id': student.p_country_id.id,
})
return details
    def write(self, vals):
        # Temporary fix for an image issue when updating a record: remove the stale
        # attachment row so the new image is stored cleanly.
        if 'image' in vals and vals['image']:
            self.env.cr.execute(
                "DELETE FROM ir_attachment WHERE res_model = %s AND res_field = %s AND res_id = %s",
                (self._name, 'image', self.id),
            )
        return super(OpAdmission, self).write(vals)
|
mrrtmob/odoo_acac
|
local-addon/pm_admission/models/pm_admission.py
|
pm_admission.py
|
py
| 30,867 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27920291546
|
import os
import time
import numpy as np
import pandas as pd
import logging
import shutil
from pathlib import Path
from deep_squeeze.disk_storing import calculate_compression_ratio
def repeat_n_times(n):
    """
    A decorator that repeats the decorated function (in our case the compression
    pipeline) n times and returns the mean and standard deviation of its return
    values.
    Note that the decorated function must return a number.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            # Pass kwargs through as well, so keyword arguments are not dropped.
            comp_ratios = [func(*args, **kwargs) for _ in range(n)]
            comp_ratios = np.array(comp_ratios)
            return np.mean(comp_ratios), np.std(comp_ratios)
        return wrapper
    return decorator
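# Usage sketch (hypothetical pipeline function, not part of this file):
# @repeat_n_times(5)
# def compression_pipeline(params):
#     ...                       # run the compression
#     return compression_ratio  # must return a single number
# mean_ratio, std_ratio = compression_pipeline(params)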
def display_compression_results(mean_ratio, std_ratio, repeats):
print(f"\n>>> Final results after {repeats} executions:")
print(f"\tMean compression ratio: {mean_ratio:.3f}")
print(f"\tStd of compression ratio: {std_ratio:.3f}")
def run_full_experiments(pipeline_func, dataset_paths, errors, params, save_path, repeats):
results_df = pd.DataFrame(columns=['Data', 'Error', 'MeanRatio', 'StdRatio', 'Time'])
for dataset in dataset_paths:
params['data_path'] = dataset
dataset_name = dataset.split('/')[-1]
for error in errors:
start_time = time.time()
params['error_threshold'] = error
mean_ratio, std_ratio = pipeline_func(params)
results_df = results_df.append({'Data': dataset_name,
'Error': error,
'MeanRatio': mean_ratio,
'StdRatio': std_ratio,
'Time': np.round((time.time() - start_time) / repeats, 2)},
ignore_index=True)
logging.info(f">>> Completed {dataset_name} with {error} error threshold.")
results_df.to_csv(save_path)
def run_scaling_experiment(sample_sizes, pipeline_func, dataset_path, params, save_path, repeats):
"""
We run the compression pipeline on increasing size samples of the same dataset to examine the time scaling.
"""
# Create a temporary directory that will hold the sample csv files and the compressed outputs
Path("storage/temporary_time_exp/").mkdir(parents=True, exist_ok=True)
# Init the results df
results_df = pd.DataFrame(columns=['SampleSize', 'DeepSqueeze', 'Gzip', 'Parquet'])
# Read the dataset
df_full = pd.read_csv(dataset_path)
params['data_path'] = 'storage/temporary_time_exp/temp.csv'
for sample_size in sample_sizes:
sample_df = df_full.sample(frac=sample_size)
        # We store the file so the experiment also accounts for reading time
sample_df.to_csv('storage/temporary_time_exp/temp.csv', header=None, index=False)
# Run and time the DeepSqueeze compression pipeline
start_time = time.time()
_, _ = pipeline_func(params)
deep_squeeze_time = np.round((time.time() - start_time) / repeats, 2)
# Gzip time
start_time = time.time()
sample_df.to_csv("storage/temporary_time_exp/gzip_temp.csv.zip",
index=False,
compression="zip")
gzip_time = np.round((time.time() - start_time), 2)
# Parquet time
start_time = time.time()
sample_df.to_parquet("storage/temporary_time_exp/parquet_temp.parquet", index=False, compression='brotli')
parquet_time = np.round((time.time() - start_time), 2)
results_df = results_df.append({'SampleSize': sample_size,
'DeepSqueeze': deep_squeeze_time,
'Gzip': gzip_time,
'Parquet': parquet_time},
ignore_index=True)
# Delete created temp files
shutil.rmtree('storage/temporary_time_exp')
results_df.to_csv(save_path)
def baseline_compression_ratios(datasets, results_path):
"""
Calculate the baseline compression ratios of gzip and parquet
"""
results_df = pd.DataFrame(columns=['Dataset', 'Gzip', 'Parquet'])
Path("storage/temporary_baseline/").mkdir(parents=True, exist_ok=True)
for dataset_path in datasets:
pd.read_csv(dataset_path).to_csv("storage/temporary_baseline/gzip_temp.csv.zip",
index=False,
compression="zip")
gzip_comp_ratio, _, _ = calculate_compression_ratio(dataset_path,
"storage/temporary_baseline/gzip_temp.csv.zip")
pd.read_csv(dataset_path).to_parquet("storage/temporary_baseline/parquet_temp.parquet", index=False,
compression='brotli')
parquet_comp_ratio, _, _ = calculate_compression_ratio(dataset_path,
"storage/temporary_baseline/parquet_temp.parquet")
results_df = results_df.append({'Dataset': dataset_path.split('/')[-1],
'Gzip': gzip_comp_ratio,
'Parquet': parquet_comp_ratio},
ignore_index=True)
shutil.rmtree('storage/temporary_baseline')
results_df.to_csv(results_path)
|
MikeXydas/DeepSqueeze
|
deep_squeeze/experiment.py
|
experiment.py
|
py
| 5,487 |
python
|
en
|
code
| 10 |
github-code
|
6
|
40176552944
|
import os
import sys
import cv2
import PIL
import pprint
import pytesseract
import time
SRC_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SRC_DIR)
#print(sys.path)
import fetch
import display
import filter
page_seg_mode = 11 # Parse sparse text
def group_names(data):
d2 = dict2list(data)
non_empty_blocks = [b for b in d2 if b['text']]
block_nums = set([b['block_num'] for b in non_empty_blocks])
names = []
for bn in block_nums:
this_block = [b for b in non_empty_blocks if b['block_num'] == bn]
names.append({
'block_num': bn,
'text': " ".join([b['text'] for b in this_block]),
'left': min([b['left'] for b in this_block]),
            'top': min([b['top'] for b in this_block]),  # top edge of the union box
'right': max([b['left'] + b['width'] for b in this_block]),
'bottom': max([b['top'] + b['height'] for b in this_block])
})
return names
def dict2list(d):
"""
Assumes list for each key is same length.
"""
return [{k: d[k][i] for k in d} for i in range(len(list(d.values())[0]))]
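# Example of dict2list (illustrative): {'text': ['a', 'b'], 'top': [1, 2]}
# becomes [{'text': 'a', 'top': 1}, {'text': 'b', 'top': 2}].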
def add_rating(name, score):
ratings[name] = score
print("Added {}: {}".format(name, score))
def extract(image_file):
perf = {}
start_t = time.time()
output = {}
image = cv2.imread(image_file)
#cv2.imshow("Rating", image)
#cv2.waitKey(1)
ocr_start_t = time.time()
data = pytesseract.image_to_data(PIL.Image.open(image_file),
config='--psm {}'.format(page_seg_mode),
output_type=pytesseract.Output.DICT)
#pprint.pprint(data, indent=7)
ocr_end_t = time.time()
perf["ocr_t"] = (ocr_end_t - ocr_start_t) * 1000
names = group_names(data)
#print("names:", [n['text'] for n in names])
box_image = image.copy()
display.draw_boxes(box_image, names)
#cv2.imshow("Rating", box_image)
#cv2.waitKey(1)
names = filter.clean_names(names)
#pprint.pprint(cleaned_names)
filtered_names = filter.filter_abv(names)
filtered_names = filter.filter_styles_re(filtered_names)
filtered_names = filter.filter_breweries(filtered_names)
filtered_box_image = image.copy()
#print("filtered_names:", filtered_names)
display.draw_boxes(filtered_box_image, filtered_names)
#cv2.imshow("Rating", filtered_box_image)
#cv2.waitKey(1)
output["names"] = filtered_names
fetch_start_t = time.time()
#ratings = fetch.async_search_beers([n['clean_text'] for n in filtered_names])
ratings = fetch.async_search_beers(filtered_names)
#longest = max([len(ratings[r]["rating"]) for r in ratings])
#for n in sorted(ratings, key=lambda n: ratings[n], reverse=True):
# print("{}:{}\t{}".format(n, ' '*(longest-len(n)), ratings[n]))
fetch_end_t = time.time()
perf["fetch_t"] = (fetch_end_t - fetch_start_t) * 1000
filtered_box_image2 = image.copy()
"""
for n in ratings:
box = next(b for b in filtered_names if b['clean_text'] == n)
display.write_rating(filtered_box_image, (box['right'], box['bottom']), ratings[n]["rating"])
"""
for n in ratings:
display.write_rating(filtered_box_image, (n['right'], n['bottom']), n["rating"])
#cv2.imshow("Rating", filtered_box_image)
#cv2.waitKey(1)
end_t = time.time()
perf["total_t"] = (end_t - start_t) * 1000
output["img"] = filtered_box_image
#output["ratings"] = ratings
output["perf"] = perf
return output
def main(image_file):
#pytesseract.pytesseract.tesseract_cmd = 'D:/Program Files (x86)/Tesseract-OCR/tesseract'
image = cv2.imread(image_file)
cv2.imshow("Rating", image)
cv2.waitKey(1)
"""
print("OCR (STRING)")
text = pytesseract.image_to_string(PIL.Image.open(image_file),
config='--psm {}'.format(page_seg_mode),
output_type=pytesseract.Output.DICT)
lines = text['text'].split('\n')
lines_stripped = [l for l in lines if l]
print("\toutput:\t\t", text)
print("\tlines:\t\t", lines)
print("\tnon-empty lines:", lines_stripped)
"""
"""
print("BOXES")
boxes = pytesseract.image_to_boxes(PIL.Image.open(image_file), output_type=pytesseract.Output.DICT)
pprint.pprint(boxes)
"""
print("OCR (DATA)")
data = pytesseract.image_to_data(PIL.Image.open(image_file),
config='--psm {}'.format(page_seg_mode),
output_type=pytesseract.Output.DICT)
pprint.pprint(data, indent=7)
"""
print("OSD")
osd = pytesseract.image_to_osd(PIL.Image.open(image_file), output_type=pytesseract.Output.DICT)
pprint.pprint(osd)
"""
# Simple approach to forming beer names from words returned by tesseract by
# grouping by blocks.
names = group_names(data)
print("names:", [n['text'] for n in names])
box_image = image.copy()
display.draw_boxes(box_image, names)
cv2.imshow("Rating", box_image)
cv2.waitKey(1)
cleaned_names = filter.clean_names(names)
pprint.pprint(cleaned_names)
filtered_names = filter.filter_abv(cleaned_names)
filtered_names = filter.filter_styles_re(filtered_names)
filtered_names = filter.filter_breweries(filtered_names)
filtered_box_image = image.copy()
print("filtered_names:", filtered_names)
display.draw_boxes(filtered_box_image, filtered_names)
cv2.imshow("Rating", filtered_box_image)
cv2.waitKey(1)
ratings = fetch.async_search_beers([n['clean_text'] for n in filtered_names])
longest = max([len(r) for r in ratings])
for n in sorted(ratings, key=lambda n: ratings[n], reverse=True):
print("{}:{}\t{}".format(n, ' '*(longest-len(n)), ratings[n]))
filtered_box_image2 = image.copy()
for n in ratings:
box = next(b for b in cleaned_names if b['clean_text'] == n)
display.write_rating(filtered_box_image, (box['right'], box['bottom']), ratings[n])
cv2.imshow("Rating", filtered_box_image)
cv2.waitKey(1)
"""
sync_ratings = {}
for n in filtered_names:
sync_ratings[n['text']] = fetch.search_beers(n['text'])
if not sync_ratings[n['text']]:
continue
display.write_rating(filtered_box_image2, (n['right'], n['top']), sync_ratings[n['text']])
cv2.imshow("Rating 2", filtered_box_image2)
cv2.waitKey(1)
print(sync_ratings)
"""
cv2.waitKey(0)
if __name__ == "__main__":
main(sys.argv[1])
|
JohnMcAninley/beer-goggles
|
goggles/extract.py
|
extract.py
|
py
| 6,035 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16414819917
|
f = open("Pubmed-Diabetes/data/Pubmed-Diabetes.DIRECTED.cites.tab")
m = open("Pubmed-Diabetes/data/Pubmed-diabetes.NODE.paper.tab")
# Define the dataset.
dataList = []
# dataList[i] = [paper_id, class_label, word_attributes, [citing papers, cited papers]]
# Construct the data from the node (content) file:
def buildCon():
m.readline()
basic = []
firstLine = m.readline().split()
for i in range(len(firstLine)-2):
basic.append(firstLine[i+1].split(":")[1])
for line in m:
sonList = []
c = line.split()
sonList.append(c[0])
sonList.append(int(c[1].split("=")[1])-1)
lenUnit = len(c)
sonList.append([])
for i in range(500):
sonList[2].append(0.0)
for i in range(lenUnit-3):
test = c[i+2].split("=")[0]
for j in range(500):
if(basic[j] == test):
sonList[2][j] = c[i+2].split("=")[1]
break
sonList.append([])
sonList[3].append([])
sonList[3].append([])
dataList.append(sonList)
##transfer the label to the number
# if(c[n-1] == "Agents"):
# q = 0
# elif(c[n-1] == "AI"):
# q = 1
# elif(c[n-1] == "DB"):
# q = 2
# elif(c[n-1] == "IR"):
# q = 3
# elif(c[n-1] == "ML"):
# q = 4
# elif(c[n-1] == "HCI"):
# q = 5
# sonList.append(q)
# sonList.append([])
# for i in range(n-2):
# sonList[2].append(float(c[i+1]))
# dataList.append(sonList)
#
#construct the data from the cite file
def buildCite():
f.readline()
f.readline()
for line in f:
p = line.split()[1].split(":")[1]
q = line.split()[3].split(":")[1]
for i in range(len(dataList)):
if(dataList[i][0] == p and p != q):
dataList[i][3][0].append(q)
if(dataList[i][0] == q and p != q):
dataList[i][3][1].append(p)
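# Note (optional sketch, not in the original): the nested loops above cost
# O(E * N). A one-time id -> index map makes each lookup O(1):
#   index = {row[0]: i for i, row in enumerate(dataList)}
#   if p in index and p != q: dataList[index[p]][3][0].append(q)
#   if q in index and p != q: dataList[index[q]][3][1].append(p)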
buildCon()
buildCite()
print(1)
|
randywhisper/DataAnalyst_py583
|
code/structed_Pub.py
|
structed_Pub.py
|
py
| 1,727 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26804210661
|
inside_edges = []
edge_to_pen = {}
num_pens = int(input())
for pen_idx in range(num_pens):
data = [int(data) for data in input().split()]
num_edges = data[0]
corners = data[1: num_edges + 1]
edges = [tuple(sorted([corners[idx], corners[(idx + 1) % num_edges]]))
for idx in range(num_edges)]
edge_costs = data[num_edges + 1:]
for idx in range(num_edges):
if edges[idx] in edge_to_pen:
other_pen_idx, _ = edge_to_pen.pop(edges[idx])
inside_edges.append((edge_costs[idx], pen_idx, other_pen_idx))
else:
edge_to_pen[edges[idx]] = (pen_idx, edge_costs[idx])
outside_edges = []
outside_idx = num_pens
for pen_idx, cost in edge_to_pen.values():
outside_edges.append((cost, pen_idx, outside_idx))
def kruskal(edges, n):
edges.sort()
    parents = list(range(n))
def find(x):
while x != parents[x]:
x = parents[x]
return x
total_cost = 0
for cost, x, y in edges:
x_root = find(x)
y_root = find(y)
if x_root != y_root:
total_cost += cost
parents[x_root] = y_root
return total_cost
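# Note (optional, not in the original): adding path compression inside find()
# (parents[x] = parents[parents[x]] in the loop) keeps lookups near O(1) on
# large inputs; overall runtime stays dominated by the edge sort.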
print(min(kruskal(inside_edges, num_pens),
kruskal(inside_edges + outside_edges, num_pens + 1)))
|
Stevan-Zhuang/DMOJ
|
CCC/CCC '10 S4 - Animal Farm.py
|
CCC '10 S4 - Animal Farm.py
|
py
| 1,289 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72509864189
|
from flask import (
Blueprint,
render_template,
request, redirect,
session,
flash,
url_for,
abort,
)
from .models import *
from flask_mail import Message
from flask_login import current_user, login_required
from sqlalchemy.exc import SQLAlchemyError
from Hispanist_flask import mail
from Hispanist_flask.my_app.main_page import log_error
module = Blueprint('pages', __name__, template_folder='./templates/pages', static_folder='./static/pages', url_prefix='/')
@module.route('/rating')
def rating():
"""Page that shows rating of Spanish schools and universities."""
schools = School.query.all()
universities = University.query.all()
return render_template("my_app/pages/rating.html", schools=schools, universities=universities)
@module.route('/books')
def books():
books = Book.query.all()
return render_template('my_app/pages/books.html', books=books)
@module.route('/videos')
def videos():
"""Page that shows rating of Spanish schools and universities."""
channels = Video.query.filter(Video.type=='канал').all()
videos = Video.query.filter(Video.type=='видео').all()
return render_template("my_app/pages/videos.html", channels=channels, videos=videos)
@module.route('/article/<id>', methods=['GET', 'POST'])
def article(id):
"""
article: instance of article that the method gets from the form in html to render one article.
Page that renders one article.
"""
article_object = Article.query.filter(Article.id == id).one()
return render_template('my_app/pages/article.html', article=article_object)
@module.route('/learn_words', methods=['GET', 'POST'])
@login_required
def learn_words():
words = Word.query.filter(Word.users.any(User.username == current_user.username)).all()
    if request.method == 'POST':
        word = request.form.get('word')
        translation = request.form.get('translation')
        # Use .first() (not .all()) so word_obj is a single record we can modify.
        word_obj = Word.query.filter(Word.word == word).first()
        if not word_obj:
            word_obj = Word(word=word, translation=translation)
            db.session.add(word_obj)
        user = User.query.filter(User.username == current_user.username).one()
        word_obj.users.append(user)
try:
db.session.commit()
except SQLAlchemyError as e:
log_error('Error while querying database', exc_info=e)
flash('Добавление слова не удалось', 'danger')
abort(500)
session.modified = True
return render_template('my_app/pages/learn_words.html', words=words)
@module.route('/olimpiads')
def olimpiads():
return render_template('my_app/pages/olimpiads.html')
@module.route('/lessons', methods=["GET", "POST"])
def lessons():
if request.method == 'POST':
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
msg = Message('Клиент оставил обращение на сайте', recipients=[email])
msg.body = f'Номер телефона клиента: {phone}, сообщение от клиента: {message}'
mail.send(msg)
flash('менеджер свяжется с вами в течение суток')
return redirect(url_for('pages.lessons'))
return render_template('my_app/pages/lessons.html')
|
vecherninanika/Hispanist_Flask
|
Hispanist_flask/my_app/pages.py
|
pages.py
|
py
| 3,473 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18476191196
|
import tornado.httpserver
import tornado.ioloop
import tornado.web
import json
import webapp
import RF24module
import time
import database
global radioNodi
global dbn
class GetListaNodiSettingHandler(tornado.web.RequestHandler):
def get(self):
        #***************************************
        # Fetch the node list from the server and send it to main.html;
        # the settings are read from the database.
        #***************************************
        # test --->
"""
nodo_new = {
"55754":{"Tipo":"5", "Descrizione":"Dimmer", "funzionamento" : [{"ch1":"false","ch2":"true","ch3":"false","ch4":"true"}] , "stato" : [ { "ch1b" : "false" , "ch2b" : "true" , "ch3b" : "true" , "ch4b" : "true" , "ch1d" : "40" , "ch2d" : "80" , "ch3d" : "50" , "ch4d" : "20" } ] },
"55753":{"Tipo":"5", "Descrizione":"Dimmer", "funzionamento" : [{"ch1":"false","ch2":"true","ch3":"true","ch4":"true"}] , "stato" : [ { "ch1b" : "true" , "ch2b" : "false" , "ch3b" : "true" , "ch4b" : "true" , "ch1d" : "40" , "ch2d" : "80" , "ch3d" : "50" , "ch4d" : "20" } ] },
"55752":{"Tipo":"5", "Descrizione":"Dimmer", "funzionamento" : [{"ch1":"false","ch2":"true","ch3":"true","ch4":"true"}] , "stato" : [ { "ch1b" : "true" , "ch2b" : "false" , "ch3b" : "false" , "ch4b" : "true" , "ch1d" : "40" , "ch2d" : "80" , "ch3d" : "50" , "ch4d" : "100" } ] },
}
"""
nodi_new = webapp.dbn.Read_Lista_Nodi_return_JSON()
        # append the display order of the nodes
#nodo_new['OrdineNodi'] = ['addrss_A','addrss_B']
numberorder = []
for item in nodi_new:
numberorder.append(nodi_new[item]['Ordine'])
numberorder.sort()
list_Address = []
for idn in numberorder:
for item in nodi_new:
if(nodi_new[item]['Ordine'] == idn):
list_Address.append(item)
nodi_new['OrdineNodi'] = list_Address
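        # Equivalent one-liner (sketch, applied before 'OrdineNodi' is added,
        # assuming unique 'Ordine' values):
        # list_Address = sorted(nodi_new, key=lambda a: nodi_new[a]['Ordine'])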
self.write(json.dumps(nodi_new))
class AggiungiNodiHandler(tornado.web.RequestHandler):
def get(self):
        #***************************************
        # Retrieve data from the wireless nodes via the coordinator.
        #***************************************
        # test --->
nodo = webapp.radioNodi.find_nodo()
        # check whether the node is already in the database
a = webapp.dbn.Is_AddressNodo_inDataabase(nodo)
if(a==False):
time.sleep(0.1)
nodo_new = {}
            if nodo is None:
                self.write(json.dumps(nodo_new))
            else:
                # request the node description
                tipo = webapp.radioNodi.get_tipo_nodo(nodo)
                if tipo == 5:
                    # a dimmer (only dimmers are supported for now)
                    nodo_new = {nodo: {"Tipo": str(tipo), "Descrizione": "Dimmer"}}
                    # add it to the database
                    webapp.dbn.Aggiungi_Nodo_inDatabase(nodo, str(tipo))
                else:
                    nodo_new = {}
                # nodo_new = {"55754":{"Tipo":"5", "Descrizione":"Dimmer"}}
                self.write(json.dumps(nodo_new))
else:
print("Nodo Gia esiste!")
nodo_new = { "Errore" : "Nodo Esiste" }
self.write(json.dumps(nodo_new))
class RimuoviNodoHandler(tornado.web.RequestHandler):
def post(self):
data = json.loads(self.request.body)
        #***************************************
        # Remove the node from the database.
        #***************************************
        # test --->
webapp.dbn.Remove_Nodo(str(data["Nodo"]))
#print('remove: ' + data['Nodo'])
class OrdineNodiHandler(tornado.web.RequestHandler):
def post(self):
data = json.loads(self.request.body)
        #***************************************
        # Reorder the nodes in the database.
        #***************************************
        # test --->
webapp.dbn.Set_Ordine_Nodi(data["Nodi"])
class FunzionamentoNodoHandler(tornado.web.RequestHandler):
def post(self):
data = json.loads(self.request.body)
        #***************************************
        # Dimmer behaviour: store the node's settings in the database.
        #***************************************
        # test --->
nodi_new = webapp.dbn.Write_Setting_Nodo(str(data["Nodo"]),str(data["checkbox"]),str(data["value"]))
#print(data)
|
salviador/LightHub
|
raspberry/app/AggiungiNodi.py
|
AggiungiNodi.py
|
py
| 5,476 |
python
|
it
|
code
| 0 |
github-code
|
6
|
11506766967
|
# number of rectangular tables
# table length
# table width
# tablecloth size = (table length + 2 * 0.30 m) x (table width + 2 * 0.30 m)
# square cloth (kare) side = table length / 2
# 1 USD = 1.85 BGN
tables_all = int(input())
tables_length = float(input())
tables_width = float(input())
covers = tables_all * (tables_length + 2 * 0.30) * (tables_width + 2 * 0.30)
squares = tables_all * (tables_length / 2 * tables_length / 2)
covers_price_per_meter = 7
squares_price_per_meter = 9
price_usd = covers * covers_price_per_meter + squares * squares_price_per_meter
price_bgn = price_usd * 1.85
print(f"{price_usd:.2f} USD")
print(f"{price_bgn:.2f} BGN")
|
PIvanov94/SoftUni-Software-Engineering
|
PB-Python April 2020 Part 2/Tailoring Workshop.py
|
Tailoring Workshop.py
|
py
| 752 |
python
|
bg
|
code
| 0 |
github-code
|
6
|
40696903853
|
import argparse
import logging
import sys
def create_parser():
parser = argparse.ArgumentParser(
"Get magma managed configs for the specified service. (mconfig)",
)
parser.add_argument(
"-s", "--service",
required=True,
help="Magma service name",
)
parser.add_argument(
"-v", "--variable",
help="Config variable name. "
"If not specified, then JSON dump all configs for this service.",
)
parser.add_argument(
"-t", "--test", action="store_true",
help="Do a truthy test on v. "
"If True then return code is 0, otherwise return code is 2",
)
return parser
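# Example invocations (service/variable names are hypothetical):
#   magma_get_config.py -s mme                   # JSON-dump all configs
#   magma_get_config.py -s mme -v logLevel       # print one variable
#   magma_get_config.py -s mme -v natEnabled -t  # truthy test: exit 0 if true, else 2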
def main():
parser = create_parser()
args = parser.parse_args()
# import after parsing command line because import is sluggish
from magma.configuration.mconfig_managers import (
load_service_mconfig_as_json,
)
# set up logging
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
)
mconfig_json = load_service_mconfig_as_json(args.service)
# if a variable was not specified, pretty print config and exit
if args.variable is None:
for k, v in mconfig_json.items():
# Keys shouldn't have spaces in them, but just in case
# Values also shouldn't have newlines, but if they do, this will
# print differently than if called with --variable
print(k.replace(" ", "_"), str(v).replace("\n", r"\n"))
sys.exit(0)
var = mconfig_json[args.variable]
if args.test:
if var:
# if true, then return 0 (zero means success)
sys.exit(0)
# exit code 2 to distinguish from exit code 1,
# which is returned after python exceptions.
sys.exit(2)
# not a boolean, print the config
print(var)
sys.exit(0)
if __name__ == "__main__":
main()
|
magma/magma
|
orc8r/gateway/python/scripts/magma_get_config.py
|
magma_get_config.py
|
py
| 1,967 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
6701340278
|
import cv2
import numpy as np
import imgaug.augmenters as iaa
import imgaug as ia
import torchvision
from torchvision import transforms
from PIL import Image, ImageEnhance, ImageOps
from RandAugment.augmentations import Lighting, RandAugment
class ResizeImage(object):
def __init__(self, height=256, width=256):
self.height = height
self.width = width
def __call__(self, img):
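        # Scale so the shorter side matches the target size, preserving aspect ratio.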
img = np.array(img)
h, w = img.shape[:2]
if h < w:
w = int(self.height*w*1.0/h)
h = self.height
else:
h = int(self.width*h*1.0/w)
w = self.width
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
return Image.fromarray(img)
class ResizeImageVal(object):
def __init__(self, height=256, width=256):
self.height = height
self.width = width
self.pad_fix = iaa.PadToFixedSize(width=width, height=height)
def __call__(self, img):
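        # Scale so the longer side matches the target size, then pad up to the fixed size.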
img = np.array(img)
h, w = img.shape[:2]
if h > w:
w = int(self.height*w*1.0/h)
h = self.height
else:
h = int(self.width*h*1.0/w)
w = self.width
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
img = self.pad_fix.augment_image(img)
return Image.fromarray(img)
def sometimes(aug): return iaa.Sometimes(0.5, aug)
class imgaugAugment(object):
def __init__(self):
super(imgaugAugment, self).__init__()
self.seq = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.2), # vertically flip 20% of all images
# crop images by -5% to 10% of their height/width
sometimes(iaa.CropAndPad(
percent=(-0.05, 0.1),
pad_mode=ia.ALL,
pad_cval=(0, 255)
)),
sometimes(iaa.Affine(
# scale images to 80-120% of their size, individually per axis
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
# translate by -20 to +20 percent (per axis)
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
# use nearest neighbour or bilinear interpolation (fast)
order=[0, 1],
# if mode is constant, use a cval between 0 and 255
cval=(0, 255),
# use any of scikit-image's warping modes (see 2nd image from the top for examples)
mode=ia.ALL
)),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
# convert images into their superpixel representation
sometimes(iaa.Superpixels(
p_replace=(0, 1.0), n_segments=(20, 200))),
iaa.OneOf([
# blur images with a sigma between 0 and 3.0
iaa.GaussianBlur((0, 3.0)),
# blur image using local means with kernel sizes between 2 and 7
iaa.AverageBlur(k=(2, 7)),
# blur image using local medians with kernel sizes between 2 and 7
iaa.MedianBlur(k=(3, 11)),
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(
0.75, 1.5)), # sharpen images
iaa.Emboss(alpha=(0, 1.0), strength=(
0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(
alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
# add gaussian noise to images
iaa.AdditiveGaussianNoise(loc=0, scale=(
0.0, 0.05*255), per_channel=0.5),
iaa.OneOf([
# randomly remove up to 10% of the pixels
iaa.Dropout((0.01, 0.1), per_channel=0.5),
iaa.CoarseDropout((0.03, 0.15), size_percent=(
0.02, 0.05), per_channel=0.2),
]),
# invert color channels
iaa.Invert(0.05, per_channel=True),
# change brightness of images (by -10 to 10 of original value)
iaa.Add((-10, 10), per_channel=0.5),
# change hue and saturation
iaa.AddToHueAndSaturation((-20, 20)),
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply((0.5, 1.5), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-4, 0),
first=iaa.Multiply((0.5, 1.5), per_channel=True),
second=iaa.LinearContrast((0.5, 2.0))
)
]),
# improve or worsen the contrast
iaa.LinearContrast((0.5, 2.0), per_channel=0.5),
iaa.Grayscale(alpha=(0.0, 1.0)),
# move pixels locally around (with random strengths)
sometimes(iaa.ElasticTransformation(
alpha=(0.5, 3.5), sigma=0.25)),
# sometimes move parts of the image around
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
)
def __call__(self, img):
img = self.seq.augment_image(img)
return Image.fromarray(img)
_IMAGENET_PCA = {
'eigval': [0.2175, 0.0188, 0.0045],
'eigvec': [
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
}
class Augment(object):
def __init__(self, width=320, height=320, phase='train'):
super(Augment, self).__init__()
self.phase = phase
        self.width = width
self.height = height
# self.transform_train = torchvision.transforms.Compose([
# imgaugAugment(),
# ])
self.transform_train = transforms.Compose([
imgaugAugment(),
RandAugment(n=3, m=9),
transforms.RandomResizedCrop(self.height, scale=(0.08, 1.0), interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
),
transforms.ToTensor(),
Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
self.transform_test = transforms.Compose([
transforms.Resize(self.height+32, interpolation=Image.BICUBIC),
transforms.CenterCrop(self.height),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
def __call__(self, image):
if self.phase == 'train':
image = self.transform_train(image)
elif self.phase == 'valid' or self.phase=='test':
image = self.transform_test(image)
return image
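# Minimal usage sketch (illustrative file name; assumes a PIL RGB image):
# aug = Augment(width=320, height=320, phase='train')
# tensor = aug(Image.open('sample.jpg').convert('RGB'))  # -> normalized torch Tensor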
|
toandaominh1997/ProductDetectionShopee
|
datasets/augment.py
|
augment.py
|
py
| 8,295 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75131969148
|
# -*- coding: utf-8 -*-
"""https://blog.csdn.net/zwq912318834/article/details/79870432"""
import scrapy
from selenium import webdriver
import time
from scrapy import signals  # scrapy signals library
from pydispatch import dispatcher  # the dispatch mechanism scrapy currently uses
class LoginBlibliSpider(scrapy.Spider):
name = 'login_blibli'
allowed_domains = ['bilibili.com/']
start_urls = ['https://api.bilibili.com/x/web-interface/nav']
    def __init__(self):
        super(LoginBlibliSpider, self).__init__()
        # This path points to the local Chrome profile (cookies, localStorage and
        # other login state), so the spider can conveniently reuse an existing login.
        profile_directory = r'--user-data-dir=C:\Users\acer\AppData\Local\Google\Chrome\User Data'
        # Instantiate a single browser object (created only once)
        options = webdriver.ChromeOptions()
        options.add_argument(profile_directory)
        self.driver = webdriver.Chrome(chrome_options=options)
self.driver.get("https://space.bilibili.com/")
self.seleniumCookies = self.driver.get_cookies()
print(f"seleniumCookies = {self.driver.get_cookies()}")
# time.sleep(3)
# self.driver.quit()
        # Wire up the signal: when spider_closed fires, call mySpiderCloseHandle to close Chrome
dispatcher.connect(receiver=self.mySpiderCloseHandle,
signal=signals.spider_closed
)
    # Signal handler: close the Chrome browser
    def mySpiderCloseHandle(self, spider):  # (the reference example passes a spider argument here)
        self.driver.quit()
def parse(self, response):
print(response.text)
|
hahahei957/NewProject_Opencv2
|
use_of_selenium/use_of_selenium/spiders/login_blibli.py
|
login_blibli.py
|
py
| 1,782 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18244671014
|
from os import path, mkdir, listdir
import configparser
import utils
def default_config(config):
"""Put here the content of the default configuration file"""
config['vosk'] = {'project_name': 'vosk',
'model_name': '',
'models_url': 'https://alphacephei.com/vosk/models'}
config['fastpunct'] = {'project_name': 'fastpunct',
'model_name': '',
'models_url': 'https://github.com/notAI-tech/fastPunct'}
class Settings:
__config = configparser.ConfigParser()
__config_path = path.join(utils.project_root, "settings")
def __init__(self):
# Check if the settings folder already exist
if not path.exists(self.__config_path):
mkdir(self.__config_path)
# Check if the config file already exist else fill it with default settings
if "config" in listdir(self.__config_path):
self.__config.read(path.join(self.__config_path, "config"))
self.__add_default_params()
else:
default_config(self.__config)
self.write_config()
    def __getitem__(self, sections):
        """Get the item for the given section(s).
        Examples:
        > settings["vosk", "model_name"]  -> the stored model-name string
        > settings["vosk"]                -> the whole 'vosk' section
        """
if isinstance(sections, tuple):
section, property = sections
return self.__config[section][property]
else:
return self.__config[sections]
    def __setitem__(self, key, data):
        """Set the item for the given (section, property) tuple.
        Example: settings["vosk", "model_name"] = "model_name" """
        section, property = key
        self.__config[section][property] = data
def write_config(self):
"""Write the config to the file"""
with open(path.join(self.__config_path, "config"), 'w') as configfile:
self.__config.write(configfile)
    def __add_default_params(self):
        """If the default settings gain or lose keys, apply those changes to the
        stored config.
        NOTE: this only works with 1- or 2-dimensional dictionaries."""
default_dict = {}
default_config(default_dict)
stored_dict = dict(self.__config._sections)
for key1 in default_dict.keys():
if isinstance(default_dict[key1], dict):
for key2 in default_dict[key1].keys():
if key1 in stored_dict.keys() and key2 in stored_dict[key1]:
default_dict[key1][key2] = stored_dict[key1][key2]
else:
if key1 in stored_dict.keys():
default_dict[key1] = stored_dict[key1]
self.__config.read_dict(default_dict)
self.write_config()
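# Usage sketch (illustrative values):
#   settings = Settings()
#   settings["vosk", "model_name"] = "vosk-model-small-en-us-0.15"
#   settings.write_config()
#   model_dir = dl_model_path(settings["vosk"])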
def dl_model_path(project):
"""Return the DeepLearning model path corresponding to the poject.R
Args:
project (dict): Project informations
Returns:
str: path to the model directory
"""
model_name = project["model_name"]
project_name = project["project_name"]
def error(e):
print(f" Could not access deeplearning model '{model_name}' of project '{project_name}'.")
print(" " + e)
return None
    if not model_name:
        return error("Model name empty")
path_models = path.join(utils.project_root, "models")
if not path.exists(path_models):
mkdir(path_models)
error("Model folder unexisting. Creating one at : " + path_models)
path_model = path.join(path_models, project_name, model_name)
if path.exists(path_model):
if (listdir(path_model) != []):
print(f"Model '{model_name}' of project '{project_name}' found")
return path_model
else:
error("Model seems empty. Check the contents of : " + path_model)
else:
if not path.exists(path.join(path_models, project_name)):
mkdir(path.join(path_models, project_name))
print(f"Project is unexisting in {path_models}. Creating the folder.")
error("Model unexisting. Please")
|
cg-Kdaf/Zacharias
|
src/private_data.py
|
private_data.py
|
py
| 4,247 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18049656660
|
#! /usr/bin/env python3
# pyre-strict
import os
from common_tests import CommonTestDriver
from test_case import TestCase
class HierarchyTests(TestCase[CommonTestDriver]):
@classmethod
def get_template_repo(cls) -> str:
return "hphp/hack/test/integration/data/hierarchy"
def test_inheritance(self) -> None:
"""
Test --inheritance-ancestors and --inheritance-children
"""
self.test_driver.start_hh_server()
self.test_driver.check_cmd(
[
'File "{root}foo.php", line 3, characters 7-9: Foo',
' inherited by File "{root}bar.php", line 3, characters 7-9: Bar',
'File "{root}foo.php", line 4, characters 19-19: Foo::f',
' inherited by File "{root}bar.php", line 4, characters 19-19: Bar::f',
'File "{root}foo.php", line 3, characters 7-9: Foo',
' inherited by File "{root}baz.php", line 3, characters 7-9: Baz',
'File "{root}foo.php", line 5, characters 19-19: Foo::g',
' inherited by File "{root}baz.php", line 4, characters 19-19: Baz::g',
],
options=["--inheritance-children", "Foo"],
)
self.test_driver.check_cmd(
[
'File "{root}baz.php", line 3, characters 7-9: Baz',
' inherited from File "{root}foo.php", line 3, characters 7-9: Foo',
'File "{root}baz.php", line 4, characters 19-19: Baz::g',
' inherited from File "{root}foo.php", line 5, characters 19-19: Foo::g',
'File "{root}baz.php", line 3, characters 7-9: Baz',
' inherited from File "{root}bar.php", line 3, characters 7-9: Bar',
],
options=["--inheritance-ancestors", "Baz"],
)
def test_inheritance_filter(self) -> None:
self.test_driver.start_hh_server()
self.test_driver.check_cmd(
[
'File "{root}filter.php", line 15, characters 7-12: Filter',
' inherited from File "{root}filter.php", line 3, characters 7-13: CFilter',
'File "{root}filter.php", line 18, characters 19-31: Filter::cfilterMethod',
' inherited from File "{root}filter.php", line 4, characters 19-31: CFilter::cfilterMethod',
],
options=["--inheritance-ancestor-classes", "Filter"],
)
self.test_driver.check_cmd(
[
'File "{root}filter.php", line 15, characters 7-12: Filter',
' inherited from File "{root}filter.php", line 7, characters 11-17: IFilter',
'File "{root}filter.php", line 19, characters 19-31: Filter::ifilterMethod',
' inherited from File "{root}filter.php", line 8, characters 19-31: IFilter::ifilterMethod',
],
options=["--inheritance-ancestor-interfaces", "Filter"],
)
self.test_driver.check_cmd(
[
'File "{root}filter.php", line 15, characters 7-12: Filter',
' inherited from File "{root}filter.php", line 11, characters 7-13: TFilter',
'File "{root}filter.php", line 20, characters 19-31: Filter::tfilterMethod',
' inherited from File "{root}filter.php", line 12, characters 19-31: TFilter::tfilterMethod',
],
options=["--inheritance-ancestor-traits", "Filter"],
)
def test_method_signature_change(self) -> None:
with open(os.path.join(self.test_driver.repo_dir, "qux.php"), "w") as f:
f.write(
"""<?hh //partial
class Qux {
public function f() {
$x = new Foo();
$x->f();
}
}
"""
)
self.test_driver.start_hh_server(changed_files=["qux.php"])
self.test_driver.check_cmd(["No errors!"])
debug_sub = self.test_driver.subscribe_debug()
with open(os.path.join(self.test_driver.repo_dir, "foo.php"), "w") as f:
f.write(
"""<?hh //partial
class Foo {
public function f(): void {}
public final function g() {}
}
"""
)
msgs = debug_sub.get_incremental_logs()
self.assertEqual(set(msgs["to_redecl_phase1"]["files"]), set(["foo.php"]))
# FIXME: redeclaring qux.php is unnecessary
self.assertEqual(
set(msgs["to_redecl_phase2"]["files"]),
set(["foo.php", "bar.php", "baz.php", "qux.php"]),
)
self.assertEqual(
set(msgs["to_recheck"]["files"]),
set(["foo.php", "bar.php", "baz.php", "qux.php"]),
)
|
WeilerWebServices/Facebook
|
hhvm/hphp/hack/test/integration/hierarchy_tests.py
|
hierarchy_tests.py
|
py
| 4,662 |
python
|
en
|
code
| 3 |
github-code
|
6
|