id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
16358
|
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.utils.translation import gettext_lazy as _
class SitesConfig(AppConfig):
name = 'src.base'
verbose_name = _("Modulo de Frontend")
|
StarcoderdataPython
|
12813584
|
import sys
from pony_barn import client as pony
from base import GitBuild
class PonyBuild(GitBuild):
def __init__(self):
super(PonyBuild, self).__init__()
self.name = "surlex"
self.repo_url = 'git://github.com/codysoyland/surlex.git'
if __name__ == '__main__':
build = PonyBuild()
sys.exit(build.execute(sys.argv))
|
StarcoderdataPython
|
93840
|
<reponame>TakesxiSximada/dumpcar<filename>scripts/get-db-raw-snapshot-mysql.py
import getpass
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('host')
parser.add_argument('user')
parser.add_argument('db')
args = parser.parse_args()
# subprocess.run() blocks until mysqldump finishes; its CompletedProcess result has no wait() method
child = subprocess.run('mysqldump -h {} -u {} {}'.format(
args.host, args.user, args.db), shell=True)
|
StarcoderdataPython
|
3422660
|
<reponame>baiyanquan/k8sTools
# -*- coding: utf-8 -*-
class K8sRepository(object):
def __init__(self):
pass
@staticmethod
def create_k8s_namespace_view_model(result):
success = []
try:
for each_host in result['success']:
for each_resource in result['success'][each_host.encode('raw_unicode_escape')]['resources']:
temp = dict()
temp['creationTimestamp'] = each_resource['metadata']['creationTimestamp']
temp['name'] = each_resource['metadata']['name']
temp['uid'] = each_resource['metadata']['uid']
success.append(temp)
except:
success = {}
result['detail'] = result['success']
result['success'] = success
return result
@staticmethod
def create_k8s_node_view_model(result):
success = []
try:
for each_host in result['success']:
for each_resource in result['success'][each_host.encode('raw_unicode_escape')]['resources']:
temp = dict()
temp['creationTimestamp'] = each_resource['metadata']['creationTimestamp']
temp['labels'] = each_resource['metadata']['labels']['kubernetes.io/role']
temp['name'] = each_resource['metadata']['name']
temp['uid'] = each_resource['metadata']['uid']
success.append(temp)
except:
success = {}
result['detail'] = result['success']
result['success'] = success
return result
@staticmethod
def create_k8s_svc_view_model(result):
success = []
try:
for each_host in result['success']:
for each_resource in result['success'][each_host.encode('raw_unicode_escape')]['resources']:
temp = dict()
temp['creationTimestamp'] = each_resource['metadata']['creationTimestamp']
temp['labels'] = each_resource['metadata']['labels']
temp['name'] = each_resource['metadata']['name']
temp['namespace'] = each_resource['metadata']['namespace']
temp['clusterIP'] = each_resource['spec']['clusterIP']
success.append(temp)
except:
success = {}
result['detail'] = result['success']
result['success'] = success
return result
@staticmethod
def create_k8s_deployment_view_model(result):
success = []
try:
for each_host in result['success']:
for each_resource in result['success'][each_host.encode('raw_unicode_escape')]['resources']:
temp = dict()
temp['creationTimestamp'] = each_resource['metadata']['creationTimestamp']
temp['labels'] = each_resource['metadata']['labels']
temp['name'] = each_resource['metadata']['name']
temp['namespace'] = each_resource['metadata']['namespace']
temp['container_spec'] = each_resource['spec']['template']['spec']['containers']
success.append(temp)
except:
success = {}
result['detail'] = result['success']
result['success'] = success
return result
@staticmethod
def create_k8s_pods_view_model(result):
success = []
try:
for each_host in result['success']:
for each_resource in result['success'][each_host.encode('raw_unicode_escape')]['resources']:
temp = dict()
temp['creationTimestamp'] = each_resource['metadata']['creationTimestamp']
temp['labels'] = each_resource['metadata']['labels']
temp['name'] = each_resource['metadata']['name']
temp['namespace'] = each_resource['metadata']['namespace']
temp['nodeName'] = each_resource['spec']['nodeName']
temp['hostIP'] = each_resource['status']['hostIP']
temp['podIP'] = each_resource['status']['podIP']
success.append(temp)
except:
success = {}
result['detail'] = result['success']
result['success'] = success
return result
|
StarcoderdataPython
|
1651285
|
<reponame>DiceNameIsMy/recruiting<gh_stars>0
# Generated by Django 3.2.4 on 2021-06-27 14:44
from django.db import migrations, models
import recruiting.utils.handler
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Experience',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company', models.CharField(max_length=256, verbose_name='Компания')),
('position', models.CharField(max_length=128, verbose_name='Должность')),
('start_date', models.DateField(verbose_name='Дата начала работы')),
('end_date', models.DateField(blank=True, null=True, verbose_name='Дата окончания работы')),
('to_present', models.BooleanField(verbose_name='Работает по настоящее время:')),
],
options={
'verbose_name': 'Опыт работы',
'verbose_name_plural': 'Опыт работы',
},
),
migrations.CreateModel(
name='KeySkill',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='Название')),
],
options={
'verbose_name': 'Навык',
'verbose_name_plural': 'Навыки',
},
),
migrations.CreateModel(
name='Respond',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cover_letter', models.CharField(blank=True, max_length=256, verbose_name='Приложенное письмо')),
('date', models.DateTimeField(auto_now_add=True)),
('invited', models.BooleanField(default=False)),
('text', models.CharField(blank=True, max_length=256)),
],
options={
'verbose_name': 'Отклик',
'verbose_name_plural': 'Отклики',
},
),
migrations.CreateModel(
name='Resume',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatar', models.ImageField(blank=True, upload_to='recruiting/resume/avatar', verbose_name='Аватар')),
('header', models.CharField(max_length=128)),
('text', models.TextField(max_length=8192)),
('education', models.CharField(choices=[('SE', 'Среднее'), ('SS', 'Среднее специальное'), ('BC', 'Бакалавр'), ('MS', 'Магистратура'), ('DC', 'Докторантур наук')], max_length=2, null=True, verbose_name='Образование')),
('edu_institution', models.CharField(blank=True, max_length=64, verbose_name='Учебное заведение')),
('specialization', models.CharField(blank=True, max_length=64)),
('edu_end_year', models.IntegerField(blank=True, default=recruiting.utils.handler.current_year, verbose_name='Год окончания')),
('is_open', models.BooleanField(default=False, verbose_name='Виден ли всему интернету')),
('last_modified', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Резюме',
'verbose_name_plural': 'Резюме',
},
),
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('header', models.CharField(max_length=128, verbose_name='Заголовок')),
('text', models.TextField(max_length=8192, verbose_name='Основное описание')),
('salary', models.IntegerField(blank=True, null=True, verbose_name='Зарплата')),
('employment', models.CharField(choices=[('FT', 'Full time'), ('PT', 'Part time'), ('WP', 'Work placement'), ('PW', 'Project work'), ('VW', 'Volunteering')], default='FT', max_length=2, verbose_name='Занятость')),
('schedule', models.CharField(choices=[('FD', 'Full day'), ('RM', 'Remote work'), ('SH', 'Shift schedule'), ('RB', 'Rotation based'), ('FS', 'Flexible schedule')], default='FD', max_length=2, verbose_name='График работы')),
],
options={
'verbose_name': 'Вакансия',
'verbose_name_plural': 'Вакансии',
},
),
]
|
StarcoderdataPython
|
5134549
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
costs = list(map(int, input().strip().split()))
while True:
reduced = False
for i in range(10):
for j in range(i, 10):
k = (i + j) % 10
if costs[k] > costs[i] + costs[j]:
reduced = True
costs[k] = costs[i] + costs[j]
if not reduced:
break
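# After the relaxation above, costs[k] is the minimum cost of producing digit k,
# allowing k to be formed as (i + j) % 10 from two cheaper digits i and j.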
target = int(input())
s = input()
ans = 0
for i in s:
ans += costs[ord(i) - 48] # ord('0') = 48
print(ans)
|
StarcoderdataPython
|
3361442
|
import sys
import time
import traceback
import ttn
# publish.single() in send_reconfiguration_message comes from the Paho MQTT client (assumed dependency)
import paho.mqtt.publish as publish
app_id = "solar-pi0-ws-app"
access_key = "<KEY>"
# helper to send the reconfiguration message (kept here to show where it is done)
def send_reconfiguration_message(seconds_until_next_meassure):
try:
message = "reconfig_sleep_time;" + str(seconds_until_next_meassure)
publish.single("config_sensor_node_1",message,retain=True,hostname="192.168.0.145",port=1881)
except Exception as err:
print("Couldn't send the message to " + "192.168.0.145" + ":" + str(1881))
print(sys.exc_info())
traceback.print_tb(err.__traceback__)
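# Hypothetical usage (payload format taken from the function above):
# send_reconfiguration_message(600) publishes "reconfig_sleep_time;600" to the
# "config_sensor_node_1" topic on 192.168.0.145:1881.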
def uplink_callback(msg, client):
print("Received uplink from ", msg.dev_id)
print(msg)
handler = ttn.HandlerClient(app_id, access_key)
# using mqtt client
mqtt_client = handler.data()
mqtt_client.set_uplink_callback(uplink_callback)
mqtt_client.connect()
time.sleep(60)
mqtt_client.close()
|
StarcoderdataPython
|
1750538
|
<gh_stars>0
import os
from multime.auxotroph_analysis import load_model
me = load_model.load_me_model(json=True)
aerobicity='anaerobic'
if aerobicity == 'anaerobic':
prefix = '_anaerobic'
else:
prefix = ''
for gene_obj in list(me.translation_data):
gene = gene_obj.id
source_dir = os.getcwd() + '/knockout_sims/'
if not os.path.isdir(source_dir):
os.mkdir(source_dir)
if os.path.exists(os.path.join(source_dir, gene + '%s_sol.json' % prefix)):
print(gene, 'already solved')
continue
os.system("sbatch edison_submit_job %s %s" % (gene, aerobicity))
|
StarcoderdataPython
|
3429628
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 18:41:38 2021
@author: divyoj
"""
## importing libraries:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
import os
# # note that this must be executed before 'import numba'
# os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
from numba import njit
import time as process_time
import plotting_gradient
from scipy.integrate import solve_ivp
## functions:
@njit
def do_timestep(t,z,aT,bT,alpha, beta, gamma, zeta):
''' function to give dxdt at a time step '''
aL = z[0*(nx*ny):1*(nx*ny)].reshape((ny,nx))
bL = z[1*(nx*ny):2*(nx*ny)].reshape((ny,nx))
aR = z[2*(nx*ny):3*(nx*ny)].reshape((ny,nx))
bR = z[3*(nx*ny):4*(nx*ny)].reshape((ny,nx))
# total membrane protein concentration:
a0 = aL + aR
b0 = bL + bR
# initializing dxdt to arrays of zeros:
daL=np.zeros((ny,nx));daR=np.zeros((ny,nx));dbL=np.zeros((ny,nx));dbR=np.zeros((ny,nx));
## Equations for al,aR,bl and bR:
# aL
daL[0,1:nx-1]=alpha*(aT[0,1:nx-1]-a0[0,1:nx-1])-beta*aL[0,1:nx-1]+beta*gamma*( aL[0,1:nx-1]*bR[0,1-1:nx-1-1] ) -zeta*(aL[0,1:nx-1]-aR[0,1:nx-1])**3;
# aR
daR[0,1:nx-1]=alpha*(aT[0,1:nx-1]-a0[0,1:nx-1])-beta*aR[0,1:nx-1]+beta*gamma*( aR[0,1:nx-1]*bL[0,1+1:nx-1+1] ) -zeta*(aR[0,1:nx-1]-aL[0,1:nx-1])**3;
# bL
dbL[0,1:nx-1]=alpha*(bT[0,1:nx-1]-b0[0,1:nx-1])-beta*bL[0,1:nx-1]+beta*gamma*( bL[0,1:nx-1]*aR[0,1-1:nx-1-1] ) -zeta*(bL[0,1:nx-1]-bR[0,1:nx-1])**3;
# bR
dbR[0,1:nx-1]=alpha*(bT[0,1:nx-1]-b0[0,1:nx-1])-beta*bR[0,1:nx-1]+beta*gamma*( bR[0,1:nx-1]*aL[0,1+1:nx-1+1] ) -zeta*(bR[0,1:nx-1]-bL[0,1:nx-1])**3;
# Boundary Conditions:
#aL
daL[0,0]=daL[0,1];
daL[0,nx-1]=alpha*(aT[0,nx-1]-a0[0,nx-1])-beta*aL[0,nx-1]+beta*gamma*(aL[0,nx-1]*bR[0,nx-1-1])-zeta*(aL[0,nx-1]-aR[0,nx-1])**3;
#aR
daR[0,0]=alpha*(aT[0,0]-a0[0,0])-beta*aR[0,0]+beta*gamma*( aR[0,0]*bL[0,1] ) -zeta*(aR[0,0]-aL[0,0])**3;
daR[0,nx-1]=daR[0,nx-2];
#bL
dbL[0,0]=dbL[0,1];
dbL[0,nx-1]=alpha*(bT[0,nx-1]-b0[0,nx-1])-beta*bL[0,nx-1]+beta*gamma*(bL[0,nx-1]*aR[0,nx-1-1])-zeta*(bL[0,nx-1]-bR[0,nx-1])**3;
#bR
dbR[0,0]=alpha*(bT[0,0]-b0[0,0])-beta*bR[0,0]+beta*gamma*( bR[0,0]*aL[0,1] ) -zeta*(bR[0,0]-bL[0,0])**3;
dbR[0,nx-1]=dbR[0,nx-2];
daL=daL*((aT>=a0) | (daL<0)); daR=daR*((aT>=a0) | (daR<0))
dbL=dbL*((bT>=b0) | (dbL<0)); dbR=dbR*((bT>=b0) | (dbR<0))
daL=daL*((aL>=0) | (daL>0)); daR=daR*((aR>=0) | (daR>0))
dbL=dbL*((bL>=0) | (dbL>0)); dbR=dbR*((bR>=0) | (dbR>0))
#return np.array(daL.flatten().tolist()+dbL.flatten().tolist()+daR.flatten().tolist()+dbR.flatten().tolist())
return np.concatenate((daL.flatten(),dbL.flatten(),daR.flatten(),dbR.flatten()))
#@njit
def simulate(rho,epsilon,alpha, beta, gamma, zeta):
''' function to iterate over time and return arrays with the result '''
## initializing the arrays to store the values over time:
aL_t = np.zeros((T_max+1,ny,nx)); aR_t = np.zeros((T_max+1,ny,nx));
bL_t = np.zeros((T_max+1,ny,nx)); bR_t = np.zeros((T_max+1,ny,nx));
# total proteins in the cells
aT = rho + np.zeros((ny,nx))+epsilon*rho*np.linspace(-0.5,0.5,nx)*np.ones((ny,nx))
bT = rho + np.zeros((ny,nx))
## initializing aL,bR,bL,aR
aL = np.zeros((ny,nx)) + 0.1*rho; aR = np.zeros((ny,nx)) + 0.100001*rho
bL = np.zeros((ny,nx)) + 0.100001*rho; bR = np.zeros((ny,nx)) + 0.1*rho
## Collecting the initial conditions into a single array:
ic = np.array(aL.flatten().tolist()+bL.flatten().tolist()+aR.flatten().tolist()+bR.flatten().tolist())
## Solving the initial value problem:
sol = solve_ivp(lambda t,y: do_timestep(t,y,aT,bT,alpha, beta, gamma, zeta),t_span=[0,T_max],y0=ic,t_eval=list(np.linspace(0,T_max,T_max+1)))
t=sol.t
aball=sol.y
for t_index, ts in enumerate(t):
aball_at_ts = aball[:,t_index]
aL_t[t_index]= aball_at_ts[0*(nx*ny):1*(nx*ny)].reshape((ny,nx));
bL_t[t_index]= aball_at_ts[1*(nx*ny):2*(nx*ny)].reshape((ny,nx));
aR_t[t_index]= aball_at_ts[2*(nx*ny):3*(nx*ny)].reshape((ny,nx));
bR_t[t_index]= aball_at_ts[3*(nx*ny):4*(nx*ny)].reshape((ny,nx));
#return (aL_t[:,:,10:nx-10],aR_t[:,:,10:nx-10],bL_t[:,:,10:nx-10],bR_t[:,:,10:nx-10])
return (aL_t,bL_t,aR_t,bR_t)
if __name__ == "__main__":
# Lattice:
w,h = 10,2;
dx,dy=0.01,1;
nx=int(w/dx)
ny=1;#int(h/dx);
# time:
T_max=500;
# parameters:
alpha=10;
gamma=1 ;beta=1;
zeta=0.0;
#epsilon=0.1;
main_folder="./aR greater than aL/"
# #%% Characterisation over epsilon for multiple small values of rho:
rho_array=[0.2,0.1]
f, axs = plt.subplots(3,1,figsize=(4,9))
for rhoi, rho in enumerate(rho_array):
print("rho=",rho)
#folder for storing the data:
folder=main_folder+"zeta="+str(zeta)+"_alpha="+str(alpha)+"_rho="+str(rho)+"/"
if not os.path.exists(folder):
os.makedirs(folder)
epsilons = np.around(np.linspace(-1,1,21),5);rho0_array=epsilons.copy();
pa=epsilons.copy();pb=epsilons.copy()
for ri, epsilon in enumerate(epsilons):
print (ri, epsilon)
aL_t, bL_t, aR_t, bR_t = simulate(rho,epsilon,alpha, beta, gamma, zeta)
# Plotting at each rho
rho0_array[ri],pa[ri],pb[ri]=plotting_gradient.plots_at_rho(aL_t,bL_t,aR_t,bR_t,epsilon,folder)
## rho0 vs rho
axs[0].plot(epsilons,rho0_array,'.-',label=str(rho));
axs[0].set_title(r"$\rho_{0} \ v/s \ \epsilon$");
axs[0].set_ylabel(r"$\rho_{0}$")
axs[0].set_xlabel(r"$\epsilon$")
axs[0].legend(ncol=2)
## rho0 vs rho
axs[1].plot(epsilons,pa,'.-',label=str(rho));
axs[1].set_title(r'$p_{a}\ v/s \ \epsilon$');
axs[1].set_ylabel(r"$p_{a}$")
axs[1].set_xlabel(r"$\epsilon$")
axs[1].legend(ncol=2)
## rho0 vs rho
axs[2].plot(epsilons,pb,'.-',label=str(rho));
axs[2].set_title(r'$p_{b} \ v/s \ \epsilon $');
axs[2].set_ylabel(r"$p_{b} $")
axs[2].set_xlabel(r"$\epsilon$")
axs[2].legend(ncol=2)
f.suptitle(r"zeta="+str(zeta))
f.subplots_adjust(top=0.85, bottom=0.20, left=0.20, right=0.95, hspace=0.50,wspace=0.50)
f.savefig(main_folder+"Gradient_over_epsilon_low_rho_zeta="+str(zeta)+".png",dpi=500)
plt.close()
#%% Characterisation over epsilon for multiple large values of rho::
rho_array=[0.9,1.0,1.1,1.2]
f, axs = plt.subplots(3,1,figsize=(4,9))
for rhoi, rho in enumerate(rho_array):
print("rho=",rho)
#folder for storing the data:
folder=main_folder+"zeta="+str(zeta)+"_alpha="+str(alpha)+"_rho="+str(rho)+"/"
if not os.path.exists(folder):
os.makedirs(folder)
epsilons = np.sort(np.around(np.concatenate((np.linspace(-1,1,51),np.linspace(-0.1,0.1,21))),5));
rho0_array=epsilons.copy();
pa=epsilons.copy();pb=epsilons.copy()
for ri, epsilon in enumerate(epsilons):
print (ri, epsilon)
aL_t, bL_t, aR_t, bR_t = simulate(rho,epsilon,alpha, beta, gamma, zeta)
# Plotting at each rho
rho0_array[ri],pa[ri],pb[ri]=plotting_gradient.plots_at_rho(aL_t,bL_t,aR_t,bR_t,epsilon,folder)
## rho0 vs rho
axs[0].plot(epsilons,rho0_array,'.-',label=str(rho));
axs[0].set_title(r"$\rho_{0} \ v/s \ \epsilon$");
axs[0].set_ylabel(r"$\rho_{0}$")
axs[0].set_xlabel(r"$\epsilon$")
axs[0].legend(ncol=2)
## rho0 vs rho
axs[1].plot(epsilons,pa,'.-',label=str(rho));
axs[1].set_title(r'$p_{a}\ v/s \ \epsilon$');
axs[1].set_ylabel(r"$p_{a}$")
axs[1].set_xlabel(r"$\epsilon$")
axs[1].legend(ncol=2)
## rho0 vs rho
axs[2].plot(epsilons,pb,'.-',label=str(rho));
axs[2].set_title(r'$p_{b} \ v/s \ \epsilon $');
axs[2].set_ylabel(r"$p_{b} $")
axs[2].set_xlabel(r"$\epsilon$")
axs[2].legend(ncol=2)
f.suptitle(r"zeta="+str(zeta))
f.subplots_adjust(top=0.85, bottom=0.20, left=0.20, right=0.95, hspace=0.50,wspace=0.50)
f.savefig(main_folder+"Gradient_over_epsilon_fine_high_rho_zeta="+str(zeta)+".png",dpi=500)
plt.close()
# #%% Characterisation over rho:
epsilon_array=[0.5,0.1,0.01,0]
f, axs = plt.subplots(3,1,figsize=(4,9))
for epsi, epsilon in enumerate(epsilon_array):
print("epsilon=",epsilon)
#folder for storing the data:
folder=main_folder+"zeta="+str(zeta)+"_alpha="+str(alpha)+"_epsilon="+str(epsilon)+"/"
if not os.path.exists(folder):
os.makedirs(folder)
rhos = np.sort(np.around(np.concatenate((np.linspace(0.8,1.2,21),np.linspace(0.95,1.05,26))),5));rho0_array=rhos.copy();
pa=rhos.copy();pb=rhos.copy()
for ri, rho in enumerate(rhos):
print (ri, rho)
aL_t, bL_t, aR_t, bR_t = simulate(rho,epsilon,alpha, beta, gamma, zeta)
#% Plotting at each rho:
rho0_array[ri],pa[ri],pb[ri]=plotting_gradient.plots_at_rho(aL_t,bL_t,aR_t,bR_t,rho,folder)
## rho0 vs rho
axs[0].plot(rhos,rho0_array,'.-',label=str(epsilon));
axs[0].set_title(r"$\rho_{0} \ v/s \ \rho$");
axs[0].set_ylabel(r"$\rho_{0}$")
axs[0].set_xlabel(r"$\rho$")
axs[0].legend(ncol=2)
## rho0 vs rho
axs[1].plot(rhos,pa,'.-',label=str(epsilon));
axs[1].set_title(r'$p_{a} \ v/s \ \rho$');
axs[1].set_ylabel(r"$p_{a}$")
axs[1].set_xlabel(r"$\rho$")
axs[1].legend(ncol=2)
## rho0 vs rho
axs[2].plot(rhos,pb,'.-',label=str(epsilon));
axs[2].set_title(r'$p_{b} \ v/s \ \rho $');
axs[2].set_ylabel(r"$p_{b} $")
axs[2].set_xlabel(r"$\rho$")
axs[2].legend(ncol=2)
f.suptitle(r"zeta="+str(zeta))
f.subplots_adjust(top=0.85, bottom=0.20, left=0.20, right=0.95, hspace=0.50,wspace=0.50)
f.savefig(main_folder+"Gradient_over_rho_zeta="+str(zeta)+".png",dpi=500)
plt.close()
|
StarcoderdataPython
|
6418326
|
<filename>2020/18/solution.py
from typing import Tuple
def apply_operator(lv, rv, operator) -> float:
return lv + rv if operator == "+" else lv * rv
def log(*kargs):
pass
def eval_expression(exp_str: str, start: int = 0) -> Tuple[float, int]:
log(f"==> eval_expression(\"{exp_str}\", {start})")
exp_value = 0
cur_operator = '+'
cur_term = None
ptr = start
while ptr < len(exp_str):
c = exp_str[ptr]
ptr += 1
if c == '(':
v, p = eval_expression(exp_str, ptr)
exp_value = apply_operator(exp_value, v, cur_operator)
ptr += p
elif c == ')':
break
elif c in "0123456789":
cur_term = c
elif c in '+*':
if cur_term:
v = float(cur_term)
exp_value = apply_operator(exp_value, v, cur_operator)
cur_term = None
cur_operator = c
if cur_term:
v = float(cur_term)
exp_value = apply_operator(exp_value, v, cur_operator)
log(f"<== eval_expression(\"{exp_str}\", {start}) = {exp_value}")
return exp_value, (ptr - start)
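# Worked example (illustrative): eval_expression("1 + 2 * 3") evaluates strictly left to right,
# giving ((1 + 2) * 3) = 9.0 and returning (9.0, 9) since all 9 characters are consumed.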
def modify_expression(exp_str: str, start: int = 0) -> Tuple[str, int]:
log(f"==> modify_expression(\"{exp_str}\", {start})")
exp = ""
operands = []
ptr = start
while ptr < len(exp_str):
c = exp_str[ptr]
ptr += 1
if c == '(':
sub_exp, p = modify_expression(exp_str, ptr)
operands.append(f"({sub_exp})")
ptr += p
elif c == ')':
break
elif c == "*":
exp += f"{operands[0]} * "
operands = []
elif c in "0123456789":
operands.append(c)
if len(operands) == 2:
operands = [f"({operands[0]} + {operands[1]})"]
exp += operands[0]
log(f"<== modify_expression: {exp}")
return exp, (ptr - start)
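# Worked example (illustrative): modify_expression("1 + 2 * 3") wraps additions in explicit
# parentheses and returns ("(1 + 2) * 3", 9), so part 2's addition-first precedence falls out
# of the ordinary left-to-right evaluation above.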
if __name__ == "__main__":
expressions = [line[:-1] for line in open("2020/18/input.txt", "r").readlines()]
# Part 1
s = sum([eval_expression(e)[0] for e in expressions])
print(f"Sum of expressions: {s}")
# Part 2
modified_expressions = [modify_expression(e)[0] for e in expressions]
s = sum([eval_expression(e)[0] for e in modified_expressions])
print(f"Sum of expressions: {s}")
|
StarcoderdataPython
|
9728346
|
<reponame>shikharmn/lightly
import sys
import tempfile
from lightly.utils import save_custom_metadata
from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup, MockedApiWorkflowClient
class TestCLICrop(MockedApiWorkflowSetup):
@classmethod
def setUpClass(cls) -> None:
sys.modules["lightly.cli.upload_cli"].ApiWorkflowClient = MockedApiWorkflowClient
def test_save_metadata(self):
metadata = [("filename.jpg", {"random_metadata": 42})]
metadata_filepath = tempfile.mktemp('.json', 'metadata')
save_custom_metadata(metadata_filepath, metadata)
|
StarcoderdataPython
|
6649682
|
frase = str(input('Digite uma frase: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
if inverso != junto:
print('NÃO É UM PALÍNDROMO')
else:
print('PALÍNDROMO')
"""from unidecode import unidecode
x = 'palíndromo'
frase = str(input('Digite uma frase: '))
frase = frase.replace(" ", "")
frase2 = frase[::-1]
frase = unidecode(frase).lower()
frase2 = unidecode(frase2).lower()
print(frase)
print(frase2)
if frase == frase2:
print('Essa frase é um palíndromo')
else:
print('Essa frase não é um palíndromo')"""
|
StarcoderdataPython
|
155697
|
from simupy.block_diagram import BlockDiagram
import simupy_flight
import numpy as np
from nesc_testcase_helper import plot_nesc_comparisons, int_opts, benchmark
from nesc_testcase_helper import ft_per_m, kg_per_slug
Ixx = 3.6*kg_per_slug/(ft_per_m**2) #slug-ft2
Iyy = 3.6*kg_per_slug/(ft_per_m**2) #slug-ft2
Izz = 3.6*kg_per_slug/(ft_per_m**2) #slug-ft2
Ixy = 0.0*kg_per_slug/(ft_per_m**2) #slug-ft2
Iyz = 0.0*kg_per_slug/(ft_per_m**2) #slug-ft2
Izx = 0.0*kg_per_slug/(ft_per_m**2) #slug-ft2
m = 1.0*kg_per_slug #slug
x = 0.
y = 0.
z = 0.
S_A = 0.1963495/(ft_per_m**2)
b_l = 1.0
c_l = 1.0
a_l = b_l
lat_ic = 0.*np.pi/180
long_ic = 0.*np.pi/180
h_ic = 0./ft_per_m
V_N_ic = 1000./ft_per_m
V_E_ic = 000./ft_per_m
V_D_ic = -1000./ft_per_m
psi_ic = 0.*np.pi/180
theta_ic = 0.*np.pi/180
phi_ic = 0.*np.pi/180
p_b_ic = 0.*np.pi/180
q_b_ic = 0.*np.pi/180
r_b_ic = 0.*np.pi/180
# omega_X_ic = 0.004178073*np.pi/180
# omega_Y_ic = 0.*np.pi/180
# omega_Z_ic = 0.*np.pi/180
planet = simupy_flight.Planet(
gravity=simupy_flight.earth_J2_gravity,
winds=simupy_flight.get_constant_winds(),
atmosphere=simupy_flight.atmosphere_1976,
planetodetics=simupy_flight.Planetodetic(
a=simupy_flight.earth_equitorial_radius,
omega_p=simupy_flight.earth_rotation_rate,
f=simupy_flight.earth_f
)
)
vehicle = simupy_flight.Vehicle(base_aero_coeffs=simupy_flight.get_constant_aero(CD_b=0.1), m=m, I_xx=Ixx, I_yy=Iyy, I_zz=Izz, I_xy=Ixy, I_yz=Iyz, I_xz=Izx, x_com=x, y_com=y, z_com=z, x_mrc=x, y_mrc=y, z_mrc=z, S_A=S_A, a_l=a_l, b_l=b_l, c_l=c_l, d_l=0.,)
BD = BlockDiagram(planet, vehicle)
BD.connect(planet, vehicle, inputs=np.arange(planet.dim_output))
BD.connect(vehicle, planet, inputs=np.arange(vehicle.dim_output))
planet.initial_condition = planet.ic_from_planetodetic(
lamda_E=long_ic, phi_E=lat_ic, h=h_ic,
V_N=V_N_ic, V_E=V_E_ic, V_D=V_D_ic,
psi=psi_ic, theta=theta_ic, phi=phi_ic,
p_B=p_b_ic, q_B=q_b_ic, r_B=r_b_ic,)
# planet.initial_condition[-3:] = omega_X_ic, omega_Y_ic, omega_Z_ic
planet.initial_condition[-2] = 0.
with benchmark() as b:
res = BD.simulate(30, integrator_options=int_opts)
b.tfinal = res.t[-1]
plot_nesc_comparisons(res, '10')
|
StarcoderdataPython
|
9726224
|
<gh_stars>0
# Generated by Django 2.2.4 on 2019-12-16 19:03
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('property', '0013_auto_20191202_2055'),
]
operations = [
migrations.RemoveField(
model_name='flat',
name='owner',
),
migrations.RemoveField(
model_name='flat',
name='owner_phone_pure',
),
migrations.RemoveField(
model_name='flat',
name='owners_phonenumber',
),
migrations.AlterField(
model_name='owner',
name='owner_phone_pure',
field=phonenumber_field.modelfields.PhoneNumberField(blank=True, db_index=True, max_length=128, region=None, verbose_name='Нормализованный номер владельца:'),
),
migrations.AlterField(
model_name='owner',
name='owners_phonenumber',
field=models.CharField(db_index=True, max_length=20, verbose_name='Номер владельца:'),
),
]
|
StarcoderdataPython
|
5098211
|
# -*- coding: utf-8 -*-
import argparse
import json
import sys
from ..googlenews import get_news_by_geolocation
FIELDS = ['title', 'url', 'description']
def execute(args):
result = [] # avoids a NameError below when no geolocation is supplied
if args.geolocation:
city, state = args.geolocation
result = get_news_by_geolocation(city, state)
if args.fields:
d = args.d
for item in result:
print(d.join([getattr(item, field) for field in args.fields]))
else:
print(json.dumps([item._asdict() for item in result]))
def main():
parser = argparse.ArgumentParser(
prog='requests_googlenews',
description='A command line tool for parsing google news'
)
parser.add_argument('-g', '--geolocation', nargs=2, metavar=('city', 'state'),
help='geographic location')
parser.add_argument('-d', metavar='delim', default='\t',
help='field delimiter character (default: tab)')
parser.add_argument('-f', '--fields', nargs='+',
metavar=('field1', 'field2'), choices=FIELDS,
help=('list of output fields, separated by the ' +
'field delimiter character (see the -d option).' +
' Allowed fields are: ' + ', '.join(FIELDS)))
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6675625
|
# -*- coding: utf8 -*-
# Filename: renameFiles.py
#
########################################################################
# This is a program to rename files and folders in
# a given folder by applying a set of renaming rules
#
# <NAME> (<EMAIL>)
#
# Specify the path with the variable path
# To see if rules work without actually renaming, set rename to false
#
########################################################################
from __future__ import unicode_literals
from builtins import list, str
from io import open
import os, os.path
import unicodedata
import re
# list of files that should be ignored, such as system files like 'Thumbs.db'
ignoreFiles = ['Thumbs.db', '.DS_Store']
# list of file formats in lower case that should be ignored, such as 'xlsx'
ignoreFileExtensions = []
def normaliseName(value):
"""
Normalises or renames a string, replaces 'something' with 'something else'
add anything else you need; examples are commented
Order of rules does matter for custom replacements.
At the end all remaining invalid characters are removed (i.e. replaced with '')
"""
# split into name and extension
newValue, fileExt = os.path.splitext(value)
# replace umlauts with two letters
newValue = newValue.replace('ä','ae')
newValue = newValue.replace('ö','oe')
newValue = newValue.replace('ü','ue')
newValue = newValue.replace('Ä','Ae')
newValue = newValue.replace('Ö','Oe')
newValue = newValue.replace('Ü','Ue')
newValue = newValue.replace('ß','ss')
# replace all other special characters
# normalise, i. e. replace e.g. é with e
newValue = unicodedata.normalize('NFKD', newValue).encode('ascii', 'ignore')
newValue = newValue.decode('utf-8')
# some custom rules to serve as example
# newValue = newValue.replace(', ','_')
# newValue = newValue.replace(',','_')
# newValue = newValue.replace('+','_')
# newValue = newValue.replace(' - ','-')
# newValue = newValue.replace(' ','_')
# you can also use regular expressions e. g.:
# newValue = str(re.sub(r'(\()([\d]\))', r'-\2', newValue))
# '( one number )' becomes '-number)'
# all remaining invalid characters are removed
# \ and / are kept to keep the path
newValue = str(re.sub('[^a-zA-Z0-9_\-/\\\]', '', newValue))
return newValue+fileExt
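# Hypothetical example of the rules above: normaliseName('Über läuft (1).txt')
# returns 'Ueberlaeuft1.txt' (umlaut expansion, accent stripping, removal of spaces and parentheses).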
if __name__=="__main__":
# set path (must point at the directory to process; defaulting to the current directory here)
path = u'.'
#path = u'../data'
#path = u'../../testData'
# set rename to 'true' if renaming should be done
rename = 'false'
print ("######################################################")
print ("Working with directory: "+path+"\n")
# scan full directory and create list with files and list with
# directories, that serve as basis for further processing.
# the lists are saved in files for later reference.
fileDirList = []
dirList = []
for root, dirs, files in os.walk(path):
for dir in dirs:
dirList.append(os.path.join(root,dir).encode('utf-8'))
for file in files:
fileDirList.append(os.path.join(root,file).encode('utf-8'))
# write file with list of files
outFile1 = open('filelist.csv','wb')
for line in fileDirList:
writeLine = line.decode('utf-8')
outFile1.write((writeLine+'\n').encode('utf-8'))
outFile1.close()
# write file with list of directories
outFile2 = open('dirlist.csv','wb')
for line in dirList:
writeLine = line.decode('utf-8')
outFile2.write((writeLine+'\n').encode('utf-8'))
outFile2.close()
# rename files
# has to be done before directories are renamed,
# otherwise they won't be found anymore
fileCounter = 0
fileRenameCounter = 0
exceptedFiles = []
renamedFiles = []
for file in fileDirList:
oldPath, origFileName = os.path.split(file)
fileCounter+= 1
# ignore system files according to list
if origFileName.decode('utf-8') in ignoreFiles:
print('ignoring: '+os.path.join(oldPath.decode('utf-8'),origFileName.decode('utf-8')))
continue
# ignore files with extension according to list
# can only be done when there is an extension
origFileNamePart, origFileExtension = os.path.splitext(origFileName.decode('utf-8'))
if len(origFileExtension) == 0:
print('File does not have extension:', origFileName.decode('utf-8'))
else:
if origFileName.decode('utf-8').rsplit('.', 1)[1].lower() in ignoreFileExtensions:
print('ignoring: '+os.path.join(oldPath.decode('utf-8'),origFileName.decode('utf-8')))
continue
# get normalised (renamed) file name
newFile = normaliseName(origFileName.decode('utf-8'))
newFilePath = os.path.join(oldPath.decode('utf-8'), newFile)
# append old file with path and new file with path to list
# new file path might be same as before, also append new file name
renamedFiles.append([file.decode('utf-8'), newFilePath, newFile])
if newFile != origFileName.decode('utf-8'):
fileRenameCounter+=1
print('Normalised "', origFileName.decode('utf-8'), '" to ', newFile)
# rename file if rename was set to true
# collect files that cannot be renamed due to file name duplication
if rename == 'true':
try:
os.rename(file, newFilePath)
except FileExistsError:
print('The file could not be renamed, because a file with the same name already exists!')
exceptedFiles.append(file)
# rename directories, starting from within, i.e. reverse order of dirList
dirCounter = 0
dirRenameCounter = 0
renamedDirs = []
for dir in dirList[::-1]:
dirCounter+= 1
oldPath, oldDir = os.path.split(dir)
newDir = normaliseName(oldDir.decode('utf-8'))
newDirPath = os.path.join(oldPath.decode('utf-8'), newDir)
# append old directories with path and new directories with path to list
# new path might be same as before, also append new directory name
renamedDirs.append([dir.decode('utf-8'), newDirPath, newDir])
if newDir != oldDir.decode('utf-8'):
dirRenameCounter+=1
print('Normalised "', oldDir.decode('utf-8'), '" to ', newDir)
if rename == 'true':
os.rename(dir, newDirPath)
actualFileRenameCounter = fileRenameCounter - len(exceptedFiles)
print('Renamed ', actualFileRenameCounter, ' files of a total of ', fileCounter, 'files.')
print('Renamed ', dirRenameCounter, ' directories of a total of ', dirCounter, 'directories.')
if len(exceptedFiles)<1:
print('No errors in renaming.')
else:
print('Some files could not be renamed. Manual action is required.')
print('\n'.join(f.decode('utf-8') for f in exceptedFiles))
print('Creating file and directory list with new names')
# write file with list of files and new names
outFile3 = open('renamedFilelist.csv','wb')
outFile3.write(('Old file;New file;New file name'+'\n').encode('utf-8'))
for entry in renamedFiles:
writeLine = ';'.join(entry) #entry.decode('utf-8')
outFile3.write((writeLine+'\n').encode('utf-8'))
outFile3.close()
# write file with list of directories and new names
# writing reverse order of renamedDirs
outFile4 = open('renamedDirlist.csv','wb')
outFile4.write(('Old path;New path;New folder name'+'\n').encode('utf-8'))
for entry in renamedDirs[::-1]:
writeLine = ';'.join(entry)
outFile4.write((writeLine+'\n').encode('utf-8'))
outFile4.close()
print('Done')
|
StarcoderdataPython
|
5117914
|
<reponame>ealogar/servicedirectory<gh_stars>0
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from os.path import abspath, dirname, join, normpath
# Django settings for ebooks project.
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # default database, just for testing purposes if needed
'NAME': normpath(join(DJANGO_ROOT, 'sd.db')), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
normpath(join(DJANGO_ROOT, 'static')),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '<KEY>'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# This should point to urls entry point
ROOT_URLCONF = 'urls'
# In mac this line should be overriden if not using wsgi
# WSGI_APPLICATION = 'books.wsgi.application'
TEMPLATE_DIRS = (
normpath(join(join(DJANGO_ROOT, 'web'), 'templates')),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'web'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'service_directory.log',
'formatter': 'verbose',
}
},
'loggers': {
'': {
'handlers': ['file'],
'propagate': True,
'level': 'INFO',
}
}
}
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
# Do not leave django append slash if a url don't provide slash at the end
# Redirect when put methods become a GET
APPEND_SLASH = False
# Url in case of success login
LOGIN_REDIRECT_URL = '/sd/web/home'
# Url in case of success logout
LOGOUT_REDIRECT_URL = '/sd/web/login'
# Url
LOGGING_URL = '/sd/web/login'
# Use cookies-based sessions. The session data will be stored using Django's tools for cryptographic signing"
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
# Avoid reading cookies from javascript
SESSION_COOKIE_PATH = '/;HttpOnly'
# Automatic loggin configuration
# Define which layer we want to automatically log methods
LOG_LAYERS = ('services',)
# Define here the default log method which will be used (debug, info, warning, error)
DEFAULT_LOG_METHOD = 'debug'
# Defining LOG_METHOD_SERVICES we override the default log method defined above
# You can also define additional layer methods using the name LOG_METHOD_<LAYER>, like LOG_METHOD_DAOS
LOG_METHOD_SERVICES = 'info'
|
StarcoderdataPython
|
6541300
|
import unittest
import sys
sys.path.append('../')
import service
import installer
import yaml
class TestStringMethods(unittest.TestCase):
def test_russian(self):
p1 = Test1()
txt = service.handler(p1, None)
self.assertEqual(len(txt), 7)
def test_other_russian(self):
p1 = Test2()
txt = service.handler(p1, None)
self.assertEqual(len(txt), 9)
def test_duplicated_russian(self):
p1 = Test3()
txt = service.handler(p1, None)
self.assertEqual(len(txt), 9)
def test_get_queue_name(self):
y = { "queues" : { "words_name" : "words_queue" } }
txt = installer.getQueueName(y)
self.assertEquals(txt, 'words_queue')
def test_queue_from_file(self):
stream = open('config.yaml', 'r')
y = yaml.safe_load(stream)
stream.close()
txt = installer.getLemmasQueueName(y)
self.assertEquals(txt, 'lemmas_queue')
def test_get_function_name(self):
y = { "function_name" : "fn" }
txt = installer.getFunctionName(y)
self.assertEquals(txt, 'fn')
def test_differ_by_aspect_russian(self):
p1 = Test4()
txt = service.handler(p1, None)
print(txt)
self.assertEqual(len(txt), 2)
class Test1:
def get(self, dropped):
return "Ну что сказать, я вижу кто-то наступил на грабли, Ты разочаровал меня, ты был натравлен."
class Test2:
def get(self, dropped):
return "По асфальту мимо цемента, Избегая зевак под аплодисменты. Обитатели спальных аррондисманов"
class Test3:
def get(self, dropped):
return "По асфальту мимо цемента цементу, Избегая зевак под аплодисменты. Обитатели спальных аррондисманов"
class Test4:
def get(self, dropped):
return "Я пил и она выпила."
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
6615546
|
"""https://github.com/RonTang/SimpleTimsort/blob/master/SimpleTimsort.py
"""
import time
import random
"""
二分搜索用于插入排序寻找插入位置
"""
def binary_search(the_array, item, start, end):
if start == end:
if the_array[start] > item:
return start
else:
return start + 1
if start > end:
return start
mid = round((start + end)/ 2)
if the_array[mid] < item:
return binary_search(the_array, item, mid + 1, end)
elif the_array[mid] > item:
return binary_search(the_array, item, start, mid - 1)
else:
return mid
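# e.g. (illustrative): binary_search([1, 3, 5, 7], 4, 0, 3) returns 2, the index at which 4
# would be inserted to keep the list sorted.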
"""
插入排序用于生成mini run
"""
def insertion_sort(the_array):
l = len(the_array)
for index in range(1, l):
value = the_array[index]
pos = binary_search(the_array, value, 0, index - 1)
the_array[pos+1:index+1] = the_array[pos:index]
the_array[pos] = value
return the_array
"""
归并,将两个有序的list合并成新的有序list
"""
def merge(left, right):
if not left:
return right
if not right:
return left
l_len = len(left)
r_len = len(right)
result = [None]*(l_len+r_len)
i, j, k= 0,0,0
while i < l_len and j< r_len:
if left[i] <= right[j]:
result[k] = left[i]
i+=1
else:
result[k] = right[j]
j+=1
k+=1
while i<l_len:
result[k]=left[i];
k+=1
i+=1
while j<r_len:
result[k]=right[j]
k+=1
j+=1
return result
def timsort(the_array):
runs = []
length = len(the_array)
new_run = [the_array[0]]
new_run_reverse = False
# Split the_array into multiple (ascending or strictly descending) runs; strictly descending runs are reversed before being appended to runs.
for i in range(1, length):
if len(new_run) == 1:
if the_array[i] < the_array[i-1]:
new_run_reverse = True
else:
new_run_reverse = False
new_run.append(the_array[i])
elif new_run_reverse:
if the_array[i] < the_array[i-1]:
new_run.append(the_array[i])
else:
new_run.reverse()
runs.append(new_run)
#print(new_run)
new_run=[]
new_run.append(the_array[i])
else:
if the_array[i] >= the_array[i-1]:
new_run.append(the_array[i])
else:
runs.append(new_run)
#print(new_run)
new_run=[]
new_run.append(the_array[i])
if i == length - 1:
runs.append(new_run)
#print(new_run)
mini_run = 32
sorted_runs=[]
cur_run=[]
# Grow each run from runs to at least mini_run elements by concatenating consecutive runs, insertion-sort it, and append it to sorted_runs
for item in runs:
if len(cur_run) > mini_run:
sorted_runs.append(insertion_sort(cur_run))
cur_run = item
else:
cur_run.extend(item)
sorted_runs.append(insertion_sort(cur_run))
# Push the runs onto a stack one by one; let X, Y, Z be the three runs on top of the stack.
# If X > Y + Z or Y > Z is violated, Y is merged with the shorter of its two neighbours and pushed back.
# This rule tends to merge runs of similar size, which improves performance.
# Timsort is a stable sort, so only adjacent runs may be merged.
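# Worked example (illustrative): with run lengths Z=5, Y=3, X=4 on top of the stack,
# Y > Z fails (3 <= 5), so Y is merged with X (the shorter neighbour, since len(X) < len(Z)),
# leaving runs of lengths 5 and 7 on the stack.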
run_stack = []
sorted_array = []
for run in sorted_runs:
run_stack.append(run)
stop = False
while len(run_stack) >= 3 and not stop:
X = run_stack[len(run_stack)-1]
Y = run_stack[len(run_stack)-2]
Z = run_stack[len(run_stack)-3]
if (not len(X)>len(Y)+len(Z)) or (not len(Y)>len(Z)):
run_stack.pop()
run_stack.pop()
run_stack.pop()
if len(X) < len(Z):
YX = merge(Y,X)
run_stack.append(Z)
run_stack.append(YX)
else:
ZY = merge(Z,Y)
run_stack.append(ZY)
run_stack.append(X)
else:
stop =True
# Merge the runs remaining on the stack
for run in run_stack:
sorted_array = merge(sorted_array, run)
return sorted_array
#print(sorted_array)
l = timsort([3,1,5,7,9,2,4,6,8])
print(timsort(l))
# for x in range(0,100):
# data.append(random.randint(0,10000))
# start = time.process_time()
# timsort(data)
# end = time.process_time()
# print(end-start)
|
StarcoderdataPython
|
326501
|
<reponame>srg91/salt
# -*- coding: utf-8 -*-
'''
Common code shared between the nacl module and runner.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import base64
import logging
import os
# Import Salt libs
from salt.ext import six
import salt.syspaths
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.win_functions
import salt.utils.win_dacl
log = logging.getLogger(__name__)
REQ_ERROR = None
try:
import libnacl.secret
import libnacl.sealed
import libnacl.public  # keygen and the box helpers below use libnacl.public.SecretKey
except (ImportError, OSError) as e:
REQ_ERROR = 'libnacl import error, perhaps missing python libnacl package or should update.'
__virtualname__ = 'nacl'
def __virtual__():
return check_requirements()
def check_requirements():
'''
Check required libraries are available
'''
return (REQ_ERROR is None, REQ_ERROR)
def _get_config(**kwargs):
'''
Return configuration
'''
config = {
'box_type': 'sealedbox',
'sk': None,
'sk_file': os.path.join(kwargs['opts'].get('pki_dir'), 'master/nacl'),
'pk': None,
'pk_file': os.path.join(kwargs['opts'].get('pki_dir'), 'master/nacl.pub'),
}
config_key = '{0}.config'.format(__virtualname__)
try:
config.update(__salt__['config.get'](config_key, {}))
except (NameError, KeyError) as e:
# likely using salt-run so fallback to __opts__
config.update(kwargs['opts'].get(config_key, {}))
# pylint: disable=C0201
for k in set(config.keys()) & set(kwargs.keys()):
config[k] = kwargs[k]
return config
def _get_sk(**kwargs):
'''
Return sk
'''
config = _get_config(**kwargs)
key = None
if config['sk']:
key = salt.utils.stringutils.to_str(config['sk'])
sk_file = config['sk_file']
if not key and sk_file:
try:
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
key = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n')
except (IOError, OSError):
raise Exception('no key or sk_file found')
return base64.b64decode(key)
def _get_pk(**kwargs):
'''
Return pk
'''
config = _get_config(**kwargs)
pubkey = None
if config['pk']:
pubkey = salt.utils.stringutils.to_str(config['pk'])
pk_file = config['pk_file']
if not pubkey and pk_file:
try:
with salt.utils.files.fopen(pk_file, 'rb') as keyf:
pubkey = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n')
except (IOError, OSError):
raise Exception('no pubkey or pk_file found')
pubkey = six.text_type(pubkey)
return base64.b64decode(pubkey)
def keygen(sk_file=None, pk_file=None, **kwargs):
'''
Use libnacl to generate a keypair.
If no `sk_file` is defined return a keypair.
If only the `sk_file` is defined `pk_file` will use the same name with a postfix `.pub`.
When the `sk_file` is already existing, but `pk_file` is not. The `pk_file` will be generated
using the `sk_file`.
CLI Examples:
.. code-block:: bash
salt-call nacl.keygen
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
sk_file = kwargs['keyfile']
if sk_file is None:
kp = libnacl.public.SecretKey()
return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)}
if pk_file is None:
pk_file = '{0}.pub'.format(sk_file)
if sk_file and pk_file is None:
if not os.path.isfile(sk_file):
kp = libnacl.public.SecretKey()
with salt.utils.files.fopen(sk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.sk))
if salt.utils.platform.is_windows():
cur_user = salt.utils.win_functions.get_current_user()
salt.utils.win_dacl.set_owner(sk_file, cur_user)
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
else:
# chmod 0600 file
os.chmod(sk_file, 1536)
return 'saved sk_file: {0}'.format(sk_file)
else:
raise Exception('sk_file:{0} already exist.'.format(sk_file))
if sk_file is None and pk_file:
raise Exception('sk_file: Must be set inorder to generate a public key.')
if os.path.isfile(sk_file) and os.path.isfile(pk_file):
raise Exception('sk_file:{0} and pk_file:{1} already exist.'.format(sk_file, pk_file))
if os.path.isfile(sk_file) and not os.path.isfile(pk_file):
# generate pk using the sk
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
sk = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n')
sk = base64.b64decode(sk)
kp = libnacl.public.SecretKey(sk)
with salt.utils.files.fopen(pk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.pk))
return 'saved pk_file: {0}'.format(pk_file)
kp = libnacl.public.SecretKey()
with salt.utils.files.fopen(sk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.sk))
if salt.utils.platform.is_windows():
cur_user = salt.utils.win_functions.get_current_user()
salt.utils.win_dacl.set_owner(sk_file, cur_user)
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
else:
# chmod 0600 file
os.chmod(sk_file, 1536)
with salt.utils.files.fopen(pk_file, 'wb') as keyf:
keyf.write(base64.b64encode(kp.pk))
return 'saved sk_file:{0} pk_file: {1}'.format(sk_file, pk_file)
def enc(data, **kwargs):
'''
Alias to `{box_type}_encrypt`
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_encrypt(data, **kwargs)
return sealedbox_encrypt(data, **kwargs)
def enc_file(name, out=None, **kwargs):
'''
This is a helper function to encrypt a file and return its contents.
You can provide an optional output file using `out`
`name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc.
CLI Examples:
.. code-block:: bash
salt-run nacl.enc_file name=/tmp/id_rsa
salt-call nacl.enc_file name=salt://crt/mycert out=/tmp/cert
salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
try:
data = __salt__['cp.get_file_str'](name)
except Exception as e: # pylint: disable=broad-except
# likely using salt-run, so fall back to the local filesystem
with salt.utils.files.fopen(name, 'rb') as f:
data = salt.utils.stringutils.to_unicode(f.read())
d = enc(data, **kwargs)
if out:
if os.path.isfile(out):
raise Exception('file:{0} already exist.'.format(out))
with salt.utils.files.fopen(out, 'wb') as f:
f.write(salt.utils.stringutils.to_bytes(d))
return 'Wrote: {0}'.format(out)
return d
def dec(data, **kwargs):
'''
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_decrypt(data, **kwargs)
return sealedbox_decrypt(data, **kwargs)
def dec_file(name, out=None, **kwargs):
'''
This is a helper function to decrypt a file and return its contents.
You can provide an optional output file using `out`
`name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc.
CLI Examples:
.. code-block:: bash
salt-run nacl.dec_file name=/tmp/id_rsa.nacl
salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
'''
try:
data = __salt__['cp.get_file_str'](name)
except Exception as e: # pylint: disable=broad-except
# likely using salt-run, so fall back to the local filesystem
with salt.utils.files.fopen(name, 'rb') as f:
data = salt.utils.stringutils.to_unicode(f.read())
d = dec(data, **kwargs)
if out:
if os.path.isfile(out):
raise Exception('file:{0} already exist.'.format(out))
with salt.utils.files.fopen(out, 'wb') as f:
f.write(salt.utils.stringutils.to_bytes(d))
return 'Wrote: {0}'.format(out)
return d
def sealedbox_encrypt(data, **kwargs):
'''
Encrypt data using a public key generated from `nacl.keygen`.
The encrypted data can be decrypted using `nacl.sealedbox_decrypt` only with the secret key.
CLI Examples:
.. code-block:: bash
salt-run nacl.sealedbox_encrypt datatoenc
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data))
def sealedbox_decrypt(data, **kwargs):
'''
Decrypt data using a secret key that was encrypted using a public key with `nacl.sealedbox_encrypt`.
CLI Examples:
.. code-block:: bash
salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A=
salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
keypair = libnacl.public.SecretKey(sk)
b = libnacl.sealed.SealedBox(keypair)
return b.decrypt(base64.b64decode(data))
def secretbox_encrypt(data, **kwargs):
'''
Encrypt data using a secret key generated from `nacl.keygen`.
The same secret key can be used to decrypt the data using `nacl.secretbox_decrypt`.
CLI Examples:
.. code-block:: bash
salt-run nacl.secretbox_encrypt datatoenc
salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
sk = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(sk)
return base64.b64encode(b.encrypt(data))
def secretbox_decrypt(data, **kwargs):
'''
Decrypt data that was encrypted using `nacl.secretbox_encrypt` using the secret key
that was generated from `nacl.keygen`.
CLI Examples:
.. code-block:: bash
salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A=
salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl
salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo='
'''
if data is None:
return None
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
key = _get_sk(**kwargs)
b = libnacl.secret.SecretBox(key=key)
return b.decrypt(base64.b64decode(data))
|
StarcoderdataPython
|
336369
|
<reponame>rutgerhartog/apocrypha
from scipy.stats import chisquare as chi2
def calculate_chisquare(text: bytes) -> float:
return chi2(text).statistic
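# A hypothetical byte-frequency variant (not part of the original): scipy's chisquare expects
# observed frequencies, so one might count byte occurrences first, e.g.
# from collections import Counter
# def calculate_chisquare_freq(text: bytes) -> float:
#     return chi2(list(Counter(text).values())).statistic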
|
StarcoderdataPython
|
3342947
|
# Copyright 2017-present <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import discord
from redbot.core import commands
__red_end_user_data_statement__ = (
"This extension does not persistently store data or metadata about users."
)
async def before_invoke_hook(ctx: commands.Context):
guild = ctx.guild
if not guild:
return
if guild.me == guild.owner:
return
if await ctx.bot.is_owner(guild.owner):
return
author, me = ctx.author, guild.me
assert isinstance(author, discord.Member) # nosec
if me.guild_permissions.administrator:
if (
author.top_role > me.top_role or author == guild.owner
) and author.guild_permissions.manage_roles:
with contextlib.suppress(Exception):
await ctx.send(
"This bot refuses to work with admin permissions. "
"They are dangerous and lazy to give out."
)
raise commands.CheckFailure()
async def setup(bot):
bot.before_invoke(before_invoke_hook)
def teardown(bot):
bot.remove_before_invoke_hook(before_invoke_hook)
|
StarcoderdataPython
|
5116650
|
<reponame>titanous/fdb-document-layer
#!/usr/bin/python
#
# setup_mongo.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MongoDB is a registered trademark of MongoDB, Inc.
#
from pymongo import MongoClient, errors
import pymongo
import argparse
import preload_database
def init_replica_set(shard_port, shard_addresses, index):
repl_set = {}
try:
shard = MongoClient(shard_addresses[0], shard_port)
members = []
for shard_id in range(len(shard_addresses)):
members.append({"_id": shard_id, "host": shard_addresses[shard_id] + ":" + str(shard_port)})
repl_set = {"_id": "rs" + str(index) + ".0", "members": members}
shard.admin.command('replSetInitiate', repl_set)
print 'Replica set initialized with: '
print repl_set
except errors.OperationFailure as e:
if 'already initialized' in str(e.message):
print 'Replica set already initialized, continuing.'
else:
raise e
return repl_set
def add_shard(mongos, replSet):
try:
mongos.admin.command('addShard', replSet['_id'] + "/" + replSet['members'][0]['host'])
print 'Shard added.'
except errors.OperationFailure as e:
if 'duplicate key' in str(e.message):
print 'Shard already added, continuing.'
elif 'exists in another' in str(e.message):
print 'Shard already added and enabled for DB, continuing.'
else:
raise e
def enable_sharding_on_d_b(mongos, db_name):
try:
mongos.admin.command('enableSharding', db_name)
print 'Sharding enabled on DB.'
except errors.OperationFailure as e:
if 'already enabled' in str(e.message):
print 'Sharding already enabled on DB, continuing.'
else:
raise e
def enable_sharding_on_collection(mongos, db_name, collection_name):
try:
collection = mongos[db_name][collection_name]
collection.ensure_index([("_id", pymongo.HASHED)])
mongos.admin.command('shardCollection', db_name + "." + collection_name, key={"_id": "hashed"})
print 'Sharded collection.'
except errors.OperationFailure as e:
if 'already sharded' in str(e.message):
print 'Collection already sharded, continuing.'
else:
raise e
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i + n]
if len(val) == n:
yield tuple(val)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--shard-port', type=int, default=27018)
parser.add_argument('-m', '--mongos-port', type=int, default=27017)
parser.add_argument('-d', '--db_name', default='test')
parser.add_argument('-c', '--collection', default='test')
parser.add_argument('-a', '--addresses', nargs='+', help="shard addresses (also used for mongos)")
parser.add_argument('-l', '--load-data', default=False, help="Load seed data")
parser.add_argument('--subnet', help="used to calculate shard addresses")
parser.add_argument('--address-count', type=int, help="used to calculate shard addresses")
ns = vars(parser.parse_args())
shard_port = 27018
mongos_port = 27017
db_name = 'test'
collection_name = 'abc'
shard_addresses = ns['addresses']
if ns['addresses'] is None and ns['subnet'] is not None and ns['address_count']:
shard_addresses = []
for index in range(1, ns['address_count'] + 1):
shard_addresses.append(ns['subnet'] + str(index))
grouped_shard_addresses = list(group(shard_addresses, 3))
if len(grouped_shard_addresses) > 0:
mongos = MongoClient(shard_addresses[0], mongos_port)
for index in range(len(grouped_shard_addresses)):
replSet = init_replica_set(shard_port, grouped_shard_addresses[index], index)
print("Giving the replica set a few seconds to initialize...")
import time
time.sleep(10)
add_shard(mongos, replSet)
enable_sharding_on_d_b(mongos, db_name)
enable_sharding_on_collection(mongos, db_name, collection_name)
if (ns['load_data']):
preload_database.preload_database({
"host": shard_addresses[0],
"port": mongos_port,
"collection": collection_name,
"number": 1,
"no_numeric_fieldnames": True,
"no_nulls": True,
"big_documents": True
})
else:
print "Incorrected or missing shard addresses - exiting.."
|
StarcoderdataPython
|
9651377
|
<filename>hostlock/apps.py
from django.apps import AppConfig
class HostlockConfig(AppConfig):
name = 'hostlock'
|
StarcoderdataPython
|
4938603
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-12 09:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms_test2', '0006_auto_20180412_0942'),
]
operations = [
migrations.CreateModel(
name='MovieInfo',
fields=[
('movie_info_id', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='MovieLangPack',
fields=[
('movie_langpack_id', models.AutoField(primary_key=True, serialize=False)),
('is_default', models.PositiveIntegerField(blank=True, choices=[(0, 'not default language'), (1, 'is default language')], null=True)),
('title', models.CharField(blank=True, max_length=100, null=True)),
('description', models.CharField(blank=True, max_length=1000, null=True)),
('actors', models.ManyToManyField(related_name='_movielangpack_actors_+', to='cms_test2.Actor')),
('directors', models.ManyToManyField(related_name='_movielangpack_directors_+', to='cms_test2.Director')),
('language', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.Language')),
('movie_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.MovieInfo')),
('thumbnails', models.ManyToManyField(related_name='_movielangpack_thumbnails_+', to='cms_test2.Thumbnail')),
],
),
migrations.AddField(
model_name='video',
name='episode_info',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.EpisodeInfo'),
),
]
|
StarcoderdataPython
|
4971971
|
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from ..utils import logger, verbose
from ..parallel import parallel_func
from ..io.pick import channel_type, pick_types
def _time_gen_one_fold(clf, X, y, train, test, scoring):
"""Aux function of time_generalization"""
from sklearn.metrics import SCORERS
n_times = X.shape[2]
scores = np.zeros((n_times, n_times))
scorer = SCORERS[scoring]
for t_train in range(n_times):
X_train = X[train, :, t_train]
clf.fit(X_train, y[train])
for t_test in range(n_times):
X_test = X[test, :, t_test]
scores[t_test, t_train] += scorer(clf, X_test, y[test])
return scores
@verbose
def time_generalization(epochs_list, clf=None, cv=5, scoring="roc_auc",
shuffle=True, random_state=None, n_jobs=1,
verbose=None):
"""Fit decoder at each time instant and test at all others
The function returns the cross-validation scores when the train set
is from one time instant and the test from all others.
The decoding will be done using all available data channels, but
    will only work if exactly one type of channel is available. For example
epochs should contain only gradiometers.
Parameters
----------
epochs_list : list of Epochs
The epochs in all the conditions.
clf : object | None
A object following scikit-learn estimator API (fit & predict).
If None the classifier will be a linear SVM (C=1.) after
feature standardization.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 5).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
scoring : {string, callable, None}, optional, default: "roc_auc"
A string (see model evaluation documentation in scikit-learn) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
shuffle : bool
If True, shuffle the epochs before splitting them in folds.
random_state : None | int
The random state used to shuffle the epochs. Ignored if
shuffle is False.
n_jobs : int
        Number of jobs to run in parallel. Each fold is fit
in parallel.
Returns
-------
scores : array, shape (n_times, n_times)
The scores averaged across folds. scores[i, j] contains
the generalization score when learning at time j and testing
at time i. The diagonal is the cross-validation score
        at each time-independent instant.
Notes
-----
The function implements the method used in:
<NAME>, <NAME>, <NAME>, <NAME>
and <NAME>, "Two distinct dynamic modes subtend the detection of
unexpected sounds", PLOS ONE, 2013
"""
from sklearn.base import clone
from sklearn.utils import check_random_state
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import check_cv
if clf is None:
scaler = StandardScaler()
svc = SVC(C=1, kernel='linear')
clf = Pipeline([('scaler', scaler), ('svc', svc)])
info = epochs_list[0].info
data_picks = pick_types(info, meg=True, eeg=True, exclude='bads')
# Make arrays X and y such that :
# X is 3d with X.shape[0] is the total number of epochs to classify
# y is filled with integers coding for the class to predict
# We must have X.shape[0] equal to y.shape[0]
X = [e.get_data()[:, data_picks, :] for e in epochs_list]
y = [k * np.ones(len(this_X)) for k, this_X in enumerate(X)]
X = np.concatenate(X)
y = np.concatenate(y)
cv = check_cv(cv, X, y, classifier=True)
ch_types = set([channel_type(info, idx) for idx in data_picks])
logger.info('Running time generalization on %s epochs using %s.' %
(len(X), ch_types.pop()))
if shuffle:
rng = check_random_state(random_state)
order = np.argsort(rng.randn(len(X)))
X = X[order]
y = y[order]
parallel, p_time_gen, _ = parallel_func(_time_gen_one_fold, n_jobs)
scores = parallel(p_time_gen(clone(clf), X, y, train, test, scoring)
for train, test in cv)
scores = np.mean(scores, axis=0)
return scores
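# Usage sketch (variable names are hypothetical; assumes two mne.Epochs
# objects with identical channel picks and time samples, one per condition):
#
#     scores = time_generalization([epochs_cond_a, epochs_cond_b],
#                                  cv=5, scoring='roc_auc', n_jobs=2)
#     # scores[i, j] is the CV score when training at time j and testing at time i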
|
StarcoderdataPython
|
4936293
|
<reponame>sulealothman/arbraille<filename>braille/__init__.py<gh_stars>1-10
from .BrailleToAlphabet import BrailleToAlphabet
from .AlphabetToBraille import AlphabetToBraille
from .BrailleFile import BrailleFile
#from .ConvertToArabic import BrailleToArabic
from .Main import Main
|
StarcoderdataPython
|
1792920
|
<filename>Python-Data-Structures-and-Algorithms-master/Chapter05/stack_queue_1.py
class Queue:
def __init__(self):
self.inbound_stack = []
self.outbound_stack = []
def dequeue(self):
if not self.outbound_stack:
while self.inbound_stack:
self.outbound_stack.append(self.inbound_stack.pop())
return self.outbound_stack.pop()
def enqueue(self, data):
self.inbound_stack.append(data)
queue = Queue()
queue.enqueue(5)
queue.enqueue(6)
queue.enqueue(7)
print(queue.inbound_stack)
queue.dequeue()
print(queue.inbound_stack)
print(queue.outbound_stack)
queue.dequeue()
print(queue.outbound_stack)
"""
import time
start_time = time.time()
for i in range(100000):
#print i
array_queue.enqueue(i)
for i in range(100000):
#print i
array_queue.dequeue()
print("--- %s seconds ---" % (time.time() - start_time))
import time
start_time = time.time()
for i in range(10000):
for j in range(100):
array_queue.push(i)
for k in range(10):
array_queue.pop()
print("--- %s seconds ---" % (time.time() - start_time))
"""
|
StarcoderdataPython
|
5019189
|
from .pkg import a
|
StarcoderdataPython
|
4892187
|
<reponame>jcferrara/fantasy-football-start-or-sit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 01:25:36 2021
@author: JustinFerrara
"""
import pandas as pd
def get_gamelog(player_code, year):
player_url = 'https://www.pro-football-reference.com' + player_code[0:len(player_code)-4] + '/gamelog/' + str(year) + '/'
table = pd.read_html(player_url)
table = table[0]
col_names = []
for col in table.columns:
col_names.append(col[0] + "_" + col[1])
table.columns = col_names
table = table.rename(columns={"Unnamed: 0_level_0_Rk":"row_tracker",
"Unnamed: 1_level_0_Date":"date",
"Unnamed: 2_level_0_G#":"game_number",
"Unnamed: 3_level_0_Week":"week_number",
"Unnamed: 4_level_0_Age":"player_age",
"Unnamed: 5_level_0_Tm":"player_team",
"Unnamed: 6_level_0_Unnamed: 6_level_1":"game_setting",
"Unnamed: 7_level_0_Opp":"game_opponent",
"Unnamed: 8_level_0_Result":"game_result",
"Unnamed: 9_level_0_GS":"game_started"})
table.drop(table.tail(1).index, inplace = True)
table['game_started'] = table['game_started'].astype(str)
table['game_played'] = table['game_started'].apply(lambda x: "*" if x == "nan" else x)
table = table[table['game_played'] == "*"]
table = table.astype(str)
table['player_code'] = player_code
table['game_setting'] = table['game_setting'].apply(lambda x: "Away" if x == "@" else "Home")
table = table.replace({'%': ''}, regex=True)
return(table)
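# NOTE: the loop below assumes a ``players`` DataFrame with a 'player_code'
# column has already been built elsewhere; it is not defined in this script.
# A hypothetical source could be:
#
#     players = pd.read_csv('players.csv')   # one row per player, incl. player_code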
game_stats = pd.DataFrame()
num = 0
for i in players['player_code']:
for y in ['2020', '2019']:
try:
gamelog = get_gamelog(i, y)
game_stats = pd.concat([game_stats, gamelog])
except:
continue
num += 1
print(num)
game_stats = game_stats[['date', 'week_number', 'player_team', 'game_setting', 'game_opponent', 'game_result',
'Passing_Cmp', 'Passing_Att', 'Passing_Cmp%', 'Passing_Yds', 'Passing_TD', 'Passing_Int', 'Passing_Rate', 'Passing_Sk', 'Passing_Y/A',
'Rushing_Att', 'Rushing_Yds', 'Rushing_Y/A', 'Rushing_TD',
'Receiving_Tgt', 'Receiving_Rec', 'Receiving_Yds', 'Receiving_Y/R', 'Receiving_TD', 'Receiving_Ctch%', 'Receiving_Y/Tgt',
'Scoring_TD', 'Scoring_Pts',
'Fumbles_Fmb', 'Fumbles_FL', 'Fumbles_FR',
'Off. Snaps_Num', 'Off. Snaps_Pct', 'ST Snaps_Num', 'ST Snaps_Pct',
'player_code']]
game_stats.columns = ['date', 'week_number', 'player_team', 'game_setting', 'game_opponent', 'game_result',
'passing_completions', 'passing_attempts', 'passing_completion_pct', 'passing_yards', 'passing_td', 'passing_int', 'passing_qbr', 'passing_sacks', 'passing_yards_per_att',
'rushing_attempts', 'rushing_yards', 'rushing_yards_per_att', 'rushing_td',
'receiving_targets', 'receiving_receptions', 'receiving_yards', 'receiving_yards_per_reception', 'receiving_td', 'receiving_catch_pct', 'receiving_yards_per_target',
'scoring_total_td', 'scoring_total_points',
'fumbles_num', 'fumbles_num_lost', 'fumbles_num_recovered',
'num_off_snaps', 'pct_off_snaps', 'num_st_snaps', 'pct_st_snaps',
'player_code']
game_stats = game_stats.replace("nan", "0")
game_stats = game_stats.fillna("0")
game_stats[[ 'passing_completions',
'passing_attempts',
'passing_completion_pct',
'passing_yards',
'passing_td',
'passing_int',
'passing_qbr',
'passing_sacks',
'passing_yards_per_att',
'rushing_attempts',
'rushing_yards',
'rushing_yards_per_att',
'rushing_td',
'receiving_targets',
'receiving_receptions',
'receiving_yards',
'receiving_yards_per_reception',
'receiving_td',
'receiving_catch_pct',
'receiving_yards_per_target',
'scoring_total_td',
'scoring_total_points',
'fumbles_num',
'fumbles_num_lost',
'fumbles_num_recovered',
'num_off_snaps',
'pct_off_snaps',
'num_st_snaps',
'pct_st_snaps']] = game_stats[[ 'passing_completions',
'passing_attempts',
'passing_completion_pct',
'passing_yards',
'passing_td',
'passing_int',
'passing_qbr',
'passing_sacks',
'passing_yards_per_att',
'rushing_attempts',
'rushing_yards',
'rushing_yards_per_att',
'rushing_td',
'receiving_targets',
'receiving_receptions',
'receiving_yards',
'receiving_yards_per_reception',
'receiving_td',
'receiving_catch_pct',
'receiving_yards_per_target',
'scoring_total_td',
'scoring_total_points',
'fumbles_num',
'fumbles_num_lost',
'fumbles_num_recovered',
'num_off_snaps',
'pct_off_snaps',
'num_st_snaps',
'pct_st_snaps']].apply(pd.to_numeric)
game_stats.to_csv('game_stats_db.csv', index = False)
|
StarcoderdataPython
|
6662149
|
<reponame>Saebasol/Heliotrope
"""
MIT License
Copyright (c) 2021 SaidBySolo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from dataclasses import dataclass
from typing import Literal, Optional
from heliotrope.types import HitomiTagJSON
@dataclass
class Tag:
index_id: int
male: Optional[Literal["", "1"]]
female: Optional[Literal["", "1"]]
tag: str
url: str
id: Optional[int] = None
def to_dict(self) -> HitomiTagJSON:
hitomi_tag_json = HitomiTagJSON(url=self.url, tag=self.tag)
if self.male is not None:
hitomi_tag_json["male"] = self.male
if self.female is not None:
hitomi_tag_json["female"] = self.female
return hitomi_tag_json
@classmethod
def from_dict(cls, index_id: int, d: HitomiTagJSON) -> "Tag":
return cls(
index_id=index_id,
male=d.get("male"),
female=d.get("female"),
tag=d["tag"],
url=d["url"],
)
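# Usage sketch (values are illustrative only):
#
#     tag = Tag.from_dict(1, {"tag": "glasses", "url": "/tag/glasses.html",
#                             "male": "1", "female": None})
#     tag.to_dict()   # -> {"url": "/tag/glasses.html", "tag": "glasses", "male": "1"}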
|
StarcoderdataPython
|
5026364
|
<reponame>synxlin/chinese-chat-bot
#/usr/bin/env python3
#-*- coding: utf-8 -*-
import gc
import pyaudio
import wave
import numpy as np
from os import path
import subprocess
from queue import Queue, Empty
from threading import Thread, Lock
from time import sleep
from .recognizer import Recognizer
from .jarvis import Jarvis
class VoiceRecorder(object):
""" Realtime Recorder """
def __init__(self):
self.status = 'off'
self._pyaudio = pyaudio.PyAudio()
self._stream = None
self._speech_queue = Queue()
self._frame_queue = Queue()
self._save_root = 'audio/'
# voice format
self._format = pyaudio.paInt16
self._threshold = 500
self._rate = 16000
self._frame_size = 1024 # 1024 / 16000 = 0.064s
self._channels = 1
self._frame_length = float(self._frame_size) / float(self._rate)
# speech
self._min_sentence_length = 0.5 # sec
self._min_sentence_frame_num = int(self._min_sentence_length / self._frame_length)
self._min_pause_length = 0.5 # pause between sentences, sec
self._min_pause_frame_num = int(self._min_pause_length / self._frame_length)
# self._max_buffer_length = 2
# self._max_buffer_frame_num = self._max_buffer_length / self._frame_length
self._power_threshold = 0.0002
self._zcr_threshold = 0.05
self._auto_threshold_length = 2 # sec
self._auto_threshold_frame_num = int(self._auto_threshold_length / self._frame_length)
self._auto_threshold_dropout = 0.5
self._auto_threshold_power_mult = 3
self._auto_threshold_zcr_mult = 3
self._noise = []
self._noise_frame_num = 10
# stream lock
self.lock = Lock()
def save(self, frame, filename):
path = self._save_root + filename
with wave.open(path, 'wb') as fout:
fout.setparams((self._channels, 2, self._rate, 0, 'NONE', 'not compressed'))
fout.writeframes(frame)
return path
def on(self, frame_preprocess=True):
assert self.status == 'off'
# start audio stream
self._stream = self._pyaudio.open(format=self._format, \
channels=self._channels, rate=self._rate, input=True, \
output=False, frames_per_buffer=self._frame_size)
# start recording
self.status = 'on'
Thread(target=self._record).start()
if frame_preprocess:
Thread(target=self._frame_preprocess).start()
def off(self):
# assert self.status == 'on'
self.status = 'off'
#if self._stream is not None:
# self._stream.close()
# self._stream = None
# clear queue
try:
while True:
self._frame_queue.get_nowait()
except Empty:
pass
try:
while True:
self._speech_queue.get_nowait()
except Empty:
pass
def auto_set_threshold(self):
assert self.status == 'off'
print('auto setting threshold.')
self.on(frame_preprocess=False)
powers = []
zcrs = []
for i in range(self._auto_threshold_frame_num):
frame = self._frame_queue.get()
power, zcr = self._frame_power_zcr(frame)
powers.append(power)
zcrs.append(zcr)
self.off()
powers.sort()
zcrs.sort()
dropout = self._auto_threshold_dropout
dropout_st = int(len(powers)*dropout*0.5)
dropout_ed = int(len(powers)*(1 - dropout*0.5))
powers = powers[dropout_st:dropout_ed]
zcrs = zcrs[dropout_st:dropout_ed]
self._power_threshold = self._auto_threshold_power_mult * sum(powers) / len(powers)
self._zcr_threshold = self._auto_threshold_zcr_mult * sum(zcrs) / len(zcrs)
print('power threshold:', self._power_threshold)
print('zcr threshold:', self._zcr_threshold)
def get_speech_nowait(self):
return self._speech_queue.get_nowait()
def set_save_root(self, root):
self._save_root = root
def _record(self):
while self.status == 'on': # read only, thread safe
assert self._stream is not None
frame = self._stream.read(self._frame_size)
self._frame_queue.put(frame)
if self._stream is not None:
self._stream.close()
self._stream = None
def _frame_preprocess(self): # frame -> sentences
speech_frames = []
background_frames = []
while self.status == 'on':
try:
while True:
frame = self._frame_queue.get_nowait()
is_speech = self._is_speech(frame)
if is_speech:
if len(speech_frames) == 0 or len(background_frames) == 0:
speech_frames.append(frame)
background_frames.clear()
elif len(speech_frames) > 0 and len(background_frames) > 0:
speech_frames.extend(background_frames)
speech_frames.append(frame)
background_frames.clear()
else:
assert False # impossible
if not is_speech:
if len(self._noise) == self._noise_frame_num:
self._noise = self._noise[1:]
self._noise.append(frame) # modeling background noise
if len(speech_frames) == 0:
pass # Do nothing
elif len(speech_frames) > 0:
background_frames.append(frame)
if len(background_frames) > self._min_pause_frame_num:
if len(speech_frames) > self._min_sentence_frame_num:
sentence = self._concat_frames(speech_frames)
# denoise
if self._noise:
sentence = self._denoise(sentence)
self._speech_queue.put(sentence)
self.status = 'off'
background_frames.clear()
speech_frames.clear()
except Empty:
sleep(self._frame_length)
def _frame_power_zcr(self, frame):
numdata = self._frame_to_nparray(frame)
power = self._power(numdata)
zcr = self._zcr(numdata)
return power, zcr
def _frame_to_nparray(self, frame):
assert self._format == pyaudio.paInt16
        numdata = np.frombuffer(frame, dtype=np.int16)  # frombuffer replaces the deprecated fromstring for binary data
numdata = numdata / 2**15 # max val of int16 = 2**15-1
return numdata
def _nparray_to_frame(self, numdata):
numdata = numdata * 2**15
numdata = numdata.astype(np.int16)
frame = numdata.tobytes()
return frame
def _power(self, numdata):
return np.mean(numdata**2)
def _zcr(self, numdata):
zc = numdata[1:] * numdata[:-1] < 0
zcr = sum(zc) / len(zc)
return zcr
def _is_speech(self, frame):
power, zcr = self._frame_power_zcr(frame)
voiced_sound = power > self._power_threshold
unvoiced_sound = zcr > self._zcr_threshold
return voiced_sound or unvoiced_sound
def _concat_frames(self, frames):
return b''.join(frames)
def _denoise(self, speech):
# Spectral Subtraction
speech_val = self._frame_to_nparray(speech)
noise_val = self._frame_to_nparray(b''.join(self._noise))
speech_fft_mag = np.abs(np.fft.fft(speech_val))
noise_fft_mag = np.abs(np.fft.fft(noise_val))
speech_freq = np.linspace(0, self._rate, len(speech_val))
noise_freq = np.linspace(0, self._rate, len(noise_val))
noise_fft_interp = np.interp(speech_freq, noise_freq, noise_fft_mag)
denoised_fft_mag = np.maximum(speech_fft_mag - noise_fft_interp, np.zeros(speech_fft_mag.shape))
denoised_fft = np.fft.fft(speech_val) * denoised_fft_mag / speech_fft_mag
denoised_val = np.real(np.fft.ifft(denoised_fft))
denoised = self._nparray_to_frame(denoised_val)
return denoised
class Controller(object):
def __init__(self):
self.recognizer = Recognizer()
self.recorder = VoiceRecorder()
self.jarvis = Jarvis()
self.timer = 0
self._status = None # None, 'online'
self._texts = []
self._responses = []
self._cnt = 0
self._interval = 0.1
def get_texts(self):
return self._texts[:]
def get_response(self):
return self._responses[:]
def clear_texts(self):
assert self._status == None # or use a mutex
self._texts = []
self._responses = []
def online(self):
assert self._status == None
self._cnt = 0
self._status = 'online'
self.recorder.on()
self.recognizer.on()
Thread(target=self._online_loop).start()
def stop(self):
status = self._status
self._status = None
if status == 'online':
self.recorder.off()
self.recognizer.off()
self.jarvis.off()
def _online_loop(self):
while self._status == 'online':
result = None
speech = None
try:
result = self.recognizer.get_result_nowait()
except Empty:
pass
try:
speech = self.recorder.get_speech_nowait()
except Empty:
pass
if result:
self._texts.append(result)
self.jarvis.off()
self.jarvis.put_question(result)
response = self.jarvis.get_response()
self._responses.append(response + '\n')
gc.collect()
subprocess.run(['ekho','\" %s\"' % response])
sleep(self._interval)
# self.stop()
if speech and self._cnt == 0:
filename = 'data.wav'
filepath = self.recorder.save(speech, filename)
print('saving to ', filename)
self.recognizer.put_speech(filepath)
self._cnt += 1
if not result and not speech:
sleep(self._interval)
|
StarcoderdataPython
|
6619919
|
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.datasets import load_svmlight_file
import random
#data
data = load_svmlight_file("leu")
# subSampling
l =len(data[1])
start = int(round(l*0.70,0))
#N = random.sample(range(start,l), 1)
N = int(round(l*0.80,0))
print("Number of sub sample %d" %N)
i = np.random.choice(np.arange(data[0].shape[0]), N, replace=False)
sub_data = data[0][i.tolist()]
sub_sample = data[1][i.tolist()] # check for this step
X_1 = sub_data.todense().tolist()
y_1 = list(map(int, sub_sample))  # materialise labels so scikit-learn gets a list, not a map object
#L1 SVM
l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1)
#print(len(l1svc.coef_[0]))
coef = l1svc.coef_.tolist()[0]
#print(coef[0])
#print(l1svc.coef_.tolist()[0])
#print([i for i, j in enumerate(coef) if j != 0])
#print(len(l1svc.coef_.tolist()[0]))
print("Number of features have non-zero weight vector coefficients %d " %sum(1 for i in coef if i != 0))
#For each feature compute a score that is the number of sub-samples for which that feature yielded a non-zero weight vector coefficient
'''
sampleListCoef = []
print(len(l1svc.coef_[0].tolist()))
for k in range(0,len(l1svc.coef_[0].tolist())):
for j in range(start,l):
i = np.random.choice(np.arange(data[0].shape[0]), j, replace=False)
sub_data = data[0][i.tolist()]
sub_sample = data[1][i.tolist()] # check for this step
X_1 = sub_data.todense().tolist() # samples 72 features above 7129
y_1 = map(int,sub_sample) # classes 2
#L1 SVM
l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1)
coef = map(int,np.asarray(l1svc.coef_[0]))
if(coef[k] > 0):
sampleListCoef.append[j]
else:
sampleListCoef + [0]
print("Number of sub-samples for which that feature yielded a non-zero weight vector coefficient :")
print(sampleListCoef)
'''
|
StarcoderdataPython
|
5184463
|
"""
This example demonstrates the use of a newly implemented oak-d worker: hand_asl
To implement a new worker, please refer to the following steps:
1. Define a new oak-d node type in:
<cep_root>/src/curt/curt/modules/vision/oakd_node_types.py
In this example, the new type is "hand_asl"
2. Implement the actual logic of the worker in:
<cep_root>/src/curt/curt/modules/vision/oakd_hand_asl.py
3. Add this new worker in:
<cep_root>/src/curt/curt/module_configs.json
In this example, a new worker "oakd_hand_asl" and its class
name "OAKDASL" is added.
4. When curt backend restarts, the newly implemented worker will
be advertised and available to use.
The code below demonstrates its use.
"""
from curt.command import CURTCommands
# Modify these to your own workers
# Format is "<host_name>/<module_type>/<service_name>/<worker_name>"
OAKD_PIPELINE_WORKER = "charlie/vision/oakd_service/oakd_pipeline"
RGB_CAMERA_WORKER = "charlie/vision/oakd_service/oakd_rgb_camera_input"
HAND_LADNMARKS_WORKER = "charlie/vision/oakd_service/oakd_hand_landmarks"
HAND_ASL_WORKER = "charlie/vision/oakd_service/oakd_hand_asl"
preview_width = 640
preview_heigth = 360
palm_detection_nn_input_size = 128
hand_landmarks_nn_input_size = 224
hand_asl_nn_input_size = 224
CURTCommands.initialize()
oakd_pipeline_config = [
["add_rgb_cam_node", preview_width, preview_heigth],
["add_rgb_cam_preview_node"],
["add_nn_node", "palm_detection", "palm_detection_sh6.blob", palm_detection_nn_input_size, palm_detection_nn_input_size],
["add_nn_node", "hand_landmarks", "hand_landmark_sh6.blob", hand_landmarks_nn_input_size, hand_landmarks_nn_input_size],
["add_nn_node", "hand_asl", "hand_asl_6_shaves.blob", hand_asl_nn_input_size, hand_asl_nn_input_size],
]
oakd_pipeline_worker = CURTCommands.get_worker(
OAKD_PIPELINE_WORKER
)
config_handler = CURTCommands.config_worker(oakd_pipeline_worker, oakd_pipeline_config)
success = CURTCommands.get_result(config_handler)["dataValue"]["data"]
rgb_camera_worker = CURTCommands.get_worker(
RGB_CAMERA_WORKER
)
hand_landmarks_worker = CURTCommands.get_worker(
HAND_LADNMARKS_WORKER
)
hand_asl_worker = CURTCommands.get_worker(HAND_ASL_WORKER)
while True:
rgb_frame_handler = CURTCommands.request(rgb_camera_worker, params=["get_rgb_frame"])
hand_landmarks_handler = CURTCommands.request(
hand_landmarks_worker, params=[rgb_frame_handler]
)
hand_asl_handler = CURTCommands.request(
hand_asl_worker, params=[hand_landmarks_handler, rgb_frame_handler]
)
hand_asl_results = CURTCommands.get_result(hand_asl_handler)["dataValue"]["data"]
print(hand_asl_handler)
|
StarcoderdataPython
|
3473392
|
""" A wechat personal account api project
See:
https://github.com/littlecodersh/ItChat
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
import itchat
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='itchat',
version=itchat.__version__,
description='A complete wechat personal account api',
long_description=long_description,
url='https://github.com/littlecodersh/ItChat',
author='LittleCoder',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='wechat itchat api robot weixin personal extend',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
install_requires=['requests', 'pyqrcode', 'pypng'],
# List additional groups of dependencies here
extras_require={},
)
|
StarcoderdataPython
|
9642222
|
import pyowm
import pyowm.utils
import json
import requests
import re
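# NOTE: this snippet is meant to be executed inside a host assistant object,
# so ``self`` is not defined in this file. It expects ``self.parancs`` (the
# recognised command text), ``self.beszed`` (a text-to-speech callable),
# ``self.h`` (a Hungarian stemmer) and ``self.hangFelismeres`` (speech
# recognition) to be supplied by the surrounding application.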
def varosLekerese():
data = json.loads(requests.get("http://ipinfo.io/json").text)
return data["city"]
parancs = self.parancs
beszed = self.beszed
h = self.h
hangFelismeres = self.hangFelismeres
if "időjárás" in parancs.lower() or "hőmérséklet" in parancs.lower() or "hány fok van" in parancs.lower():
if "milyen az időjárás" == parancs.lower() or "milyen a hőmérséklet" == parancs.lower() or "hány fok van" == parancs.lower():
varos = varosLekerese()
elif bool(re.search("^milyen a.*időjárás$", parancs.lower())):
varos = h.stem(parancs.split()[2])[0]
else:
varos = h.stem(parancs.split()[-1])[0]
owm = pyowm.OWM("bc12083e70d2d22298c2df1cec7101d9")
mgr = owm.weather_manager()
megfigyeles = mgr.weather_at_place(varos)
idojaras = megfigyeles.weather
homerseklet = idojaras.temperature('celsius')['temp']
beszed(f"A {varos}i hőmérséklet {homerseklet} celsius fok")
quit()
|
StarcoderdataPython
|
6672190
|
<filename>HeapSort/HeapSort.py<gh_stars>0
#!/bin/python
def Heapify(DataList, Size):
Layer = 1
ParentIndex = 1
while ParentIndex < Size:
ParentIndex = ParentIndex << 1
Layer += 1
ParentIndex = 2**(Layer-1) - 2
while ParentIndex >= 0:
LeftIndex = 2 * ParentIndex + 1
if LeftIndex < Size:
TmpParent = ParentIndex
TmpChildL = LeftIndex
TmpChildR = TmpChildL + 1
while TmpChildL < Size:
if DataList[TmpChildL] > DataList[TmpParent]:
if TmpChildR < Size and DataList[TmpChildL] < DataList[TmpChildR]:
DataList[TmpParent] , DataList[TmpChildR] = DataList[TmpChildR] , DataList[TmpParent]
TmpParent = TmpParent * 2 + 2
TmpChildL = TmpChildR * 2 + 1
else:
DataList[TmpParent] , DataList[TmpChildL] = DataList[TmpChildL] , DataList[TmpParent]
TmpParent = TmpParent * 2 + 1
TmpChildL = TmpChildL * 2 + 1
elif TmpChildR < Size and DataList[TmpChildR] > DataList[TmpParent]:
DataList[TmpParent] , DataList[TmpChildR] = DataList[TmpChildR] , DataList[TmpParent]
TmpParent = TmpParent * 2 + 2
TmpChildL = TmpChildR * 2 + 1
else:
break
TmpChildR = TmpChildL + 1
ParentIndex = ParentIndex - 1
def HeapSort(DataList):
Size = len(DataList)
Iterator = len(DataList)
Heapify(DataList, Size)
while(Size > 0):
        print(DataList[7])  # debug output left in the original; parenthesised for Python 3
DataList[0], DataList[Size - 1] = DataList[Size - 1], DataList[0]
Size = Size - 1
Heapify(DataList, Size)
DataList = [1,0,2,1,1,1,2,0]
#Heapify(DataList, len(DataList))
HeapSort(DataList)
print (" ".join( str(data) for data in DataList))
# 8
# 12 9
# 7 22 3 26
# 14 11 15 22
#
# 26
# 22 9
# 14 22 3 8
# 7 11 15 12
|
StarcoderdataPython
|
245427
|
<filename>tweet-reader/tweet_producer.py
import pandas as pd
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from kafka import KafkaProducer
from get_authentication import get_authentication
auth_dict = get_authentication()
auth = OAuthHandler(auth_dict['CONSUMER_KEY'], auth_dict['CONSUMER_SECRET'])
auth.set_access_token(auth_dict['ACCESS_TOKEN'], auth_dict['ACCESS_SECRET'])
topic_name = 'TWEETS'
producer = KafkaProducer(
bootstrap_servers=['broker:9092'])
print("Producer created")
def get_ticker_list():
# For demonstration purposes we query tweets
# with a couple of well-known companies
# instead of simply querying the quote we also quote
# the alias as most people refer to the company
# not by its stock quote (duh!) but by the company name
df = pd.DataFrame(columns=['Symbol']
, data=['AAPL', 'apple', 'MSFT', 'microsoft', 'AMZN', 'amazon',
'FB', 'facebook', 'GOOG', 'google', 'NVDA', 'nvidia', 'ADBE', 'adobe'])
#df = pd.read_csv("NASDAQ_tickers.csv").iloc[:10]
return ['#'+x for x in df.Symbol.values.tolist()]
class Listener(StreamListener):
def on_status(self, raw_data):
print(raw_data.text)
producer.send(topic_name, str.encode(raw_data.text))
return True
def get_tweets(lst):
print(lst)
while True:
listener = Listener()
stream = Stream(auth, listener)
stream.filter(track=lst, stall_warnings=True)
if __name__ == "__main__":
lst = get_ticker_list()
get_tweets(lst)
|
StarcoderdataPython
|
12846719
|
<filename>scripts/figures/gene_abundance.py
#%%
import scanpy as sc
import pandas as pd
from pathlib import Path
from vectorseq.utils import check_gene_abundance, create_dir
from vectorseq.marker_constants import BrainGenes
data_dir = Path("/spare_volume/vectorseq-data")
figure_save_dir = create_dir(data_dir / "gene_abundance")
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3250, Brain Region: v1
#%%
experiment_id = "3250"
brain_region = "v1"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
filtered_tg_list = [
gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
"Snap25",
"Rbfox3",
"Slc17a6",
"Camk2a",
"Gad1",
"Gad2",
"Mog",
"Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list
count_fractions_df = pd.DataFrame()
for gene in gene_list:
temp = check_gene_abundance(adata, gene_of_interest=gene)
if not temp.empty:
count_fractions_df = count_fractions_df.append(
pd.DataFrame.from_dict(
{
"gene": gene,
"number_of_expressing_cells": temp.shape[0],
"number_of_reads": temp.goi_counts.sum(),
"abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
},
orient="index",
).T
)
print(f"{gene} detected.")
else:
print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
# %%
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3382, Brain Region: snr
#%%
experiment_id = "3382"
brain_region = "snr"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
filtered_tg_list = [
gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
"Snap25",
"Rbfox3",
"Slc17a6",
"Camk2a",
"Gad1",
"Gad2",
"Mog",
"Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list
count_fractions_df = pd.DataFrame()
for gene in gene_list:
temp = check_gene_abundance(adata, gene_of_interest=gene)
if not temp.empty:
count_fractions_df = count_fractions_df.append(
pd.DataFrame.from_dict(
{
"gene": gene,
"number_of_expressing_cells": temp.shape[0],
"number_of_reads": temp.goi_counts.sum(),
"abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
},
orient="index",
).T
)
print(f"{gene} detected.")
else:
print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
# %%
#%% [markdown]
# ## Gene Abundance Table for Experiment: 3454, Brain Region: sc
#%%
data_dir = Path("/spare_volume/vectorseq-data")
experiment_id = "3454"
brain_region = "sc"
run_dir = data_dir / experiment_id / brain_region
all_cells_output_dir = create_dir(run_dir / "all_cells")
adata = sc.read_h5ad(all_cells_output_dir / "filter" / "adata.h5ad")
filtered_tg_list = [
gene for gene in BrainGenes.TG_MARKERS if gene.upper() in adata.obs.columns
]
endogenous_genes_list = [
"Snap25",
"Rbfox3",
"Slc17a6",
"Camk2a",
"Gad1",
"Gad2",
"Mog",
"Flt1",
]
gene_list = filtered_tg_list + endogenous_genes_list
count_fractions_df = pd.DataFrame()
for gene in gene_list:
temp = check_gene_abundance(adata, gene_of_interest=gene)
if not temp.empty:
count_fractions_df = count_fractions_df.append(
pd.DataFrame.from_dict(
{
"gene": gene,
"number_of_expressing_cells": temp.shape[0],
"number_of_reads": temp.goi_counts.sum(),
"abundance_in_expressing_cells": f"{round(temp.percent_count_goi.mean(),2)} +/- {round(temp.percent_count_goi.std(),2)}",
},
orient="index",
).T
)
print(f"{gene} detected.")
else:
print(f"{gene} not detected.")
count_fractions_df.set_index(keys="gene", drop=True, inplace=True)
count_fractions_df.to_csv(
figure_save_dir / f"{experiment_id}_{brain_region}_all_cells_gene_abundance.csv"
)
#%%
|
StarcoderdataPython
|
9689187
|
# Copyright (c) 2020 Rik079, <NAME>, Zibadian, Micro-T. All rights reserved.
__version__ = "Alpha"
# Discord login Token
token = ""
# Path to modules folder
modulepath = "./modules"
# AWS credentials
aws_id = ''
aws_secret = ''
aws_region = 'us-west-2'
# Staff
# ------------------------
# Admins
adminids = []
# Tech guys
botadminids = []
|
StarcoderdataPython
|
1950126
|
<reponame>prodProject/WorkkerAndConsumerServer
from enum import Enum
from CommonCode.passwordHashOrDehashHelper import PasswordHasherOrDeHasher
from Enums.passwordEnum import PasswordMode
from Password.passwordHelper import PasswordHelper
from Services.loginService import LoginService
class States(Enum):
START = 0,
GET_PASSWORD_MODE = 1,
GENEREATE_PASSWORD = 2,
GET_LOGIN = 3,
VERIFY_PASSWORD = 4,
DONE = 5,
class GenereateAndVerifyPassword:
m_helper = PasswordHelper()
m_loginService = LoginService()
m_passwordEncrytorOrDecryptor = PasswordHasherOrDeHasher();
m_login = None
pb = None
mode = None
m_isValid = False
def start(self, pb, mode):
self.pb = pb
self.mode = mode
self.controlFlow(currentState=States.GET_PASSWORD_MODE)
def done(self):
if (self.mode == PasswordMode.GENERATE_PASSWORD):
return self.pb
else:
return self.m_isValid
def getPasswordMode(self):
if (self.mode == PasswordMode.GENERATE_PASSWORD):
self.controlFlow(currentState=States.GENEREATE_PASSWORD)
elif (self.mode == PasswordMode.VERIFY_PASSWORD):
self.controlFlow(currentState=States.GET_LOGIN)
else:
self.controlFlow(currentState=States.DONE)
def getGenreatePassword(self):
self.pb.password = self.m_passwordEncrytorOrDecryptor.getMd5hashFromPassWord(
password=self.m_helper.getPasswordFromLoginPb(loginPb=self.pb))
self.controlFlow(currentState=States.DONE)
def getLogin(self):
self.m_login = self.m_loginService.get(id=self.pb.dbInfo.id)
self.controlFlow(currentState=States.VERIFY_PASSWORD)
def getVerifyPassWord(self):
self.m_isValid = self.m_passwordEncrytorOrDecryptor.getMd5PasswordMatch(
actualPassword=self.m_helper.getPasswordFromLoginPb(loginPb=self.pb),
hashedPassword=self.m_login.password)
self.controlFlow(currentState=States.DONE)
def controlFlow(self, currentState):
if (currentState == States.GET_PASSWORD_MODE):
self.getPasswordMode()
elif (currentState == States.GENEREATE_PASSWORD):
self.getGenreatePassword()
elif (currentState == States.GET_LOGIN):
self.getLogin()
elif (currentState == States.VERIFY_PASSWORD):
self.getVerifyPassWord()
elif (currentState == States.DONE):
self.done()
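# Usage sketch (the protobuf field layout is an assumption based on the
# attribute accesses above):
#
#     flow = GenereateAndVerifyPassword()
#     flow.start(pb=login_pb, mode=PasswordMode.GENERATE_PASSWORD)
#     hashed_pb = flow.done()        # login pb whose password is now an MD5 hash
#
#     flow.start(pb=login_pb, mode=PasswordMode.VERIFY_PASSWORD)
#     is_valid = flow.done()         # True when the supplied password matches the stored hash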
|
StarcoderdataPython
|
8086206
|
<gh_stars>0
from generator import User
from generator import Database
def generator_bot():
print("Bienvenido al sistema de gestión de usuarios!")
Database.create_table()
menu()
def menu():
res = input('Quiere crear, eliminar o buscar un usuario? \n[a] Crear \n[b] Eliminar \n[c] Buscar \n> ')
if res == "a":
return create_user()
elif res == "b":
return delete_user()
elif res == "c":
return get_user()
else:
print_message()
return menu()
def create_user():
name = input("Ingrese el nombre: ", ).capitalize()
surname = input("Ingrese el apellido: ", ).capitalize()
area = input("Ingrese el área: ", ).upper()
user1 = User(name, surname)
username = user1.username()
codigo = username.encode('utf-8').hex()[:6].upper()
vm = virtual_machine(codigo, area)
data = Database(name, surname, area, username, vm)
data.put_in()
print("Usuario creado satisfactoriamente... ")
data.get_user(username)
close_question()
def virtual_machine(codigo, area):
res = input("Por favor, seleccione el Sistema Operativo de la VM: \n[a] Windows \n[b] Linux \n>")
if res == "a":
return "VMW" + codigo + area[:3] + str(User.today.month)
elif res == "b":
return "VML" + codigo + area[:3] + str(User.today.month)
else:
print_message()
virtual_machine()
def delete_user():
res = input('Por favor ingrese el nombre de usuario que desea eliminar: ', )
Database.get_user(res)
query = Database.delete(res)
close_question()
def get_user():
res = input('Por favor ingrese el nombre de usuario que desea buscar: ', )
Database.get_user(res)
close_question()
def print_message():
print("Selección equivocada, por favor vuelva a seleccionar.")
def close_question():
res = input('Desea hacer algo más? \n[a] Si \n[b] No \n> ')
if res == "a":
return menu()
elif res == "b":
print("Usted ha finalizado sesión.")
else:
print_message()
return close_question()
generator_bot()
|
StarcoderdataPython
|
9676701
|
import logging
from ..settings import azure_configs
from ..settings import local_configs
from ..settings import gcp_configs
from .base import BlobStorage
from .gcp import GoogleCloudStorage
from .azure import AzureStorage
from .local import LocalStorage
def BlobStorageFactory(provider="local") -> BlobStorage:
"""Create a storage provider.
Args:
provider: the name of the storage provider. Choose among
`local` and `gcp`.
Return: a storage provider of the class `BlobStorage`.
"""
if provider == "local":
root = local_configs.get("root")
return LocalStorage(root)
if provider == "gcp":
project_id = gcp_configs.get("project_id")
bucket_name = gcp_configs.get("bucket_name")
service_account_file = gcp_configs.get("service_account_file")
return GoogleCloudStorage(project_id=project_id,
bucket_name=bucket_name,
service_account_file=service_account_file)
if provider == "azure":
# Set logging level
connection_string = azure_configs.get("connection_string")
container_name = azure_configs.get("container_name")
log_level = int(azure_configs.get("log_level"))
logging.getLogger("azure.storage.common.storageclient")\
.setLevel(log_level)
return AzureStorage(connection_string=connection_string,
container_name=container_name)
raise ValueError("Uknown provider: "+provider)
|
StarcoderdataPython
|
168935
|
<filename>vendor/iptables-1.8.7/iptables-test.py
#!/usr/bin/env python
#
# (C) 2012-2013 by <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software has been sponsored by <NAME> <http://www.sophos.com>
#
from __future__ import print_function
import sys
import os
import subprocess
import argparse
IPTABLES = "iptables"
IP6TABLES = "ip6tables"
ARPTABLES = "arptables"
EBTABLES = "ebtables"
IPTABLES_SAVE = "iptables-save"
IP6TABLES_SAVE = "ip6tables-save"
ARPTABLES_SAVE = "arptables-save"
EBTABLES_SAVE = "ebtables-save"
#IPTABLES_SAVE = ['xtables-save','-4']
#IP6TABLES_SAVE = ['xtables-save','-6']
EXTENSIONS_PATH = "extensions"
LOGFILE="/tmp/iptables-test.log"
log_file = None
class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def print_error(reason, filename=None, lineno=None):
'''
Prints an error with nice colors, indicating file and line number.
'''
print(filename + ": " + Colors.RED + "ERROR" +
Colors.ENDC + ": line %d (%s)" % (lineno, reason))
def delete_rule(iptables, rule, filename, lineno):
'''
Removes an iptables rule
'''
cmd = iptables + " -D " + rule
ret = execute_cmd(cmd, filename, lineno)
if ret == 1:
reason = "cannot delete: " + iptables + " -I " + rule
print_error(reason, filename, lineno)
return -1
return 0
def run_test(iptables, rule, rule_save, res, filename, lineno, netns):
'''
Executes an unit test. Returns the output of delete_rule().
Parameters:
:param iptables: string with the iptables command to execute
:param rule: string with iptables arguments for the rule to test
:param rule_save: string to find the rule in the output of iptables -save
:param res: expected result of the rule. Valid values: "OK", "FAIL"
:param filename: name of the file tested (used for print_error purposes)
:param lineno: line number being tested (used for print_error purposes)
'''
ret = 0
cmd = iptables + " -A " + rule
if netns:
cmd = "ip netns exec ____iptables-container-test " + EXECUTEABLE + " " + cmd
ret = execute_cmd(cmd, filename, lineno)
#
# report failed test
#
if ret:
if res == "OK":
reason = "cannot load: " + cmd
print_error(reason, filename, lineno)
return -1
else:
# do not report this error
return 0
else:
if res == "FAIL":
reason = "should fail: " + cmd
print_error(reason, filename, lineno)
delete_rule(iptables, rule, filename, lineno)
return -1
matching = 0
splitted = iptables.split(" ")
if len(splitted) == 2:
if splitted[1] == '-4':
command = IPTABLES_SAVE
elif splitted[1] == '-6':
command = IP6TABLES_SAVE
elif len(splitted) == 1:
if splitted[0] == IPTABLES:
command = IPTABLES_SAVE
elif splitted[0] == IP6TABLES:
command = IP6TABLES_SAVE
elif splitted[0] == ARPTABLES:
command = ARPTABLES_SAVE
elif splitted[0] == EBTABLES:
command = EBTABLES_SAVE
command = EXECUTEABLE + " " + command
if netns:
command = "ip netns exec ____iptables-container-test " + command
args = splitted[1:]
proc = subprocess.Popen(command, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
#
# check for segfaults
#
if proc.returncode == -11:
reason = "iptables-save segfaults: " + cmd
print_error(reason, filename, lineno)
delete_rule(iptables, rule, filename, lineno)
return -1
# find the rule
matching = out.find(rule_save.encode('utf-8'))
if matching < 0:
reason = "cannot find: " + iptables + " -I " + rule
print_error(reason, filename, lineno)
delete_rule(iptables, rule, filename, lineno)
return -1
# Test "ip netns del NETNS" path with rules in place
if netns:
return 0
return delete_rule(iptables, rule, filename, lineno)
def execute_cmd(cmd, filename, lineno):
'''
Executes a command, checking for segfaults and returning the command exit
code.
:param cmd: string with the command to be executed
:param filename: name of the file tested (used for print_error purposes)
:param lineno: line number being tested (used for print_error purposes)
'''
global log_file
if cmd.startswith('iptables ') or cmd.startswith('ip6tables ') or cmd.startswith('ebtables ') or cmd.startswith('arptables '):
cmd = EXECUTEABLE + " " + cmd
print("command: {}".format(cmd), file=log_file)
ret = subprocess.call(cmd, shell=True, universal_newlines=True,
stderr=subprocess.STDOUT, stdout=log_file)
log_file.flush()
# generic check for segfaults
if ret == -11:
reason = "command segfaults: " + cmd
print_error(reason, filename, lineno)
return ret
def run_test_file(filename, netns):
'''
Runs a test file
:param filename: name of the file with the test rules
'''
#
# if this is not a test file, skip.
#
if not filename.endswith(".t"):
return 0, 0
if "libipt_" in filename:
iptables = IPTABLES
elif "libip6t_" in filename:
iptables = IP6TABLES
elif "libxt_" in filename:
iptables = IPTABLES
elif "libarpt_" in filename:
# only supported with nf_tables backend
if EXECUTEABLE != "xtables-nft-multi":
return 0, 0
iptables = ARPTABLES
elif "libebt_" in filename:
# only supported with nf_tables backend
if EXECUTEABLE != "xtables-nft-multi":
return 0, 0
iptables = EBTABLES
else:
# default to iptables if not known prefix
iptables = IPTABLES
f = open(filename)
tests = 0
passed = 0
table = ""
total_test_passed = True
if netns:
execute_cmd("ip netns add ____iptables-container-test", filename, 0)
for lineno, line in enumerate(f):
if line[0] == "#" or len(line.strip()) == 0:
continue
if line[0] == ":":
chain_array = line.rstrip()[1:].split(",")
continue
# external non-iptables invocation, executed as is.
if line[0] == "@":
external_cmd = line.rstrip()[1:]
if netns:
external_cmd = "ip netns exec ____iptables-container-test " + external_cmd
execute_cmd(external_cmd, filename, lineno)
continue
# external iptables invocation, executed as is.
if line[0] == "%":
external_cmd = line.rstrip()[1:]
if netns:
external_cmd = "ip netns exec ____iptables-container-test " + EXECUTEABLE + " " + external_cmd
execute_cmd(external_cmd, filename, lineno)
continue
if line[0] == "*":
table = line.rstrip()[1:]
continue
if len(chain_array) == 0:
print("broken test, missing chain, leaving")
sys.exit()
test_passed = True
tests += 1
for chain in chain_array:
item = line.split(";")
if table == "":
rule = chain + " " + item[0]
else:
rule = chain + " -t " + table + " " + item[0]
if item[1] == "=":
rule_save = chain + " " + item[0]
else:
rule_save = chain + " " + item[1]
res = item[2].rstrip()
ret = run_test(iptables, rule, rule_save,
res, filename, lineno + 1, netns)
if ret < 0:
test_passed = False
total_test_passed = False
break
if test_passed:
passed += 1
if netns:
execute_cmd("ip netns del ____iptables-container-test", filename, 0)
if total_test_passed:
print(filename + ": " + Colors.GREEN + "OK" + Colors.ENDC)
f.close()
return tests, passed
def show_missing():
'''
Show the list of missing test files
'''
file_list = os.listdir(EXTENSIONS_PATH)
testfiles = [i for i in file_list if i.endswith('.t')]
libfiles = [i for i in file_list
if i.startswith('lib') and i.endswith('.c')]
def test_name(x):
return x[0:-2] + '.t'
missing = [test_name(i) for i in libfiles
if not test_name(i) in testfiles]
print('\n'.join(missing))
#
# main
#
def main():
parser = argparse.ArgumentParser(description='Run iptables tests')
parser.add_argument('filename', nargs='*',
metavar='path/to/file.t',
help='Run only this test')
parser.add_argument('-H', '--host', action='store_true',
help='Run tests against installed binaries')
parser.add_argument('-l', '--legacy', action='store_true',
help='Test iptables-legacy')
parser.add_argument('-m', '--missing', action='store_true',
help='Check for missing tests')
parser.add_argument('-n', '--nftables', action='store_true',
help='Test iptables-over-nftables')
parser.add_argument('-N', '--netns', action='store_true',
help='Test netnamespace path')
args = parser.parse_args()
#
# show list of missing test files
#
if args.missing:
show_missing()
return
global EXECUTEABLE
EXECUTEABLE = "xtables-legacy-multi"
if args.nftables:
EXECUTEABLE = "xtables-nft-multi"
if os.getuid() != 0:
print("You need to be root to run this, sorry")
return
if not args.host:
os.putenv("XTABLES_LIBDIR", os.path.abspath(EXTENSIONS_PATH))
os.putenv("PATH", "%s/iptables:%s" % (os.path.abspath(os.path.curdir),
os.getenv("PATH")))
test_files = 0
tests = 0
passed = 0
# setup global var log file
global log_file
try:
log_file = open(LOGFILE, 'w')
except IOError:
print("Couldn't open log file %s" % LOGFILE)
return
if args.filename:
file_list = args.filename
else:
file_list = [os.path.join(EXTENSIONS_PATH, i)
for i in os.listdir(EXTENSIONS_PATH)
if i.endswith('.t')]
file_list.sort()
if not args.netns:
try:
import unshare
unshare.unshare(unshare.CLONE_NEWNET)
except:
print("Cannot run in own namespace, connectivity might break")
for filename in file_list:
file_tests, file_passed = run_test_file(filename, args.netns)
if file_tests:
tests += file_tests
passed += file_passed
test_files += 1
print("%d test files, %d unit tests, %d passed" % (test_files, tests, passed))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1930740
|
<filename>.venv/lib/python3.8/site-packages/opencensus/trace/span_context.py<gh_stars>0
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SpanContext encapsulates the current context within the request's trace."""
import six
import logging
import random
import re
from opencensus.trace import trace_options as trace_options_module
_INVALID_TRACE_ID = '0' * 32
INVALID_SPAN_ID = '0' * 16
TRACE_ID_PATTERN = re.compile('[0-9a-f]{32}?')
SPAN_ID_PATTERN = re.compile('[0-9a-f]{16}?')
# Default options, don't force sampling
DEFAULT_OPTIONS = '0'
class SpanContext(object):
"""SpanContext includes 3 fields: traceId, spanId, and an trace_options flag
which indicates whether or not the request is being traced. It contains the
current context to be propagated to the child spans.
:type trace_id: str
:param trace_id: (Optional) Trace_id is a 32 digits uuid for the trace.
If not given, will generate one automatically.
:type span_id: str
:param span_id: (Optional) Identifier for the span, unique within a trace.
:type trace_options: :class: `~opencensus.trace.trace_options.TraceOptions`
:param trace_options: (Optional) TraceOptions indicates 8 trace options.
:type from_header: bool
:param from_header: (Optional) Indicates whether the trace context is
generated from request header.
"""
def __init__(
self,
trace_id=None,
span_id=None,
trace_options=None,
tracestate=None,
from_header=False):
if trace_id is None:
trace_id = generate_trace_id()
if trace_options is None:
trace_options = trace_options_module.TraceOptions(DEFAULT_OPTIONS)
self.from_header = from_header
self.trace_id = self._check_trace_id(trace_id)
self.span_id = self._check_span_id(span_id)
self.trace_options = trace_options
self.tracestate = tracestate
def __repr__(self):
"""Returns a string form of the SpanContext.
:rtype: str
:returns: String form of the SpanContext.
"""
fmt = '{}(trace_id={}, span_id={}, trace_options={}, tracestate={})'
return fmt.format(
type(self).__name__,
self.trace_id,
self.span_id,
self.trace_options,
self.tracestate,
)
def _check_span_id(self, span_id):
"""Check the format of the span_id to ensure it is 16-character hex
value representing a 64-bit number. If span_id is invalid, logs a
warning message and returns None
:type span_id: str
        :param span_id: Identifier for the span, unique within a trace.
:rtype: str
:returns: Span_id for the current span.
"""
if span_id is None:
return None
assert isinstance(span_id, six.string_types)
        if span_id == INVALID_SPAN_ID:
logging.warning(
'Span_id %s is invalid (cannot be all zero)', span_id)
self.from_header = False
return None
match = SPAN_ID_PATTERN.match(span_id)
if match:
return span_id
else:
logging.warning(
                'Span_id %s does not match the '
'required format', span_id)
self.from_header = False
return None
def _check_trace_id(self, trace_id):
"""Check the format of the trace_id to ensure it is 32-character hex
value representing a 128-bit number. If trace_id is invalid, returns a
randomly generated trace id
:type trace_id: str
:param trace_id:
:rtype: str
:returns: Trace_id for the current context.
"""
assert isinstance(trace_id, six.string_types)
        if trace_id == _INVALID_TRACE_ID:
logging.warning(
'Trace_id %s is invalid (cannot be all zero), '
'generating a new one.', trace_id)
self.from_header = False
return generate_trace_id()
match = TRACE_ID_PATTERN.match(trace_id)
if match:
return trace_id
else:
logging.warning(
                'Trace_id %s does not match the required format, '
'generating a new one instead.', trace_id)
self.from_header = False
return generate_trace_id()
def generate_span_id():
"""Return the random generated span ID for a span. Must be a 16 character
hexadecimal encoded string
:rtype: str
:returns: 16 digit randomly generated hex trace id.
"""
return '{:016x}'.format(random.getrandbits(64))
def generate_trace_id():
"""Generate a random 32 char hex trace_id.
:rtype: str
:returns: 32 digit randomly generated hex trace id.
"""
return '{:032x}'.format(random.getrandbits(128))
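# Illustrative usage sketch (not part of the upstream opencensus module):
# generated IDs should match the regex patterns defined above, and a default
# SpanContext gets a fresh 32-hex-digit trace id.
if __name__ == '__main__':
    assert TRACE_ID_PATTERN.match(generate_trace_id())
    assert SPAN_ID_PATTERN.match(generate_span_id())
    print(SpanContext())  # e.g. SpanContext(trace_id=..., span_id=None, ...)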
|
StarcoderdataPython
|
11382232
|
<filename>services/ap_to_redis.py
#!/usr/bin/env python3
from mycelium.components import RedisBridge, Connector
from mycelium_utils import Scripter
class ScripterExt(Scripter):
def run_main(self):
rb = RedisBridge(db=self.rd_cfg.databases['robot'])
self.conn = Connector(self.cfg.ap_to_redis, self.cfg.connection_baudrate, 1, 0)
params = self.rd_cfg.robot
while not self.exit_threads:
try:
self.conn.send_heartbeat()
m = self.conn.get_callbacks(params)
if m is not None:
rb.add_key(m.to_json(), m.get_type(), to_json=False)
except:
pass
def close_script(self):
try:
self.conn.disconnect()
except:
pass
scripter = ScripterExt(log_source="ap_to_redis")
scripter.run()
|
StarcoderdataPython
|
3359377
|
<gh_stars>0
"""
Common utilities
"""
import numpy as np
import torch
from shapely.geometry import Polygon
def check_numpy_to_torch(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def check_contain_nan(x):
if isinstance(x, dict):
return any(check_contain_nan(v) for k, v in x.items())
if isinstance(x, list):
return any(check_contain_nan(itm) for itm in x)
if isinstance(x, int) or isinstance(x, float):
return False
if isinstance(x, np.ndarray):
return np.any(np.isnan(x))
return torch.any(x.isnan()).detach().cpu().item()
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), radians, angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3].float(), rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
def rotate_points_along_z_2d(points, angle):
"""
    Rotate the points along the z-axis.
Parameters
----------
points : torch.Tensor / np.ndarray
(N, 2).
angle : torch.Tensor / np.ndarray
(N,)
Returns
-------
points_rot : torch.Tensor / np.ndarray
        Rotated points with shape (N, 2)
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
# (N, 2, 2)
rot_matrix = torch.stack((cosa, sina, -sina, cosa), dim=1).view(-1, 2,
2).float()
points_rot = torch.einsum("ik, ikj->ij", points.float(), rot_matrix)
return points_rot.numpy() if is_numpy else points_rot
def remove_ego_from_objects(objects, ego_id):
"""
Avoid adding ego vehicle to the object dictionary.
Parameters
----------
objects : dict
The dictionary contained all objects.
ego_id : int
Ego id.
"""
if ego_id in objects:
del objects[ego_id]
def retrieve_ego_id(base_data_dict):
"""
Retrieve the ego vehicle id from sample(origin format).
Parameters
----------
base_data_dict : dict
Data sample in origin format.
Returns
-------
ego_id : str
The id of ego vehicle.
"""
ego_id = None
for cav_id, cav_content in base_data_dict.items():
if cav_content['ego']:
ego_id = cav_id
break
return ego_id
def compute_iou(box, boxes):
"""
Compute iou between box and boxes list
Parameters
----------
box : shapely.geometry.Polygon
Bounding box Polygon.
boxes : list
List of shapely.geometry.Polygon.
Returns
-------
iou : np.ndarray
Array of iou between box and boxes.
"""
# Calculate intersection areas
iou = [box.intersection(b).area / box.union(b).area for b in boxes]
return np.array(iou, dtype=np.float32)
def convert_format(boxes_array):
"""
Convert boxes array to shapely.geometry.Polygon format.
Parameters
----------
boxes_array : np.ndarray
(N, 4, 2) or (N, 8, 3).
Returns
-------
list of converted shapely.geometry.Polygon object.
"""
polygons = [Polygon([(box[i, 0], box[i, 1]) for i in range(4)]) for box in
boxes_array]
return np.array(polygons)
def torch_tensor_to_numpy(torch_tensor):
"""
Convert a torch tensor to numpy.
Parameters
----------
torch_tensor : torch.Tensor
Returns
-------
A numpy array.
"""
return torch_tensor.numpy() if not torch_tensor.is_cuda else \
torch_tensor.cpu().detach().numpy()
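# Illustrative usage sketch (not part of the original module): rotating the
# point (1, 0) by pi/2 about the z-axis should give approximately (0, 1).
if __name__ == "__main__":
    _pts = np.array([[1.0, 0.0]])
    _ang = np.array([np.pi / 2])
    print(rotate_points_along_z_2d(_pts, _ang))  # ~[[0., 1.]]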
|
StarcoderdataPython
|
11336293
|
"""This module contains custom mpld3 plugins to add useful features to the graph.
The JavaScript in the Python files was built from the TypeScript source files in the
`mpld3-plugins` directory.
Classes
-------
InteractiveLegend
Class defining an mpld3 plugin to create an interactive legend.
RangeSelectorButtons
Class defining an mpld3 plugin to create range selector buttons.
SaveImageButtons
Class defining an mpld3 plugin to create save as image buttons.
TimeSeriesTooltip
Class defining an mpld3 plugin to create line graph tooltips.
"""
from autoplot.plugins.interactive_legend import InteractiveLegend
from autoplot.plugins.range_selector_buttons import RangeSelectorButtons
from autoplot.plugins.save_image_buttons import SaveImageButtons
from autoplot.plugins.time_series_tooltip import TimeSeriesTooltip
|
StarcoderdataPython
|
11374782
|
<gh_stars>1-10
from unittest.result import TestResult, failfast
from instant_coverage import clear_url_caches
from django.http import HttpResponse
from django.test import SimpleTestCase
from django.test.utils import setup_test_environment, teardown_test_environment
from django.views.generic import View
from mock import patch
import six
def mocked_patterns(patterns):
clear_url_caches()
return patch('instant_coverage.tests.urls.urlpatterns', patterns)
class PickyTestResult(TestResult):
"""
A TestResult subclass that will retain just exceptions and messages from
tests run, rather than storing an entire traceback.
"""
@failfast
def addFailure(self, test, err):
self.failures.append((test, err))
def get_results_for(test_name, mixin=None, **test_attributes):
from instant_coverage import InstantCoverageMixin
if mixin is None:
class EverythingTest(InstantCoverageMixin, SimpleTestCase):
pass
else:
class EverythingTest(mixin, InstantCoverageMixin, SimpleTestCase):
pass
try:
setup_test_environment()
except RuntimeError:
# look, this is gross, but what we're doing here to make an in-test
# fake test environment is pretty gross already, so let's just placate
# django for now:
teardown_test_environment()
setup_test_environment()
test = EverythingTest(test_name)
for attribute, value in six.iteritems(test_attributes):
setattr(test, attribute, value)
result = PickyTestResult()
if hasattr(test, '_pre_setup'):
test._pre_setup()
test.run(result)
if not result.errors == []:
# there should only ever be failures; if there's an error we should
# throw something useful
raise Exception(result.errors[0][1])
return result
class WorkingView(View):
def get(self, request, *args, **kwargs):
return HttpResponse()
class BrokenView(View):
def get(self, request, *args, **kwargs):
raise Exception('this view is broken')
|
StarcoderdataPython
|
3399899
|
import requests
import json
import re
import psycopg2.extensions
import bot.secret as secret
def reply_text(cur, reply_token, REPLY_ENDPOINT, HEADER, text, userid):
reply = ''
"""
url= secret.WCDAPI
response = requests.get(url)
tenki = json.loads(response.text)
"""
if re.match('登録 ', text):
memo = text[3:]
cur.execute("INSERT INTO touroku(userid, data) VALUES(%s, %s);", [
userid, memo])
reply += "「" + memo + '」を登録しました。'
elif re.match('削除 ', text):
memo = text[3:]
if memo == '全部' or memo == 'ぜんぶ' or memo == 'すべて' or memo == '全て':
cur.execute("DELETE FROM touroku WHERE userid=%s", [userid])
reply += "すべてのメモを削除しました。"
elif memo == '最後' or memo == 'さいご':
cur.execute("SELECT * FROM touroku WHERE userid=%s", [userid])
sakujo_taplelist = cur.fetchall()
last_memo = len(sakujo_taplelist) - 1
idz = sakujo_taplelist[last_memo][0]
reply += "「" + sakujo_taplelist[last_memo][2] + "」を削除しました。"
cur.execute("DELETE FROM touroku WHERE id=%s", [idz])
else:
memo = int(memo) - 1
cur.execute("SELECT * FROM touroku WHERE userid=%s", [userid])
sakujo_taplelist = cur.fetchall()
idz = sakujo_taplelist[memo][0]
reply += "「" + sakujo_taplelist[memo][2] + "」を削除しました。"
cur.execute("DELETE FROM touroku WHERE id=%s", [idz])
elif text == '一覧':
cur.execute("SELECT * FROM touroku WHERE userid = %s", [userid])
itiran_taplelist = cur.fetchall()
        if len(itiran_taplelist) != 0:
print(itiran_taplelist)
for i, j in enumerate(itiran_taplelist):
reply += str(i+1) + " " + j[2] + '\n'
reply = reply[:-1]
else:
reply += "何も登録されていません!"
elif re.match('おうむがえし ', text):
reply += text[7:]
elif re.match('userid', text):
reply += userid
payload = {
"replyToken": reply_token,
"messages": [
{
"type": "text",
"text": reply
}
]
}
requests.post(REPLY_ENDPOINT, headers=HEADER,
                  data=json.dumps(payload))  # send the reply data to LINE
return reply
|
StarcoderdataPython
|
3298573
|
"""
Testing tool to validate serialization and deserialization.
WARNING: Not for production use.
Specifically constructed to assist with json loading and dumping within the library.
As a secondary case, this displays how many python serializers/deserializers should be able to take
advantage of the dataclass usage.
"""
from dataclasses import asdict, is_dataclass
from enum import Enum
import json
import os
from pathlib import Path
from typing import Any, Callable, List, Optional, Type, TypeVar, Union, cast
from pydantic.json import pydantic_encoder
from pydantic.tools import parse_raw_as, parse_obj_as
from electionguard.group import hex_to_int, int_to_hex
T = TypeVar("T")
def construct_path(
target_file_name: str,
target_path: Optional[Path] = None,
target_file_extension="json",
) -> Path:
"""Construct path from file name, path, and extension."""
target_file = f"{target_file_name}.{target_file_extension}"
return os.path.join(target_path, target_file)
def from_raw(type_: Type[T], obj: Any) -> T:
"""Deserialize raw as type."""
obj = custom_decoder(obj)
return cast(type_, parse_raw_as(type_, obj))
def to_raw(data: Any) -> Any:
"""Serialize data to raw json format."""
return json.dumps(data, indent=4, default=custom_encoder)
def from_file_to_dataclass(dataclass_type_: Type[T], path: Union[str, Path]) -> T:
"""Deserialize file as dataclass type."""
with open(path, "r") as json_file:
data = json.load(json_file)
data = custom_decoder(data)
return parse_obj_as(dataclass_type_, data)
def from_list_in_file_to_dataclass(
dataclass_type_: Type[T], path: Union[str, Path]
) -> T:
"""Deserialize list of objects in file as dataclass type."""
with open(path, "r") as json_file:
data = json.load(json_file)
data = custom_decoder(data)
return cast(dataclass_type_, parse_obj_as(List[dataclass_type_], data))
def to_file(
data: Any,
target_file_name: str,
target_path: Optional[Path] = None,
target_file_extension="json",
) -> None:
"""Serialize object to file (defaultly json)."""
if not os.path.exists(target_path):
os.makedirs(target_path)
with open(
construct_path(target_file_name, target_path, target_file_extension), "w"
) as outfile:
json.dump(data, outfile, indent=4, default=custom_encoder)
# Color and abbreviation can both be of type hex but should not be converted
banlist = ["color", "abbreviation", "is_write_in"]
def _recursive_replace(object, type_: Type, replace: Callable[[Any], Any]):
"""Iterate through object to replace."""
if isinstance(object, dict):
for key, item in object.items():
if isinstance(item, (dict, list)):
object[key] = _recursive_replace(item, type_, replace)
if isinstance(item, type_) and key not in banlist:
object[key] = replace(item)
if isinstance(object, list):
for index, item in enumerate(object):
if isinstance(item, (dict, list)):
object[index] = _recursive_replace(item, type_, replace)
if isinstance(item, type_):
object[index] = replace(item)
return object
class NumberEncodeOption(Enum):
"""Option for encoding numbers."""
Int = "int"
Hex = "hex"
# Base64 = "base64"
OPTION = NumberEncodeOption.Hex
def _get_int_encoder() -> Callable[[Any], Any]:
if OPTION is NumberEncodeOption.Hex:
return int_to_hex
return lambda x: x
def custom_encoder(obj: Any) -> Any:
"""Integer encoder to convert int representations to type for json."""
if is_dataclass(obj):
new_dict = asdict(obj)
obj = _recursive_replace(new_dict, int, _get_int_encoder())
return obj
return pydantic_encoder(obj)
def _get_int_decoder() -> Callable[[Any], Any]:
def safe_hex_to_int(input: str) -> Union[int, str]:
try:
return hex_to_int(input)
except ValueError:
return input
if OPTION is NumberEncodeOption.Hex:
return safe_hex_to_int
return lambda x: x
def custom_decoder(obj: Any) -> Any:
"""Integer decoder to convert json stored int back to int representations."""
return _recursive_replace(obj, str, _get_int_decoder())
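# Illustrative round trip (a sketch; `_Tally` is a hypothetical dataclass, not
# part of the library): custom_encoder writes integers out as hex strings and
# custom_decoder recovers them from the parsed JSON.
if __name__ == "__main__":
    from dataclasses import dataclass
    @dataclass
    class _Tally:
        count: int
    raw = to_raw(_Tally(count=10))
    print(raw)                              # e.g. {"count": "0A"} (hex-encoded)
    print(custom_decoder(json.loads(raw)))  # {'count': 10}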
|
StarcoderdataPython
|
8020301
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 11:01:52 2018
@author: alex
"""
import mechanize
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver
browser = webdriver.Firefox()
#mechanize can not work correctly
def login():
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.open("https://www.linkedin.com/")
browser.select_form(class_="login-form")
#browser.select_form(name="login-form")
browser["session_key"] = "your user name"
browser["session_password"] = "<PASSWORD>"
response = browser.submit()
print response.read()
#selenium can work
def selelogin(url):
browser.get(url)
loginInput = browser.find_element_by_name('session_key')
loginInput.send_keys('your user name')
passwd = browser.find_element_by_name('session_password')
passwd.send_keys('<PASSWORD>')
btn = browser.find_element_by_id('login-submit')
btn.click()
#networks()
time.sleep(3)
# can not work effectively
#networkicon = browser.find_element_by_class_name('nav-item__icon')
#networkicon = browser.find_element_by_id('mynetwork-nav-item')
#networkicon = browser.find_element_by_class_name('nav-item__link nav-item__link--underline')
networkicon = browser.find_element_by_link_text('My Network')
networkicon.click()
time.sleep(3)
seeall = browser.find_element_by_link_text('See all')
#eeall = browser.find_element_by_id('ember2962')
seeall.click()
time.sleep(3)
connects = browser.find_element_by_class_name('ember-view')
print(connects.text)
#for i in connects:
# print(i)
time.sleep(3)
    # store the page source in a file to validate it
print(browser.page_source)
f = open('te.html','w')
f.write(browser.page_source.encode('utf-8'))
f.flush()
f.close()
#source = browser.page_source
#source = source.decode(encoding='UTF-8')
#print source
#get and analysis the items
soup = BeautifulSoup((browser.page_source).encode('utf-8'),'lxml')
#soup = BeautifulSoup(browser.page_source, 'lxml')
#print soup.find('li',class_="mn-pymk-list__card display-flex flex-column").text
for i in soup.find('li',class_="mn-pymk-list__card display-flex flex-column").text:
#print i
url = i
#connects = browser.find_element_by_class_name('pymk-card__link')
#connects.click()
#print(connects.text)
#for i in connects:
# print(i)
def networks():
browser.get('http://www.linkedin.com/feed/')
networkicon = browser.find_element_by_class_name('nav-item__link nav-item__link--underline js-nav-item-link active')
networkicon.click()
def close():
browser.close()
if __name__ == '__main__':
myurl ='https://www.linkedin.com/'
#newspider(myurl)
selelogin(myurl)
#fetchfriend(myurl)
|
StarcoderdataPython
|
1968940
|
from .baserepository import BaseRepository
from factories.customer import CustomerFactory
from connection.results.customersigninresult import CustomerSignInResult
from .actions.customer import CustomerActions
from decorators.repositories import RepositoryConnected
#from decorators.decorators import RequiredParams
from requests.exceptions import HTTPError
class CustomerRepository(BaseRepository):
_endpoint = 'customers'
_factory = CustomerFactory
_actions_module = CustomerActions
@RepositoryConnected()
def create(self, customer_draft):
return CustomerSignInResult(repository=self, **super()._create(customer_draft))
@RepositoryConnected()
def get(self, id: str = None, key: str = None, email_token: str = None, password_token: str = None):
if email_token:
return self.new(**self.client.get(path='customers/email-token=%s' % (email_token)).json())
elif password_token:
            return self.new(**self.client.get(path='customers/password-token=%s' % (password_token)).json())
return super().get(id, key)
# @RequiredParams(('obj'), ('id', 'version'))
@RepositoryConnected()
def change_password(self, current_password: str, new_password: str, obj=None, id: str = None, version: int = None, force=False):
try:
if obj:
return self.new(**self.client.post(path='customers/password', json={'id': obj.id, 'version': obj.version, 'currentPassword': <PASSWORD>, 'newPassword': <PASSWORD>}).json())
if not id or not version:
raise Exception('Please, provide id and version')
return self.new(**self.client.post(path='customers/password', json={'id': id, 'version': version, 'currentPassword': <PASSWORD>, 'newPassword': <PASSWORD>}).json())
except HTTPError as error:
if force and error.response.status_code == 409:
if obj:
_obj = self.get(obj.id)
else:
_obj = self.get(id)
return self.change_password(current_password, new_password, _obj, id, _obj.version, force)
raise error
|
StarcoderdataPython
|
4911300
|
from typing import List
# O(n) time complexity and O(n) space complexity
# def backspaceCompare(self, s: str, t: str) -> bool:
# return back(s, []) == back(t, [])
#
# def back(s: str, stack: List):
# for i in s:
# if i != "#":
# stack.append(i)
# elif stack:
# stack.pop()
#
# return stack
def backspaceCompare(s, t):
p1 = len(s) - 1
p2 = len(t) - 1
while p1 >= 0 or p2 >= 0:
char1 = char2 = ""
if p1 >= 0:
char1, p1 = getChar(s, p1)
if p2 >= 0:
char2, p2 = getChar(t, p2)
if char1 != char2:
return False
return True
def getChar(s, p):
char, count = '', 0
while p >= 0 and not char:
if s[p] == '#':
count += 1
elif count == 0:
char = s[p]
else:
count -= 1
p -= 1
return char, p
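# Quick sanity checks (illustrative only, mirroring the usual examples for
# this problem): both strings are compared after applying the backspaces.
if __name__ == "__main__":
    assert backspaceCompare("ab#c", "ad#c") is True   # both reduce to "ac"
    assert backspaceCompare("ab##", "c#d#") is True   # both reduce to ""
    assert backspaceCompare("a#c", "b") is False      # "c" vs "b"
    print("backspaceCompare sanity checks passed")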
|
StarcoderdataPython
|
11325429
|
<reponame>KarsSloeserwij/SimpleAStarPython
import math
class Astar():
def __init__(self, board):
self.board = board
pass
def get_distance(self, a, b):
return abs(a.x - b.x) + abs(a.y - b.y)
def retrace_path(self, start, end):
print("FOUND PATH")
path = []
currentState = end;
while(currentState != start):
path.append(currentState);
currentState = currentState.parent;
#print("Removing Parent: " + path[path.size() - 1].name);
path[len(path) - 1].parent = None;
path.reverse()
print(path)
return path;
pass;
def find_path(self, start, end):
open_set = []
closed_set = []
open_set.append(start);
while len(open_set) > 0:
current_node = open_set[0]
for i in range(len(open_set)):
if(open_set[i].f_cost() <= current_node.f_cost() or open_set[i].h_cost < current_node.h_cost):
current_node = open_set[i]
open_set.remove(current_node);
closed_set.append(current_node);
print(current_node.x, current_node.y)
print(end.x, end.y)
#current_node.checked = True
if current_node == end:
return self.retrace_path(start, end)
for neighbour in self.board.get_cell_neighbours(current_node.x , current_node.y):
if(neighbour in closed_set):
continue;
new_movement_cost = current_node.g_cost + self.get_distance(current_node, neighbour);
if new_movement_cost < neighbour.g_cost or neighbour not in open_set:
neighbour.g_cost = new_movement_cost;
neighbour.h_cost = self.get_distance(neighbour, end);
neighbour.parent = current_node;
if ( neighbour not in open_set):
open_set.append(neighbour);
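# Illustrative harness (a sketch, not part of the original module): _Cell and
# _Board are hypothetical stand-ins exposing the attributes used above
# (x, y, g_cost, h_cost, f_cost(), parent, get_cell_neighbours).
class _Cell:
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.g_cost = 0
        self.h_cost = 0
        self.parent = None
    def f_cost(self):
        return self.g_cost + self.h_cost
class _Board:
    def __init__(self, width, height):
        self.width, self.height = width, height
        self.cells = [[_Cell(x, y) for y in range(height)] for x in range(width)]
    def get_cell_neighbours(self, x, y):
        neighbours = []
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.width and 0 <= ny < self.height:
                neighbours.append(self.cells[nx][ny])
        return neighbours
if __name__ == "__main__":
    board = _Board(3, 3)
    path = Astar(board).find_path(board.cells[0][0], board.cells[2][2])
    print([(cell.x, cell.y) for cell in path])  # a 4-step Manhattan path to (2, 2)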
|
StarcoderdataPython
|
11317134
|
from django.apps import AppConfig
class DocumentsConfig(AppConfig):
name = 'documents'
verbose_name = 'Mục Tài Liệu'
verbose_name_plural = 'Mục Tài Liệu'
|
StarcoderdataPython
|
8015142
|
import sys
import time
from os import getpid
from queue import Queue, Empty
import traceback
from _thread import allocate_lock, start_new_thread
from speedysvc.logger.std_logging.LoggerServer import LoggerServer
from speedysvc.client_server.shared_memory.SHMClient import SHMClient
from speedysvc.client_server.base_classes.ClientMethodsBase import ClientMethodsBase
from speedysvc.logger.std_logging.log_entry_types import \
NOTSET, DEBUG, INFO, ERROR, WARNING, CRITICAL, STDOUT, STDERR
# We'll use a queue here sending in a thread rather
# than synchronous logging, so as to minimise the
# risk of recursive log writes, etc
log_queue = Queue()
_old_stdout = sys.stdout
_old_stderr = sys.stderr
class LoggerClient(ClientMethodsBase):
def __init__(self, service_server_methods):
"""
A basic logger which sends stderr/stdout
output to a logging server
"""
self.lock = allocate_lock()
self.pid = getpid()
# Note that ClientMethodsBase will have a set of server methods
# associated with the log service. These are the server methods
# associated with the service itself.
self.service_server_methods = service_server_methods
self.client = SHMClient(LoggerServer, port=f'{service_server_methods.port}_log',
use_spinlock=False, use_in_process_lock=True)
ClientMethodsBase.__init__(self, client_provider=self.client)
self.stderr_logger = self._StdErrLogger(self)
self.stdout_logger = self._StdOutLogger(self)
self.__shut_me_down = False
start_new_thread(self.__log_thread, ())
def shutdown(self):
self.__shut_me_down = True
#=================================================================#
# RPC Methods #
#=================================================================#
def __log_thread(self):
"""
A lot of the time, it can be hard to know where stderr/stdout starts and ends
(e.g. print('foo', 'bar') might print foo, bar, and \n separately)
This tries to treat stdout/stderr data as a sequence of directly following
strings and merges it together, assuming they occur almost immediately after
each other (up to 0.01 seconds).
        Stdout/stderr output is emitted at the [INFO/ERROR]+9 level.
"""
cur_stderr_msg = None
cur_stdout_msg = None
method_stats_last_updated = 0
while not self.__shut_me_down:
try:
if cur_stderr_msg or cur_stdout_msg:
item = log_queue.get(timeout=0.01)
else:
item = log_queue.get(timeout=2.0)
if item[-1] == STDOUT:
if cur_stdout_msg:
# Append to the previous stdout call
cur_stdout_msg[-2] += item[-2]
else:
cur_stdout_msg = list(item)
elif item[-1] == STDERR:
if cur_stderr_msg:
# Append to the previous stderr call
cur_stderr_msg[-2] += item[-2]
else:
cur_stderr_msg = list(item)
else:
self._write_to_log_(item)
except Empty:
if cur_stdout_msg:
# If Empty is raised, a timeout has occurred
# Assume this is the end of the data that's being sent to stdout
self._write_to_log_(tuple(cur_stdout_msg[:-1]+[INFO+9]))
cur_stdout_msg = None
if cur_stderr_msg:
# The same, but for stderr
self._write_to_log_(tuple(cur_stderr_msg[:-1]+[ERROR+9]))
cur_stderr_msg = None
if time.time()-method_stats_last_updated >= 4:
# Periodically inform the management server how long methods
# are taking/how many times they're being called for benchmarks
self._update_method_stats_()
method_stats_last_updated = time.time()
                elif not cur_stdout_msg and not cur_stderr_msg and sys.platform == 'win32':
# win32 doesn't seem to allow for timeouts with the queue here
time.sleep(2.0)
except Exception as e:
# WARNING WARNING - should (hopefully) never get here
# I'm printing errors directly to the old stderr
# to prevent the risk of recursive exceptions
import traceback
_old_stderr.write(traceback.format_exc())
time.sleep(1)
def _write_to_log_(self, log_params):
"""
Should not be called directly!
:param log_params:
:return:
"""
self.send(LoggerServer._write_to_log_, log_params)
def _update_method_stats_(self):
"""
Send method statistics to the central management
interface to allow for benchmarks periodically
"""
DStats = {}
for name in dir(self.service_server_methods):
attr = getattr(self.service_server_methods, name)
if hasattr(attr, 'metadata'):
# DMetadata = {'num_calls': ..., 'total_time': ...}
DStats[name] = attr.metadata
self.send(LoggerServer._update_method_stats_, [self.pid, DStats])
#=========================================================#
# Service Status #
#=========================================================#
def get_service_status(self):
return self.send(LoggerServer.get_service_status, [])
def set_service_status(self, status):
return self.send(LoggerServer.set_service_status, [status])
#=========================================================#
# Service Time Series Data #
#=========================================================#
def get_last_record(self):
return self.send(LoggerServer.get_last_record, [])
def get_average_over(self, from_time, to_time):
return self.send(LoggerServer.get_average_over, [from_time, to_time])
def add_pid(self, pid):
return self.send(LoggerServer.add_pid, [pid])
def remove_pid(self, pid):
return self.send(LoggerServer.remove_pid, [pid])
def start_collecting(self):
return self.send(LoggerServer.start_collecting, [])
def stop_collecting(self):
return self.send(LoggerServer.stop_collecting, [])
#=================================================================#
# User-Callable Methods #
#=================================================================#
def __call__(self, msg, level=NOTSET):
"""
Output a message. This allows going self.log()
as shorthand for self.log.notset(msg)
Note this puts onto a log queue running in a different thread,
so as to prevent potential deadlocks when one thread tries to
write at the same time
:param msg: the string message
:param level: the log level, e.g. DEBUG or INFO
"""
pid = self.pid
#print(hasattr(self, 'server_methods'))
if hasattr(self.service_server_methods, 'port'):
port = self.service_server_methods.port
else:
port = -1
if hasattr(self.service_server_methods, 'name'):
service_name = self.service_server_methods.name
else:
service_name = '(unknown service)'
log_queue.put(
(int(time.time()), pid, port, service_name, msg, level)
)
def notset(self, msg):
"""
Output a message of whose level is not defined
:param msg: the string message
"""
self(msg, NOTSET)
def debug(self, msg):
"""
Output a debug message
:param msg: the string message
"""
self(msg, DEBUG)
def info(self, msg):
"""
Output an informational message
:param msg: the string message
"""
self(msg, INFO)
information = info
def error(self, msg):
"""
Output an error message
:param msg: the string message
"""
self(msg, ERROR)
def warn(self, msg):
"""
Output a warning message
:param msg: the string message
"""
self(msg, WARNING)
warning = warn
def critical(self, msg):
"""
Output a critical/fatal message
:param msg: the string message
"""
self(msg, CRITICAL)
#=================================================================#
# StdOut/StdErr Backwards Compatibility #
#=================================================================#
class _StdOutLogger:
def __init__(self, logger_client):
sys.stdout = self
self.logger_client = logger_client
def flush(self):
_old_stdout.flush()
def write(self, s):
_old_stdout.write(s)
self.logger_client(s, STDOUT)
class _StdErrLogger:
def __init__(self, logger_client):
sys.stderr = self
self.logger_client = logger_client
def flush(self):
_old_stderr.flush()
def write(self, s):
_old_stderr.write(s)
self.logger_client(s, STDERR)
|
StarcoderdataPython
|
3531607
|
from django.urls import path
from .views import register
app_name = 'accounts'
urlpatterns = [
path('cadastro-usuario/', register, name='register'),
]
|
StarcoderdataPython
|
4852204
|
import sys
import time
import threading
from ns4help import *
class Model:
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self, t_run: int):
try:
t_model = ModelFunc(self.func, *self.args, **self.kwargs)
t_htime = ModelTimeHandler(t_run)
if checksyn(*sys.argv):
t_htime.start()
t_model.start()
else:
gethelp(*sys.argv, **self.kwargs)
except IndexError as err:
errreport(err)
class ModelFunc(threading.Thread):
tRun = None
event = False
errorEvent = True
def __init__(self, func, *args, **kwargs):
threading.Thread.__init__(self)
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
if not ModelFunc.errorEvent:
# l_time = ModelFunc.tRun
s_time = time.asctime()
while not ModelFunc.event:
l_time = time.asctime()
self.func(s_time, l_time, *self.args, **self.kwargs)
# l_time -= 1
pass
class ModelTimeHandler(threading.Thread):
def __init__(self, t_run):
threading.Thread.__init__(self)
ModelFunc.tRun = t_run
def run(self):
ModelFunc.errorEvent = False
time.sleep(ModelFunc.tRun)
ModelFunc.event = True
print('Done!\n')
|
StarcoderdataPython
|
51689
|
import unittest
import prody
import numpy as np
import pytest
import itertools
from path import Path
from ..mhc_peptide import BasePDB
from ..sampling.generate_peptides import PeptideSampler
from .. import utils
from ..helpers import isolate, isolated_filesystem
@pytest.fixture()
def default_mhc():
return utils.load_gdomains_mhc('1ao7')
@pytest.fixture()
def default_pep():
return utils.load_gdomains_peptide('1ao7')
@isolate
def test_instantiate_with_seq():
sampler = PeptideSampler('ADCHTRTAC')
assert sampler.pep.numAtoms() > 10
@isolate
def test_instantiate_with_short_seq():
with pytest.raises(RuntimeError):
PeptideSampler('ADCH')
@isolate
def test_instantiate_with_long_seq():
with pytest.raises(RuntimeError):
PeptideSampler('ADCHLKKKKKKKKKKKK')
@isolate
def test_instantiate_with_wrong_letters_seq():
with pytest.raises(RuntimeError):
PeptideSampler('ADCHLBBKK')
@isolate
def test_instantiate_with_pdb():
prody.writePDB('pep.pdb', utils.load_gdomains_peptide('1ao7'))
sampler = PeptideSampler(pep='pep.pdb')
assert sampler.pep.numAtoms() > 10
@isolate
def test_instantiate_with_pep_and_mhc():
prody.writePDB('pep.pdb', utils.load_gdomains_peptide('1ao7'))
prody.writePDB('mhc.pdb', utils.load_gdomains_mhc('1ao7'))
sampler = PeptideSampler(pep='pep.pdb', rec='mhc.pdb')
assert sampler.pep.numAtoms() > 10
assert sampler.rec.numAtoms() > 100
@isolate
def test_instantiate_with_seq_and_custom_template():
prody.writePDB('template.pdb', utils.load_gdomains_peptide('1ao7'))
sampler = PeptideSampler('ADCHTRTAC', custom_template='template.pdb')
assert sampler.pep.numAtoms() > 10
@pytest.mark.parametrize('nsamples', [1, 10, 100, 1000, 15000])
def test_generate_simple(nsamples):
with isolated_filesystem():
sampler = PeptideSampler(pep=utils.load_gdomains_peptide('1ao7'))
sampler.generate_peptides(nsamples, 1, 0.3, 123)
assert sampler.brikard.numCoordsets() == nsamples
@isolate
def test_generate_with_template():
prody.writePDB('template.pdb', utils.load_gdomains_peptide('1ao7'))
sampler = PeptideSampler('ADCHTRTAC', custom_template='template.pdb')
sampler.generate_peptides(10, 1, 0.2, 123)
assert sampler.brikard.numCoordsets() == 10
@pytest.mark.parametrize('pep,rec', itertools.product(['1a1m', '1t22', '2bvo'], ['1a1m', '1t22', '2bvo']))
def test_generate_with_rec(pep, rec):
with isolated_filesystem():
sampler = PeptideSampler(pep=utils.load_gdomains_peptide(pep), rec=utils.load_gdomains_mhc(rec))
sampler.generate_peptides(10, 1, 0.2, 123)
assert sampler.brikard.numCoordsets() == 10
# check that receptor is fixed by default during sampling
def test_generate_receptor_fixed(default_mhc, default_pep):
with isolated_filesystem():
sampler = PeptideSampler(pep=default_pep, rec=default_mhc)
sampler.generate_peptides(10, 1, 0.2, 123)
assert sampler.brikard.numCoordsets() == 10
rec_fixed = sampler.brikard.select('chain A')
assert np.all(rec_fixed.getCoordsets(0) == rec_fixed.getCoordsets(1))
# check that receptor is flexible with sample_resi_within parameter set
def test_generate_receptor_flexible(default_mhc, default_pep):
with isolated_filesystem():
sampler = PeptideSampler(pep=default_pep, rec=default_mhc)
sampler.generate_peptides(10, 1, 0.2, 123, sample_resi_within=7)
assert sampler.brikard.numCoordsets() == 10
rec_flex = sampler.brikard.select('chain A')
assert np.any(rec_flex.getCoordsets(0) != rec_flex.getCoordsets(1))
@pytest.mark.parametrize('radius', range(1, 7, 2))
def test_generate_receptor_variable_radius(default_mhc, default_pep, radius):
with isolated_filesystem():
sampler = PeptideSampler(pep=default_pep, rec=default_mhc)
sampler.generate_peptides(10, 1, 0.2, 123, sample_resi_within=radius)
assert sampler.brikard.numCoordsets() == 10
|
StarcoderdataPython
|
80184
|
import discord
from discord.ext import commands
import octorest
# Placeholder so the cog is importable; supply your own OctoPrint API key here.
apikey = "REPLACE_WITH_YOUR_OCTOPRINT_API_KEY"
class OctoPrint(commands.Cog):
def __init__(self, bot: commands.AutoShardedBot):
self.bot = bot
#bot.loop.create_task(self.connect_printer())
async def connect_printer(self):
await self.bot.wait_until_ready()
try:
client = octorest.OctoRest(url="127.0.0.1:5000", apikey=apikey)
return client
except ConnectionError as ex:
# Handle exception as you wish
print(ex)
|
StarcoderdataPython
|
3238076
|
#!/usr/bin/env python
'''
This script starts all Turtlebot control services
which are defined under the `srv/` folder.
The key snippet is:
turtle_services = TurtlebotControlRosServices()
turtle_services.start()
WARNING: `SetPose` is not supported on a real robot, only in Gazebo simulation.
'''
from ros_turtlebot_control.srv import GetPose, GetPoseResponse
from ros_turtlebot_control.srv import MoveToPoint, MoveToPointResponse
from ros_turtlebot_control.srv import MoveToPose, MoveToPoseResponse
from ros_turtlebot_control.srv import MoveToRelativePoint, MoveToRelativePointResponse
from ros_turtlebot_control.srv import MoveToRelativePose, MoveToRelativePoseResponse
from ros_turtlebot_control.srv import ResetPose, ResetPoseResponse
from ros_turtlebot_control.srv import SetPose, SetPoseResponse
from ros_turtlebot_control.srv import StopMoving, StopMovingResponse
from ros_turtlebot_control.srv import IsMoving, IsMovingResponse
import rospy
import threading
import yaml
import os
import sys
from turtle_lib import Turtle
from utils.commons import read_yaml_file
ROOT = os.path.dirname(os.path.abspath(__file__))+"/"
CONFIG_FILEPATH = ROOT + "config.yaml"
NODE_NAME = 'run_turtlebot_control_server'
SRV_NAMESPACE, turtle = None, None # To be initialized later.
# ============================================================================== #
def _srv_callback_wrapper(callback_func):
''' Print messages before and after the callback function. '''
def new_callback_func(self, req):
'''Argument: `req` is the input of the ROS service call. '''
rospy.loginfo("Service: " + self._srv_name +
": Receive request: {}".format(req))
response = callback_func(self, req)
rospy.loginfo("Service: " + self._srv_name +
": Request has been sent to turtlebot_lib.py!")
return response
return new_callback_func
class _SrvTemplate(object):
''' A template for creating ROS service. '''
def __init__(self, srv_name,
srv_in_type,
srv_out_type):
if SRV_NAMESPACE:
srv_name = SRV_NAMESPACE + "/" + srv_name # Add name space
self._srv = rospy.Service(
srv_name, srv_in_type, self._callback)
rospy.loginfo(" Service starts: " + srv_name)
self._srv_name = srv_name
self._srv_in_type = srv_in_type
self._srv_out_type = srv_out_type
def _callback(self, req):
raise NotImplementedError("Please overload this function!")
# ============================================================================== #
class TurtlebotControlRosServices(object):
def __init__(self):
self._is_start = False
def start(self):
self._h1 = TurtlebotControlRosServices.ServiceMoveToPoint()
self._h2 = TurtlebotControlRosServices.ServiceMoveToPose()
self._h3 = TurtlebotControlRosServices.ServiceMoveToRelativePoint()
self._h4 = TurtlebotControlRosServices.ServiceMoveToRelativePose()
self._h5 = TurtlebotControlRosServices.ServiceStopMoving()
self._h6 = TurtlebotControlRosServices.ServiceSetPose()
self._h7 = TurtlebotControlRosServices.ServiceResetPose()
self._h8 = TurtlebotControlRosServices.ServiceGetPose()
self._h9 = TurtlebotControlRosServices.ServiceIsMoving()
self._is_start = True
def __del__(self):
if self._is_start:
turtle.stop_moving()
class ServiceMoveToPoint(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceMoveToPoint, self).__init__(
srv_name='move_to_point',
srv_in_type=MoveToPoint,
srv_out_type=MoveToPointResponse,
)
def _callback(self, req):
turtle.move_to_pose(x_goal_w=req.x,
y_goal_w=req.y)
return self._srv_out_type()
class ServiceMoveToPose(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceMoveToPose, self).__init__(
srv_name='move_to_pose',
srv_in_type=MoveToPose,
srv_out_type=MoveToPoseResponse,
)
@_srv_callback_wrapper
def _callback(self, req):
turtle.move_to_pose(x_goal_w=req.x,
y_goal_w=req.y,
theta_goal_w=req.theta)
return self._srv_out_type()
class ServiceMoveToRelativePoint(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceMoveToRelativePoint, self).__init__(
srv_name='move_to_relative_point',
srv_in_type=MoveToRelativePoint,
srv_out_type=MoveToRelativePointResponse,
)
@_srv_callback_wrapper
def _callback(self, req):
turtle.move_to_relative_pose(
x_goal_r=req.x,
y_goal_r=req.y)
return self._srv_out_type()
class ServiceMoveToRelativePose(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceMoveToRelativePose, self).__init__(
srv_name='move_to_relative_pose',
srv_in_type=MoveToRelativePose,
srv_out_type=MoveToRelativePoseResponse,
)
@_srv_callback_wrapper
def _callback(self, req):
turtle.move_to_relative_pose(
x_goal_r=req.x,
y_goal_r=req.y,
theta_goal_r=req.theta)
return self._srv_out_type()
class ServiceStopMoving(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceStopMoving, self).__init__(
srv_name='stop_moving',
srv_in_type=StopMoving,
srv_out_type=StopMovingResponse,
)
@_srv_callback_wrapper
def _callback(self, req):
turtle.stop_moving()
return self._srv_out_type()
class ServiceSetPose(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceSetPose, self).__init__(
srv_name='set_pose',
srv_in_type=SetPose,
srv_out_type=SetPoseResponse,
)
@_srv_callback_wrapper
def _callback(self, req):
turtle.set_pose(req.x, req.y, req.theta)
return self._srv_out_type()
class ServiceResetPose(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceResetPose, self).__init__(
srv_name='reset_pose',
srv_in_type=ResetPose,
srv_out_type=ResetPoseResponse,
)
@_srv_callback_wrapper
def _callback(self, req):
turtle.reset_pose()
return self._srv_out_type()
class ServiceGetPose(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceGetPose, self).__init__(
srv_name='get_pose',
srv_in_type=GetPose,
srv_out_type=GetPoseResponse,
)
def _callback(self, req):
res = GetPoseResponse()
x, y, theta = turtle.get_pose()
return GetPoseResponse(x, y, theta)
class ServiceIsMoving(_SrvTemplate):
def __init__(self):
super(TurtlebotControlRosServices.ServiceIsMoving, self).__init__(
srv_name='is_moving',
srv_in_type=IsMoving,
srv_out_type=IsMovingResponse,
)
def _callback(self, req):
is_moving = turtle.is_moving()
return IsMovingResponse(is_moving)
def main():
rospy.init_node(NODE_NAME)
rospy.loginfo("Node starts: " + NODE_NAME)
# Define global variables.
global turtle, SRV_NAMESPACE
turtle = Turtle(CONFIG_FILEPATH)
SRV_NAMESPACE = read_yaml_file(CONFIG_FILEPATH)["srv_namespace"]
# ROS Node deconstructor.
rospy.on_shutdown(lambda: turtle.stop_moving())
# Start ROS services.
turtle_services = TurtlebotControlRosServices()
turtle_services.start()
# Loop.
rospy.spin()
rospy.loginfo("Node stops: " + NODE_NAME)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11222390
|
<filename>scriptbase/disk.py
# Copyright 2016-19 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Disk-related utilities for Unix-like systems.
Some functions deal with MacOS differences.
Some are only implemented for MacOS for now.
"""
import sys
import os
import re
import subprocess
from glob import glob
from decimal import Decimal
from . import command
from . import console
from . import shell
from . import utility
RE_MOUNT = re.compile('^(/[a-z0-9_/]+) on (/[a-z0-9_/ ]+)( [(][^)]*[)])?', re.IGNORECASE)
def iter_mounted_volumes():
"""Iterate mounted volume paths."""
with command.Command('mount') as cmd:
for line in cmd:
matched = RE_MOUNT.match(line)
if matched:
yield matched.group(2), matched.group(1)
def mounts_check(*mountpoints):
"""Return True if all passed mount points have mounted volumes."""
mounted = dict(iter_mounted_volumes())
for mountpoint in mountpoints:
if mountpoint not in mounted:
return False
return True
def _get_version(path):
try:
return int(os.path.splitext(path)[0].split('-')[-1])
except ValueError:
return -1
def get_versioned_path(path, suffix):
"""Convert path to versioned path by adding suffix and counter when necessary."""
(base, ext) = os.path.splitext(path)
re_strip_version = re.compile('(.*)-%s(-[0-9]*)?' % suffix)
matched = re_strip_version.match(base)
if matched:
base = matched.group(1)
path = '%s-%s%s' % (base, suffix, ext)
if not os.path.exists(path):
return path
max_version = 1
for chk in glob('%s-%s-[0-9]*%s' % (base, suffix, ext)):
version = _get_version(chk)
if version > max_version:
max_version = version
suffix2 = '%s-%d' % (suffix, max_version + 1)
return '%s-%s%s' % (base, suffix2, ext)
def purge_versions(path, suffix, num_keep, reverse=False):
"""
Purge file versions created by get_versioned_path.
Purge specified quantity in normal or reverse sequence.
"""
(base, ext) = os.path.splitext(path)
re_strip_version = re.compile('(.*)-%s(-[0-9]*)?' % suffix)
matched = re_strip_version.match(base)
if matched:
base = matched.group(1)
versions = [version for version in glob('%s-%s*%s' % (base, suffix, ext))]
versions.sort(key=_get_version, reverse=reverse)
num_purge = len(versions) - num_keep
if num_purge > len(versions):
num_purge = 0
if num_purge > 0:
for version_path in versions[:num_purge]:
os.remove(version_path)
return num_purge
class DiskVolume(utility.DumpableObject):
"""Data for a disk volume."""
unit_labels = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
def __init__(
self,
volume_dev,
disk_dev,
raw_disk_dev,
filesystem,
size,
name,
uuid,
mountpoint):
self.volume_dev = volume_dev
self.disk_dev = disk_dev
self.raw_disk_dev = raw_disk_dev
self.filesystem = filesystem
self.size = int(size)
self.name = name
self.uuid = uuid
self.mountpoint = mountpoint
utility.DumpableObject.__init__(self)
@classmethod
def format_disk_size(cls, size, places=2):
"""Return adjusted size string with unit."""
threshold = 1000 ** (len(cls.unit_labels) - 1)
for i in range(len(cls.unit_labels) - 1, 0, -1):
if size >= threshold:
value_str = str(Decimal(size) / threshold)
dec_pos = value_str.find('.')
if dec_pos == -1:
return '{}.00 {}'.format(value_str, cls.unit_labels[i])
value_places = len(value_str) - dec_pos - 1
if value_places < places:
zeros = '0' * (places - value_places)
return '{}{} {}'.format(value_str, zeros, cls.unit_labels[i])
if value_places > places:
return '{} {}'.format(value_str[:(places - value_places)], cls.unit_labels[i])
                return '{} {}'.format(value_str, cls.unit_labels[i])
threshold //= 1000
return '{} {}'.format(size, cls.unit_labels[0])
def short_summary(self):
"""Short summary string to for user consumption."""
return 'label: {label}, disk: {disk}, volume: {volume}, size: {size}'.format(
label=self.name,
disk=self.disk_dev,
volume=self.volume_dev,
size=self.format_disk_size(self.size),
)
FILESYSTEM_NAME_TRANSLATIONS_1 = {
'Apple_APFS': 'APFS',
'Apple_HFS': 'HFS',
'EFI': 'EFI',
'Windows_FAT_32': 'FAT32',
}
FILESYSTEM_NAME_TRANSLATIONS_2 = {
'Windows_NTFS': 'NTFS',
'UFSD_NTFS': 'NTFS',
'Journaled HFS+': 'HFS+',
}
def volumes_list():
"""Provide data for currently visible volumes."""
if sys.platform != 'darwin':
console.abort('Currently, volumes_list() is only implemented for MacOS')
import plistlib
volumes = []
proc = subprocess.run(['diskutil', 'list', '-plist', 'physical'],
capture_output=True, check=True)
list_data = plistlib.loads(proc.stdout)
for disk_or_partition in list_data['AllDisksAndPartitions']:
for volume in disk_or_partition.get('Partitions', []):
# Assume that "useful" user volumes have UUIDs.
uuid = volume.get('VolumeUUID')
if uuid:
filesystem = FILESYSTEM_NAME_TRANSLATIONS_1.get(volume.get('Content'))
if not filesystem:
proc2 = subprocess.run(['diskutil', 'info', '-plist', uuid],
capture_output=True, check=True)
info_data = plistlib.loads(proc2.stdout)
filesystem = info_data['FilesystemName']
if filesystem in FILESYSTEM_NAME_TRANSLATIONS_2:
filesystem = FILESYSTEM_NAME_TRANSLATIONS_2[filesystem]
volumes.append(DiskVolume(
'/dev/{}'.format(volume.get('DeviceIdentifier')),
'/dev/{}'.format(disk_or_partition['DeviceIdentifier']),
'/dev/r{}'.format(disk_or_partition['DeviceIdentifier']),
filesystem,
volume.get('Size'),
volume.get('VolumeName', '(unnamed)'),
uuid,
volume.get('MountPoint'),
))
return volumes
def volume_unmount(volume):
"""Unmount a volume based on a mountpoint."""
if sys.platform != 'darwin':
console.abort('Currently, volume_unmount() is only implemented for MacOS')
subprocess.run(['diskutil', 'unmount', volume.mountpoint], check=True)
def volumes_for_identifier(identifier):
"""Find volume by volume name, mountpoint, UUID, or device name."""
return [
volume
for volume in volumes_list()
if identifier in [
volume.name,
volume.mountpoint,
volume.uuid,
volume.disk_dev,
]
]
def volume_for_identifier(identifier):
"""Find exactly one volume by identifier (see volumes_for_identifier())."""
volumes = volumes_for_identifier(identifier)
if not volumes:
console.abort('No volume "{}" was found.'.format(identifier))
if len(volumes) != 1:
console.abort('There are {} volumes for "{}".'.format(len(volumes), identifier))
return volumes[0]
class Compressor:
"""Compressor data."""
def __init__(self, name, uncompress_cmd, *compress_cmds):
self.name = name
self.uncompress_cmd = uncompress_cmd
self.compress_cmds = compress_cmds
def get_compress_command(self):
"""Check for and return compress command."""
progs = []
cmd = None
for compress_cmd in self.compress_cmds:
prog = compress_cmd.split()[0]
if shell.find_executable(prog):
cmd = compress_cmd
break
progs.append(prog)
else:
console.abort('Unable to find {} compression program: {}'
.format(self.name, ' '.join(progs)))
return cmd
def get_expand_command(self):
"""Check for and return expansion command."""
prog = self.uncompress_cmd.split()[0]
if not shell.find_executable(prog):
console.abort('Unable to find {} expansion program: {}'.format(self.name, prog))
return self.uncompress_cmd
class Compressors:
"""Access compression/expansion commands."""
compressors = [
Compressor('gzip', 'gzcat', 'pigz -c -f -', 'gzip -c -f -'),
Compressor('xz', 'xzcat', 'xz -c -T0 -f -'),
]
@classmethod
def get_compressor(cls, name):
"""Return an appropriate compressor, if available."""
compressor = None
for check_compressor in cls.compressors:
if check_compressor.name == name:
compressor = check_compressor
break
else:
console.abort('No {} compressor found.'.format(name))
return compressor
@classmethod
def get_compress_command(cls, name):
"""Return compression command, if available."""
compressor = cls.get_compressor(name)
return compressor.get_compress_command()
@classmethod
def get_expand_command(cls, name):
"""Return expansion command, if available."""
compressor = cls.get_compressor(name)
return compressor.get_expand_command()
def backup_device(device_path, output_path, compression=None): #pylint: disable=unused-argument
"""Copy input device to gzip-compressed output file."""
ctx = utility.DictObject(**locals())
if compression:
ctx.compress_cmd = Compressors.get_compress_command(compression)
ctx.compress_prog = ctx.compress_cmd.split()[0]
cmd = 'sudo dd if={device_path} bs=1M | {compress_cmd} > "{output_path}"'
msg = 'Reading device with dd and writing image with {compress_prog}.'
else:
cmd = 'sudo dd if={device_path} of="{output_path}" bs=1M'
msg = 'Reading device and writing image with dd.'
console.info([ctx.format(msg), 'Press CTRL-T for status.'])
cmd = ctx.format(cmd)
console.info(cmd)
ctx.retcode = os.system(cmd)
if ctx.retcode != 0:
console.abort(ctx.format('Image restore command failed with return code {retcode}.'))
def restore_device(device_path, input_path, compression=None): #pylint: disable=unused-argument
"""Uncompress input file and copy to output device."""
ctx = utility.DictObject(**locals())
if compression:
ctx.expand_cmd = Compressors.get_expand_command(compression)
msg = ('Uncompressing image file with {} and writing to device with dd.'
.format(ctx.expand_cmd))
cmd = ctx.format('{expand_cmd} "{input_path}" | sudo dd of={device_path} bs=64K')
else:
msg = 'Reading from image file and writing to device with dd.'
cmd = ctx.format('sudo dd if="{input_path}" of={device_path} bs=1M')
console.info([msg, 'Press CTRL-T for status.'])
console.info(cmd)
ctx.retcode = os.system(cmd)
if ctx.retcode != 0:
console.abort(ctx.format('Image restore command failed with return code {retcode}.'))
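# Illustrative check (a sketch, not part of the module; run via
# `python -m scriptbase.disk` so the relative imports above resolve):
# format_disk_size renders byte counts with two decimal places and a unit.
if __name__ == '__main__':
    print(DiskVolume.format_disk_size(999))            # 999 B
    print(DiskVolume.format_disk_size(1230000))        # 1.23 MB
    print(DiskVolume.format_disk_size(1500000))        # 1.50 MB
    print(DiskVolume.format_disk_size(2000000000))     # 2.00 GB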
|
StarcoderdataPython
|
1990083
|
<reponame>swreinehr/katana
def test_import_applications_property_graph():
import galois.lonestar.analytics.bfs
import galois.lonestar.analytics.jaccard
import galois.lonestar.analytics.pagerank
import galois.lonestar.analytics.connected_components
import galois.lonestar.analytics.kcore
def test_import_loops():
import galois.loops
def test_import_property_graph():
import galois.property_graph
def test_import_graph():
import galois.graphs
def test_import_datastructures():
import galois.datastructures
def test_import_atomic():
import galois.atomic
def test_import_numba():
import galois.numba_support.pyarrow
import galois.numba_support.galois
|
StarcoderdataPython
|
9755208
|
from datetime import datetime
from bson import json_util
from mongoengine import StringField, connect, Document, DateField, ReferenceField, ListField, QuerySet
connect("article")
class CustomQuerySet(QuerySet):
def to_json(self):
return "[%s]" % (",".join([doc.to_json() for doc in self]))
class Tag(Document):
name = StringField(max_length=50, unique=True)
color = StringField(max_length=10, unique=False, default="#007bff")
class Article(Document):
title = StringField(required=True, max_length=200, unique=True)
author = StringField(required=True, max_length=25)
body = StringField(required=True)
summary = StringField(required=True)
date = DateField(required=True, default=datetime.utcnow)
tags = ListField(ReferenceField(Tag))
meta = {'queryset_class': CustomQuerySet}
def to_json(self):
data = self.to_mongo()
data["tags"] = [{'name': tag.name, 'color': tag.color} for tag in self.tags]
return json_util.dumps(data)
|
StarcoderdataPython
|
8033903
|
<reponame>opensciencegrid/network_analytics
import threading
class ConnectionListener(object):
"""
This class should be used as a base class for objects registered
using Connection.set_listener().
"""
def on_connecting(self, host_and_port):
"""
Called by the STOMP connection once a TCP/IP connection to the
STOMP server has been established or re-established. Note that
at this point, no connection has been established on the STOMP
protocol level. For this, you need to invoke the "connect"
method on the connection.
\param host_and_port a tuple containing the host name and port
number to which the connection has been established.
"""
pass
def on_connected(self, headers, body):
"""
Called by the STOMP connection when a CONNECTED frame is
received, that is after a connection has been established or
re-established.
\param headers a dictionary containing all headers sent by the
server as key/value pairs.
\param body the frame's payload. This is usually empty for
CONNECTED frames.
"""
pass
def on_disconnected(self):
"""
Called by the STOMP connection when a TCP/IP connection to the
STOMP server has been lost. No messages should be sent via
the connection until it has been reestablished.
"""
pass
def on_heartbeat_timeout(self):
"""
Called by the STOMP connection when a heartbeat message has not been
received beyond the specified period.
"""
pass
def on_message(self, headers, body):
"""
Called by the STOMP connection when a MESSAGE frame is
received.
\param headers a dictionary containing all headers sent by the
server as key/value pairs.
\param body the frame's payload - the message body.
"""
pass
def on_receipt(self, headers, body):
"""
Called by the STOMP connection when a RECEIPT frame is
received, sent by the server if requested by the client using
the 'receipt' header.
\param headers a dictionary containing all headers sent by the
server as key/value pairs.
\param body the frame's payload. This is usually empty for
RECEIPT frames.
"""
pass
def on_error(self, headers, body):
"""
Called by the STOMP connection when an ERROR frame is
received.
\param headers a dictionary containing all headers sent by the
server as key/value pairs.
\param body the frame's payload - usually a detailed error
description.
"""
pass
def on_send(self, headers, body):
"""
Called by the STOMP connection when it is in the process of sending a message
\param headers a dictionary containing the headers that will be sent with this message
\param body the message payload
"""
pass
class WaitingListener(ConnectionListener):
"""
A listener which waits for a specific receipt to arrive
"""
def __init__(self, receipt):
self.condition = threading.Condition()
self.receipt = receipt
self.received = False
def on_receipt(self, headers, body):
if 'receipt-id' in headers and headers['receipt-id'] == self.receipt:
self.condition.acquire()
self.received = True
self.condition.notify()
self.condition.release()
def wait_on_receipt(self):
self.condition.acquire()
while not self.received:
self.condition.wait()
self.condition.release()
class StatsListener(ConnectionListener):
"""
A connection listener for recording statistics on messages sent and received.
"""
def __init__(self):
self.errors = 0
self.connections = 0
self.messages_recd = 0
self.messages_sent = 0
def on_error(self, headers, message):
"""
\see ConnectionListener::on_error
"""
self.errors += 1
def on_connecting(self, host_and_port):
"""
\see ConnectionListener::on_connecting
"""
self.connections += 1
def on_message(self, headers, message):
"""
\see ConnectionListener::on_message
"""
self.messages_recd += 1
def on_send(self, headers, message):
"""
\see ConnectionListener::on_send
"""
self.messages_sent += 1
def __str__(self):
"""
Return a string containing the current statistics (messages sent and received,
errors, etc)
"""
return '''Connections: %s
Messages sent: %s
Messages received: %s
Errors: %s''' % (self.connections, self.messages_sent, self.messages_recd, self.errors)
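# Usage sketch (assumption): listeners such as StatsListener are normally
# registered on a Connection object defined elsewhere in this module, along
# the lines of the stomp.py API; host/port values below are illustrative only.
#
#     conn = Connection([('localhost', 61613)])
#     stats = StatsListener()
#     conn.set_listener('stats', stats)
#     conn.start()
#     conn.connect(wait=True)
#     ...
#     print(stats)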
|
StarcoderdataPython
|
11381200
|
"""
Data related to configuration
"""
import copy
import pkg_resources
import asdf
def get_defaults():
filename = pkg_resources.resource_filename(
'pydrad',
'configure/data/defaults.asdf',
)
with asdf.open(filename) as af:
return copy.deepcopy(dict(af.tree))
|
StarcoderdataPython
|
369293
|
<gh_stars>1-10
{
"targets": [
{
"target_name": "addon",
"sources": [ "src/logql.cc" ],
"libraries": [ "<!(pwd)/logql.so" ]
}
]
}
|
StarcoderdataPython
|
5018171
|
<reponame>caoyukun0430/Computer-Networking-A-Top-Down-Approach-NOTES
from socket import *
import os
import sys
import struct
import time
import select
import binascii
ICMP_ECHO_REQUEST = 8
ICMP_ECHO_REPLY = 0
PING_NUMBER = 4
def checksum(str):
csum = 0
    countTo = (len(str) // 2) * 2
count = 0
while count < countTo:
thisVal = str[count+1] * 256 + str[count]
csum = csum + thisVal
csum = csum & 0xffffffff
count = count + 2
    if countTo < len(str):
        csum = csum + str[len(str) - 1]
csum = csum & 0xffffffff
csum = (csum >> 16) + (csum & 0xffff)
csum = csum + (csum >> 16)
answer = ~csum
answer = answer & 0xffff
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
def receiveOnePing(mySocket, ID, timeout, destAddr):
timeLeft = timeout
while 1:
startedSelect = time.time()
whatReady = select.select([mySocket], [], [], timeLeft)
howLongInSelect = (time.time() - startedSelect)
if whatReady[0] == []: # Timeout
return None
timeReceived = time.time()
recPacket, addr = mySocket.recvfrom(1024)
#Fetch the ICMP header from the IP packet
# ICMP is in the 20 to 28 byte of the header
header = recPacket[20: 28]
type, code, checksum, packetid, sequence = struct.unpack("bbHHh", header)
# Type and code must be set to 0.
# identifier should be same for request and reply
# dstaddr should match
if addr[0] == str(destAddr) and type == ICMP_ECHO_REPLY and code == 0 and packetid == ID:
# calculate the data size
byte_in_double = struct.calcsize("d")
timeSent = struct.unpack("d", recPacket[28: 28 + byte_in_double])[0]
rtt = timeReceived - timeSent
# TTL is in header 8-9 byte, has format recPacket[8:9] (b'-',)
# ASCII characters - is the TTL
ttl = ord(struct.unpack("c", recPacket[8: 9])[0].decode())
return (byte_in_double, rtt, ttl)
timeLeft = timeLeft - howLongInSelect
if timeLeft <= 0:
return None
def sendOnePing(mySocket, destAddr, ID):
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
myChecksum = 0
# Make a dummy header with a 0 checksum.
# struct -- Interpret strings as packed binary data, "bbHHh" is format
header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, myChecksum, ID, 1)
data = struct.pack("d", time.time())
# Calculate the checksum on the data and the dummy header.
myChecksum = checksum(header + data)
# Get the right checksum, and put in the header
# htons() function converts a 16 bit positive integer from host byte order to network byte order.
# https://pythontic.com/modules/socket/byteordering-coversion-functions
# so we can either use htons() or pack using (struct.pack("!bbHHh")
if sys.platform == 'darwin':
myChecksum = htons(myChecksum) & 0xffff
#Convert 16-bit integers from host to network byte order.
else:
myChecksum = htons(myChecksum)
header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, myChecksum, ID, 1)
packet = header + data
mySocket.sendto(packet, (destAddr, 1)) # AF_INET address must be tuple, not str
#Both LISTS and TUPLES consist of a number of objects
#which can be referenced by their position number within the object
def doOnePing(destAddr, timeout):
icmp = getprotobyname("icmp")
#SOCK_RAW is a powerful socket type. For more details see: http://sock-raw.org/papers/sock_raw
#Create Socket here
# mySocket = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)
mySocket = socket(AF_INET, SOCK_RAW, icmp)
    myID = os.getpid() & 0xFFFF  # Return the current process id
sendOnePing(mySocket, destAddr, myID)
res = receiveOnePing(mySocket, myID, timeout, destAddr)
mySocket.close()
return res
def ping(host, timeout=1):
#timeout=1 means: If one second goes by without a reply from the server,
#the client assumes that either the client’s ping or the server’s pong is lost
dest = gethostbyname(host)
print("Pinging " + dest + " using Python:")
print("")
loss = 0
rtt_arr = []
#Send ping requests to a server separated by approximately one second
# while 1 :
# instead of keeping pinging, we run default PING_NUMBER=4 times
for i in range(0, PING_NUMBER):
res = doOnePing(dest, timeout)
if not res:
print("Request timed out.")
loss += 1
else:
byte_in_double = res[0]
rtt = int(res[1]*1000)
rtt_arr.append(rtt)
ttl = res[2]
print("Received from %s: byte(s) = %d delay = %dms TTL = %d" % (dest, byte_in_double, rtt, ttl))
time.sleep(1)# one second
print("Packet: sent = %d received = %d lost = %d (%.0f%%)" % (PING_NUMBER, PING_NUMBER - loss, loss, loss/PING_NUMBER*100))
print("Round Trip Time (rtt): min = %dms max = %dms avg = %dms" % (min(rtt_arr), max(rtt_arr), int(sum(rtt_arr)/len(rtt_arr))))
return
ping("www.google.com")
|
StarcoderdataPython
|
1964926
|
import json
import math
from elasticsearch import ConnectionError, NotFoundError
import falcon
from reach.web.views import template
def _get_pages(current_page, last_page):
"""Return a list of pages to be used in the rendered template from the
last page number."""
pages = []
if current_page > 3:
pages.append(1)
if current_page > 4:
pages.append('...')
pages.extend(
range(
max(current_page - 2, 1), min(current_page + 3, last_page)
)
)
if current_page < last_page - 3:
pages.append('...')
if last_page not in pages:
pages.append(last_page)
return pages
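# Worked example (derived from the logic above, not part of the original code):
#     _get_pages(current_page=5, last_page=10)
#     -> [1, '...', 3, 4, 5, 6, 7, '...', 10]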
def _search_es(es, params, explain=False):
"""Run a search on the elasticsearch database.
Args:
es: An Elasticsearch active connection.
        params: The request's parameters. Should include 'term' and at
least a field ([text|title|organisation]).
explain: A boolean to enable|disable elasticsearch's explain.
Returns:
True|False: The search success status
es.search()|str: A dict containing the result of the search if it
succeeded or a string explaining why it failed
"""
try:
        fields = params.get('fields', '').split(',')
        page = int(params.get('page', 1))
        size = int(params.get('size', 50))
es.cluster.health(wait_for_status='yellow')
es_body = {
'from': (page - 1) * size,
'size': size,
'query': {
'multi_match': {
'query': params.get('term'),
'type': "best_fields",
'fields': ['.'.join(['doc', f]) for f in fields]
}
}
}
return True, es.search(
index='policy-test-docs',
body=json.dumps(es_body),
explain=explain
)
except ConnectionError:
message = 'Could not join the elasticsearch server.'
raise falcon.HTTPServiceUnavailable(description=message)
except NotFoundError:
message = 'No results found.'
return False, {'message': message}
except Exception as e:
raise falcon.HTTPError(description=str(e))
class FulltextApi:
"""Let you search for terms in publications fulltexts. Returns a json.
Args:
es: An elasticsearch connection
es_explain: A boolean to enable|disable elasticsearch's explain.
"""
def __init__(self, es, es_explain):
self.es = es
self.es_explain = es_explain
def on_get(self, req, resp):
"""Returns the result of a search on the elasticsearch cluster.
Args:
req: The request passed to this controller
            resp: The response object to be returned
"""
if req.params:
status, response = _search_es(self.es, req.params, self.es_explain)
if status:
response['status'] = 'success'
resp.body = json.dumps(response)
else:
resp.body = json.dumps({
'status': 'error',
'message': response
})
else:
resp.body = json.dumps({
'status': 'error',
'message': "The request doesn't contain any parameters"
})
resp.status = falcon.HTTP_400
class FulltextPage(template.TemplateResource):
"""Let you search for terms in publications fulltexts. Returns a web page.
Args:
es: An elasticsearch connection
es_explain: A boolean to enable|disable elasticsearch's explain.
"""
def __init__(self, template_dir, es, es_explain, context=None):
self.es = es
self.es_explain = es_explain
super(FulltextPage, self).__init__(template_dir, context)
def on_get(self, req, resp):
if req.params:
params = {
"term": req.params.get('term', ''), # es returns none on empty
"fields": "text,organisation", # search_es is expects a str
"page": int(req.params.get('page', 1)),
"size": int(req.params.get('size', 50)),
}
status, response = _search_es(self.es, params, True)
self.context['es_response'] = response
self.context['es_status'] = status
if (not status) or (response.get('message')):
self.context.update(params)
super(FulltextPage, self).render_template(
resp,
'/results/policy-docs',
)
return
self.context['pages'] = _get_pages(
params['page'],
math.ceil(
float(response['hits']['total']['value']) / params['size'])
)
self.context.update(params)
super(FulltextPage, self).render_template(
resp,
'/results/policy-docs',
)
else:
super(FulltextPage, self).on_get(req, resp)
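# Wiring sketch (assumption, not part of this module): these resources are
# typically registered on a falcon API object elsewhere in the package, roughly
# as below; the route paths, host and `template_dir` are illustrative only.
#
#     es = Elasticsearch(hosts=['localhost:9200'])
#     api = falcon.API()
#     api.add_route('/api/search/policy-docs', FulltextApi(es, es_explain=False))
#     api.add_route('/search/policy-docs', FulltextPage(template_dir, es, False))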
|
StarcoderdataPython
|
1708136
|
<filename>dataset.py
import time
import torch
import numpy as np
import pandas as pd
import scipy
from h5py import File
import itertools, random
from tqdm import tqdm
from loguru import logger
import torch.utils.data as tdata
from typing import List, Dict
class TrainHDF5Dataset(tdata.Dataset):
"""
HDF5 dataset indexed by a labels dataframe.
Indexing is done via the dataframe since we want to preserve some storage
in cases where oversampling is needed ( pretty likely )
"""
def __init__(self,
h5filedict: Dict,
h5labeldict: Dict,
label_type='soft',
transform=None):
super(TrainHDF5Dataset, self).__init__()
self._h5filedict = h5filedict
self._h5labeldict = h5labeldict
self._datasetcache = {}
self._labelcache = {}
self._len = len(self._h5labeldict)
# IF none is passed still use no transform at all
self._transform = transform
assert label_type in ('soft', 'hard', 'softhard', 'hardnoise')
self._label_type = label_type
self.idx_to_item = {
idx: item
for idx, item in enumerate(self._h5labeldict.keys())
}
first_item = next(iter(self._h5filedict.keys()))
with File(self._h5filedict[first_item], 'r') as store:
self.datadim = store[first_item].shape[-1]
def __len__(self):
return self._len
def __del__(self):
for k, cache in self._datasetcache.items():
cache.close()
for k, cache in self._labelcache.items():
cache.close()
def __getitem__(self, index: int):
fname: str = self.idx_to_item[index]
h5file: str = self._h5filedict[fname]
labelh5file: str = self._h5labeldict[fname]
if not h5file in self._datasetcache:
self._datasetcache[h5file] = File(h5file, 'r')
if not labelh5file in self._labelcache:
self._labelcache[labelh5file] = File(labelh5file, 'r')
data = self._datasetcache[h5file][f"{fname}"][()]
speech_target = self._labelcache[labelh5file][f"{fname}/speech"][()]
noise_target = self._labelcache[labelh5file][f"{fname}/noise"][()]
speech_clip_target = self._labelcache[labelh5file][
f"{fname}/clipspeech"][()]
noise_clip_target = self._labelcache[labelh5file][
f"{fname}/clipnoise"][()]
noise_clip_target = np.max(noise_clip_target) # take max around axis
if self._label_type == 'hard':
noise_clip_target = noise_clip_target.round()
speech_target = speech_target.round()
noise_target = noise_target.round()
speech_clip_target = speech_clip_target.round()
elif self._label_type == 'hardnoise': # only noise yay
noise_clip_target = noise_clip_target.round()
noise_target = noise_target.round()
elif self._label_type == 'softhard':
r = np.random.permutation(noise_target.shape[0] // 4)
speech_target[r] = speech_target[r].round()
target_clip = torch.tensor((noise_clip_target, speech_clip_target))
data = torch.as_tensor(data).float()
target_time = torch.as_tensor(
np.stack((noise_target, speech_target), axis=-1)).float()
if self._transform:
data = self._transform(data)
return data, target_time, target_clip, fname
class HDF5Dataset(tdata.Dataset):
"""
HDF5 dataset indexed by a labels dataframe.
Indexing is done via the dataframe since we want to preserve some storage
in cases where oversampling is needed ( pretty likely )
"""
def __init__(self, h5file: File, h5label: File, fnames, transform=None):
super(HDF5Dataset, self).__init__()
self._h5file = h5file
self._h5label = h5label
self.fnames = fnames
self.dataset = None
self.label_dataset = None
self._len = len(fnames)
# IF none is passed still use no transform at all
self._transform = transform
with File(self._h5file, 'r') as store, File(self._h5label,
'r') as labelstore:
self.datadim = store[self.fnames[0]].shape[-1]
def __len__(self):
return self._len
def __getitem__(self, index):
if self.dataset is None:
self.dataset = File(self._h5file, 'r')
self.label_dataset = File(self._h5label, 'r')
fname = self.fnames[index]
data = self.dataset[fname][()]
speech_target = self.label_dataset[f"{fname}/speech"][()]
noise_target = self.label_dataset[f"{fname}/noise"][()]
speech_clip_target = self.label_dataset[f"{fname}/clipspeech"][()]
noise_clip_target = self.label_dataset[f"{fname}/clipnoise"][()]
noise_clip_target = np.max(noise_clip_target) # take max around axis
target_clip = torch.tensor((noise_clip_target, speech_clip_target))
data = torch.as_tensor(data).float()
target_time = torch.as_tensor(
np.stack((noise_target, speech_target), axis=-1)).float()
if self._transform:
data = self._transform(data)
return data, target_time, target_clip, fname
class EvalH5Dataset(tdata.Dataset):
"""
HDF5 dataset indexed by a labels dataframe.
Indexing is done via the dataframe since we want to preserve some storage
in cases where oversampling is needed ( pretty likely )
"""
def __init__(self, h5file: File, fnames=None):
super(EvalH5Dataset, self).__init__()
self._h5file = h5file
self._dataset = None
# IF none is passed still use no transform at all
with File(self._h5file, 'r') as store:
if fnames is None:
self.fnames = list(store.keys())
else:
self.fnames = fnames
self.datadim = store[self.fnames[0]].shape[-1]
self._len = len(store)
def __len__(self):
return self._len
def __getitem__(self, index):
if self._dataset is None:
self._dataset = File(self._h5file, 'r')
fname = self.fnames[index]
data = self._dataset[fname][()]
data = torch.as_tensor(data).float()
return data, fname
class MinimumOccupancySampler(tdata.Sampler):
"""
docstring for MinimumOccupancySampler
samples at least one instance from each class sequentially
"""
def __init__(self, labels, sampling_mode='same', random_state=None):
self.labels = labels
data_samples, n_labels = labels.shape
label_to_idx_list, label_to_length = [], []
self.random_state = np.random.RandomState(seed=random_state)
for lb_idx in range(n_labels):
label_selection = labels[:, lb_idx]
if scipy.sparse.issparse(label_selection):
label_selection = label_selection.toarray()
label_indexes = np.where(label_selection == 1)[0]
self.random_state.shuffle(label_indexes)
label_to_length.append(len(label_indexes))
label_to_idx_list.append(label_indexes)
self.longest_seq = max(label_to_length)
self.data_source = np.empty((self.longest_seq, len(label_to_length)),
dtype=np.uint32)
# Each column represents one "single instance per class" data piece
for ix, leng in enumerate(label_to_length):
# Fill first only "real" samples
self.data_source[:leng, ix] = label_to_idx_list[ix]
self.label_to_idx_list = label_to_idx_list
self.label_to_length = label_to_length
if sampling_mode == 'same':
self.data_length = data_samples
elif sampling_mode == 'over': # Sample all items
self.data_length = np.prod(self.data_source.shape)
def _reshuffle(self):
# Reshuffle
for ix, leng in enumerate(self.label_to_length):
leftover = self.longest_seq - leng
random_idxs = np.random.randint(leng, size=leftover)
self.data_source[leng:,
ix] = self.label_to_idx_list[ix][random_idxs]
def __iter__(self):
        # Before each epoch, reshuffle random indices
self._reshuffle()
n_samples = len(self.data_source)
random_indices = self.random_state.permutation(n_samples)
data = np.concatenate(
self.data_source[random_indices])[:self.data_length]
return iter(data)
def __len__(self):
return self.data_length
class MultiBalancedSampler(tdata.sampler.Sampler):
"""docstring for BalancedSampler
Samples for Multi-label training
    Sampling is not totally equal, but aims to be roughly equal
"""
def __init__(self, Y, replacement=False, num_samples=None):
assert Y.ndim == 2, "Y needs to be one hot encoded"
if scipy.sparse.issparse(Y):
            raise ValueError("Not supporting sparse matrices yet")
class_counts = np.sum(Y, axis=0)
class_weights = 1. / class_counts
class_weights = class_weights / class_weights.sum()
classes = np.arange(Y[0].shape[0])
# Revert from many_hot to one
class_ids = [tuple(classes.compress(idx)) for idx in Y]
sample_weights = []
for i in range(len(Y)):
# Multiple classes were chosen, calculate average probability
weight = class_weights[np.array(class_ids[i])]
# Take the mean of the multiple classes and set as weight
weight = np.mean(weight)
sample_weights.append(weight)
self._weights = torch.as_tensor(sample_weights, dtype=torch.float)
self._len = num_samples if num_samples else len(Y)
self._replacement = replacement
def __len__(self):
return self._len
def __iter__(self):
return iter(
torch.multinomial(self._weights, self._len,
self._replacement).tolist())
def gettraindataloader(h5files,
h5labels,
                       label_type='soft',
transform=None,
**dataloader_kwargs):
dset = TrainHDF5Dataset(h5files,
h5labels,
label_type=label_type,
transform=transform)
return tdata.DataLoader(dset,
collate_fn=sequential_collate,
**dataloader_kwargs)
def getdataloader(h5file, h5label, fnames, transform=None,
**dataloader_kwargs):
dset = HDF5Dataset(h5file, h5label, fnames, transform=transform)
return tdata.DataLoader(dset,
collate_fn=sequential_collate,
**dataloader_kwargs)
def pad(tensorlist, padding_value=0.):
lengths = [len(f) for f in tensorlist]
max_len = np.max(lengths)
# max_len = 2000
batch_dim = len(lengths)
data_dim = tensorlist[0].shape[-1]
out_tensor = torch.full((batch_dim, max_len, data_dim),
fill_value=padding_value,
dtype=torch.float32)
for i, tensor in enumerate(tensorlist):
length = tensor.shape[0]
out_tensor[i, :length, ...] = tensor[:length, ...]
return out_tensor, torch.tensor(lengths)
def sequential_collate(batches):
# sort length wise
data, targets_time, targets_clip, fnames = zip(*batches)
data, lengths_data = pad(data)
targets_time, lengths_tar = pad(targets_time, padding_value=0)
targets_clip = torch.stack(targets_clip)
assert lengths_data.shape == lengths_tar.shape
return data, targets_time, targets_clip, fnames, lengths_tar
if __name__ == '__main__':
import utils
label_df = pd.read_csv(
'data/csv_labels/unbalanced_from_unbalanced/unbalanced.csv', sep='\s+')
data_df = pd.read_csv("data/data_csv/unbalanced.csv", sep='\s+')
merged = data_df.merge(label_df, on='filename')
common_idxs = merged['filename']
data_df = data_df[data_df['filename'].isin(common_idxs)]
label_df = label_df[label_df['filename'].isin(common_idxs)]
label = utils.df_to_dict(label_df)
data = utils.df_to_dict(data_df)
trainloader = gettraindataloader(
h5files=data,
h5labels=label,
transform=None,
label_type='soft',
batch_size=64,
num_workers=3,
shuffle=False,
)
with tqdm(total=len(trainloader)) as pbar:
for batch in trainloader:
inputs, targets_time, targets_clip, filenames, lengths = batch
pbar.set_postfix(inp=inputs.shape)
pbar.update()
|
StarcoderdataPython
|
12860663
|
<filename>modules/tankshapes/__init__.py
""" Tank shapes package for Guns.
This init file marks the package as a usable module.
"""
|
StarcoderdataPython
|
4939461
|
from django.apps import AppConfig
class PersoonlijkConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'persoonlijk'
|
StarcoderdataPython
|
8060684
|
<reponame>mpolson64/Ax-1
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Any, Callable, Type, Dict
from ax.core.experiment import Experiment
from ax.storage.json_store.decoder import object_from_json
from ax.storage.json_store.registry import (
CORE_CLASS_DECODER_REGISTRY,
CORE_DECODER_REGISTRY,
)
def load_experiment(
filepath: str,
decoder_registry: Dict[str, Type] = CORE_DECODER_REGISTRY,
class_decoder_registry: Dict[
str, Callable[[Dict[str, Any]], Any]
] = CORE_CLASS_DECODER_REGISTRY,
) -> Experiment:
"""Load experiment from file.
1) Read file.
2) Convert dictionary to Ax experiment instance.
"""
with open(filepath, "r") as file:
json_experiment = json.loads(file.read())
return object_from_json(
json_experiment, decoder_registry, class_decoder_registry
)
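# Usage sketch (the file path below is illustrative only):
#     experiment = load_experiment("hartmann_experiment.json")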
|
StarcoderdataPython
|
1891595
|
"""Entry point for CLI commands."""
import click
from .predict import predict
from .preprocess import preprocess
from .test import test
from .train import train
@click.group()
def entry_point():
"""Entry point for CLI commands."""
# TODO: configure logging on app start
entry_point.add_command(preprocess)
entry_point.add_command(predict)
entry_point.add_command(train)
entry_point.add_command(test)
|
StarcoderdataPython
|
11354184
|
<gh_stars>1-10
import urllib.request
import os
URLS = (
'https://raw.githubusercontent.com/maigfrga/spark-streaming-book/master/data/movielens/tags.csv', # noqa
'https://raw.githubusercontent.com/maigfrga/spark-streaming-book/master/data/movielens/ratings.csv', # noqa
'https://raw.githubusercontent.com/maigfrga/spark-streaming-book/master/data/movielens/movies.csv' # noqa,
)
def main():
"""
    Download the reduced version of the movielens dataset
"""
def download(url):
response = urllib.request.urlopen(url)
data = response.read()
data = data.decode('utf-8')
fname = url.split('/')[-1]
with open(os.path.join(
os.environ['SPARK_DATA'], fname), 'w') as f:
f.write(data)
for url in URLS:
download(url)
if __name__ == '__main__':
if 'SPARK_DATA' not in os.environ:
print('Error. Please define SPARK_DATA variable')
exit(1)
main()
|
StarcoderdataPython
|
282234
|
#!/usr/bin/python
# Project : diafuzzer
# Copyright (C) 2017 Orange
# All rights reserved.
# This software is distributed under the terms and conditions of the 'BSD 3-Clause'
# license which can be found in the file 'LICENSE' in this package distribution.
from struct import pack, unpack
from cStringIO import StringIO
import time
import re
from pprint import pformat
import Dia
from random import randint
from copy import deepcopy
import sys
import scenario
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
class IncompleteBuffer(Exception): pass
class MsgInvalidLength(Exception): pass
class AVPInvalidLength(Exception): pass
'''can be triggered at runtime by script'''
class RecvMismatch(Exception): pass
U16_MAX = pow(2,16)-1
U24_MAX = pow(2,24)-1
def pack24(x):
assert(x >= 0 and x <= U24_MAX)
s = pack('!L', x)
return s[1:]
def unpack24(x):
xp = '\x00' + x
return unpack('!L', xp)[0]
assert(pack24(0) == '\x00\x00\x00')
assert(0 == unpack24('\x00\x00\x00'))
def read_exactly(f, n):
b = f.read(n)
if len(b) != n: raise IncompleteBuffer()
return b
def get_matcher(elm):
m = re.match(r'code=(\d+)(?:,vendor=(\d+))?(?:\[(\d+)\])?', elm)
assert(m)
(code, vendor, index) = m.groups()
if index is None:
index = 0
else:
index = int(index, 0)
if vendor is None:
vendor = 0
else:
vendor = int(vendor, 0)
code = int(code, 0)
return lambda x, code=code, vendor=vendor: x.code == code and x.vendor == vendor
def get_filter(elm):
m = re.match(r'code=(\d+)(?:,vendor=(\d+))?(?:\[(\d+)\])?', elm)
assert(m)
(code, vendor, index) = m.groups()
if index is None:
index = 0
else:
index = int(index, 0)
if vendor is None:
vendor = 0
else:
vendor = int(vendor, 0)
code = int(code, 0)
def find_it(elms):
avps = [e for e in elms if e.code == code and e.vendor == vendor]
return avps[index]
return find_it
sys.setrecursionlimit(10000)
class Msg:
def __init__(self, **kwds):
self.version = 1
self.length = None
self.R = False
self.P = False
self.E = False
self.T = False
self.reserved = None
self.code = 0
self.app_id = 0
self.e2e_id = None
self.h2h_id = None
self.avps = []
for k in kwds:
setattr(self, k, kwds[k])
def __repr__(self, offset=0, indent=2):
attrs = {}
for k in ['code', 'app_id', 'e2e_id', 'h2h_id', 'avps']:
attrs[k] = getattr(self, k)
comment = ''
if hasattr(self, 'model'):
comment = self.model.name
if self.R: attrs['R'] = True
if self.P: attrs['P'] = True
if self.E: attrs['E'] = True
if self.T: attrs['T'] = True
if self.version != 1: attrs['version'] = self.version
if self.length is not None: attrs['length'] = self.length
r = ''
r += ' '*offset + 'Msg('
elms = []
for k in ['version', 'R', 'P', 'E', 'T', 'reserved',
'code', 'app_id']:
if k in attrs:
if k == 'app_id':
elms.append('%s=0x%x' % (k, attrs[k]))
else:
elms.append('%s=%r' % (k, attrs[k]))
r += ', '.join(elms)
if 'avps' in attrs:
r += ', avps=[ # %s \n' % comment
for a in self.avps:
r += a.__repr__(offset+indent, indent) + ',\n'
r += ' '*offset + ']'
r += ')'
return r
@staticmethod
def recv(f, _timeout=5.0):
f.settimeout(_timeout)
data = scenario.unpack_frame(f)
return Msg.decode(data)
def send(self, f):
data = self.encode()
scenario.pack_frame(f, data)
@staticmethod
def decode(s, tag=False):
f = StringIO(s)
attrs = {}
attrs['version'] = unpack('!B', read_exactly(f, 1))[0]
attrs['total_length'] = unpack24(read_exactly(f, 3))
flags = unpack('!B', read_exactly(f, 1))[0]
if flags & 0x80: attrs['R'] = True
if flags & 0x40: attrs['P'] = True
if flags & 0x20: attrs['E'] = True
if flags & 0x10: attrs['T'] = True
reserved = flags & 0x0f
if reserved: attrs['reserved'] = reserved
attrs['code'] = unpack24(read_exactly(f, 3))
attrs['app_id'] = unpack('!L', read_exactly(f, 4))[0]
attrs['h2h_id'] = unpack('!L', read_exactly(f, 4))[0]
attrs['e2e_id'] = unpack('!L', read_exactly(f, 4))[0]
length = attrs['total_length']
length -= 20
if length < 0: raise MsgInvalidLength()
avps = []
data = read_exactly(f, length)
while True:
a = Avp.decode(data)
avps.append(a)
assert(a.padded_length % 4 == 0)
data = data[a.padded_length:]
if len(data) == 0:
break
attrs['avps'] = avps
m = Msg(**attrs)
if tag:
Dia.Directory.tag(m)
return m
def encode(self):
f = StringIO()
content = ''
for a in self.avps:
content += a.encode()
if self.length:
length = self.length
else:
length = len(content) + 20
f.write(pack('!B', self.version))
f.write(pack24(length))
flags = 0
if self.R: flags |= 0x80
if self.P: flags |= 0x40
if self.E: flags |= 0x20
if self.T: flags |= 0x10
if self.reserved: flags |= self.reserved
f.write(pack('!B', flags))
f.write(pack24(self.code))
f.write(pack('!L', self.app_id))
if self.h2h_id is None:
self.h2h_id = randint(0, pow(2, 32)-1)
f.write(pack('!L', self.h2h_id))
if self.e2e_id is None:
self.e2e_id = randint(0, pow(2, 32)-1)
f.write(pack('!L', self.e2e_id))
f.write(content)
return f.getvalue()
def all_avps(self):
for a in self.avps:
for sub_a in a.all_avps():
yield sub_a
def eval_path(self, path):
elms = path.split('/')[1:]
a = get_filter(elms[0])(self.avps)
return a.eval_path(elms[1:])
def modify_value(self, path, value):
'''traverse AVP tree down to target, and set intermediate length to None
in order to force fixup.'''
elms = path.split('/')[1:]
a = get_filter(elms[0])(self.avps)
a.length = None
a.modify_value(elms[1:], value)
def suppress_avps(self, path):
elms = path.split('/')[1:]
assert(len(elms) >= 1)
if len(elms) == 1:
self.length = None
m = get_matcher(elms[0])
new_avps = []
for a in self.avps:
if not m(a):
new_avps.append(a)
self.avps = new_avps
else:
a = get_filter(elms[0])(self.avps)
a.length = None
a.suppress_avps(elms[1:])
def overflow_avps(self, path, count):
elms = path.split('/')[1:]
assert(len(elms) >= 1)
if len(elms) == 1:
self.length = None
m = get_matcher(elms[0])
existing_avps = [a for a in self.avps if m(a)]
existing_count = len(existing_avps)
assert(existing_count > 0)
self.avps.extend([existing_avps[-1]] * (count-existing_count))
else:
a = get_filter(elms[0])(self.avps)
a.length = None
a.overflow_avps(elms[1:], count)
def compute_path(self, avp):
avps = [a for a in self.avps if a.code == avp.code and a.vendor == avp.vendor]
assert(len(avps) > 0)
attrs = {}
if avp.code != 0:
attrs['code'] = avp.code
if avp.vendor != 0:
attrs['vendor'] = avp.vendor
path = '/'
for name in attrs:
path += '%s=%d' % (name, attrs[name])
if len(avps) == 1:
return path
else:
return '%s[%d]' % (path, avps.index(avp))
class Avp:
def __init__(self, **kwds):
self.code = 0
self.V = False
self.M = False
self.P = False
self.reserved = None
self.vendor = 0
self.avps = []
self.data = None
self.length = None
self.model = None
for k in kwds:
if k == 'u32':
self.data = pack('!L', kwds[k])
elif k == 's32':
        self.data = pack('!i', kwds[k])
elif k == 'u64':
self.data = pack('!Q', kwds[k])
elif k == 'f32':
self.data = pack('!f', kwds[k])
elif k == 'f64':
self.data = pack('!d', kwds[k])
elif k == 'v4':
self.data = pack('!H', 1) + inet_pton(AF_INET, kwds[k])
elif k == 'v6':
self.data = pack('!H', 2) + inet_pton(AF_INET6, kwds[k])
else:
setattr(self, k, kwds[k])
def __repr__(self, offset=0, indent=2):
attrs = {}
attrs['code'] = self.code
for k in ['reserved', 'vendor', 'data', 'length']:
if getattr(self, k) is not None:
attrs[k] = getattr(self, k)
model_avp = None
if hasattr(self, 'model_avp'):
model_avp = self.model_avp
if self.V: attrs['V'] = True
if self.M: attrs['M'] = True
if self.P: attrs['P'] = True
if len(self.avps) > 0: attrs['avps'] = self.avps
r = ''
if model_avp is not None:
r += ' '*offset + '# %s\n' % model_avp.name
r += ' '*offset + 'Avp('
elms = []
for k in ['code', 'V', 'M', 'P', 'reserved', 'vendor']:
if k in attrs:
elms.append('%s=%r' % (k, attrs[k]))
r += ', '.join(elms)
if hasattr(self, 'var'):
r += ', data=%s' % getattr(self, 'var')
elif 'avps' in attrs:
r += ', avps=[\n'
for a in self.avps:
r += a.__repr__(offset+indent, indent) + ',\n'
r += ' '*offset + ']'
elif 'data' in attrs:
if model_avp is not None:
if model_avp.datatype in ['Unsigned32']:
r += ', u32=%d' % unpack('!L', attrs['data'])[0]
elif model_avp.datatype in ['Integer32', 'Enumerated']:
r += ', u32=%d' % unpack('!L', attrs['data'])[0]
elif model_avp.datatype in ['Unsigned64']:
r += ', u64=%d' % unpack('!Q', attrs['data'])[0]
elif model_avp.datatype == 'Address':
family = unpack('!H', attrs['data'][:2])[0]
if family == 1:
r += ', v4=%r' % inet_ntop(AF_INET, attrs['data'][2:])
elif family == 2:
r += ', v6=%r' % inet_ntop(AF_INET6, attrs['data'][2:])
else:
r += ', data=%r' % attrs['data']
else:
r += ', data=%r' % attrs['data']
if self.model:
r += ', conformant=%r' % self.model
r += ')'
return r
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def decode(s):
f = StringIO(s)
attrs = {}
attrs['code'] = unpack('!L', read_exactly(f, 4))[0]
flags = unpack('!B', read_exactly(f, 1))[0]
if flags & 0x80: attrs['V'] = True
if flags & 0x40: attrs['M'] = True
if flags & 0x20: attrs['P'] = True
reserved = flags & 0x1f
if reserved: attrs['reserved'] = reserved
length = unpack24(read_exactly(f, 3))
attrs['length'] = length
data_length = length
data_length -= 8
if flags & 0x80 != 0:
attrs['vendor'] = unpack('!L', read_exactly(f, 4))[0]
data_length -= 4
if data_length < 0: raise AVPInvalidLength()
data = read_exactly(f, data_length)
attrs['padded_length'] = length
if data_length % 4 != 0:
padding = 4 - (data_length % 4)
read_exactly(f, padding)
attrs['padded_length'] += padding
attrs['data'] = data
if len(data) < 12:
return Avp(**attrs)
try:
avps = []
while True:
cld_a = Avp.decode(data)
avps.append(cld_a)
assert(cld_a.padded_length % 4 == 0)
data = data[cld_a.padded_length:]
if len(data) == 0:
break
attrs['avps'] = avps
except:
pass
return Avp(**attrs)
def encode(self):
f = StringIO()
f.write(pack('!L', self.code))
flags = 0
if self.V: flags |= 0x80
if self.M: flags |= 0x40
if self.P: flags |= 0x20
if self.reserved: flags |= self.reserved
f.write(pack('!B', flags))
content = ''
if self.avps:
content = ''
for a in self.avps:
content += a.encode()
elif self.data:
content = self.data
length = self.length
if length is None:
length = len(content)
length += 8
if self.V:
length += 4
f.write(pack24(length))
if self.V:
f.write(pack('!L', self.vendor))
if content:
f.write(content)
if length % 4 != 0:
padding = 4 - (length % 4)
f.write('\x00' * padding)
return f.getvalue()
def all_avps(self):
yield self
for a in self.avps:
for sub_a in a.all_avps():
yield sub_a
def eval_path(self, elms):
if len(elms) == 0:
return self
a = get_filter(elms[0])(self.avps)
return a.eval_path(elms[1:])
def modify_value(self, elms, value):
'''traverse AVP tree down to target, and set intermediate length to None
in order to force fixup.'''
if len(elms) == 0:
self.length = None
self.data = value
self.avps = []
return
a = get_filter(elms[0])(self.avps)
a.length = None
a.modify_value(elms[1:], value)
def suppress_avps(self, elms):
if len(elms) == 1:
self.length = None
m = get_matcher(elms[0])
new_avps = []
for a in self.avps:
if not m(a):
new_avps.append(a)
self.avps = new_avps
else:
a = get_filter(elms[0])(self.avps)
a.length = None
a.suppress_avps(elms[1:])
def overflow_avps(self, elms, count):
if len(elms) == 1:
self.length = None
m = get_matcher(elms[0])
existing_avps = [a for a in self.avps if m(a)]
existing_count = len(existing_avps)
assert(existing_count > 0)
self.avps.extend([existing_avps[-1]] * (count-existing_count))
else:
a = get_filter(elms[0])(self.avps)
a.length = None
a.overflow_avps(elms[1:], count)
def compute_path(self, avp):
index = None
found = False
seen = 0
for a in self.avps:
if a.code == avp.code and a.vendor == avp.vendor:
seen += 1
if a == avp:
assert(index is None)
index = seen-1
assert(index is not None and seen >= 1)
if seen == 1:
return '/code=%d,vendor=%d' % (avp.code, avp.vendor)
else:
return '/code=%d,vendor=%d[%d]' % (avp.code, avp.vendor, seen-1)
def overflow_stacking(self, depth=128):
new_avp = deepcopy(self)
for x in range(depth):
stack_avp = deepcopy(self)
stack_avp.length = None
stack_avp.avps.append(new_avp)
new_avp = stack_avp
data = ''
for a in self.avps:
data += a.encode()
data += new_avp.encode()
return data
if __name__ == '__main__':
from binascii import unhexlify as ux
from binascii import hexlify as x
UNPADDED_AVP = ux('0000012b4000000c00000000')
a = Avp.decode(UNPADDED_AVP)
assert(a.encode() == UNPADDED_AVP)
PADDED_AVP = ux('0000010d400000334d75205365727669636520416e616c797a6572204469616d6574657220496d706c656d656e746174696f6e00')
a = Avp.decode(PADDED_AVP)
assert(a.encode() == PADDED_AVP)
CER = ux('010000c88000010100000000000000000000000000000108400000113132372e302e302e3100000000000128400000166473742e646f6d61696e2e636f6d0000000001014000000e00017f00000100000000010a4000000c000000000000010d400000334d75205365727669636520416e616c797a6572204469616d6574657220496d706c656d656e746174696f6e000000012b4000000c000000000000010c4000000c000007d100000104400000200000010a4000000c000028af000001024000000c01000000')
m = Msg.decode(CER)
assert(m.encode() == CER)
m = Msg(avps=[Avp(code=280, data='toto'), Avp(code=280, data='toto'), Avp(code=280, data='tata')])
p = m.compute_path(Avp(code=280, data='toto'))
assert(p == '/code=280[0]')
p = m.compute_path(Avp(code=280, data='tata'))
assert(p == '/code=280[2]')
m = Msg(avps=[Avp(code=280, data='toto'), Avp(code=281, data='toto'), Avp(code=282, data='tata')])
p = m.compute_path(Avp(code=280, data='toto'))
assert(p == '/code=280')
m = Msg(avps=[Avp(code=280, data='toto'), Avp(code=281, data='toto'), Avp(code=282, data='tata')])
p = m.compute_path(Avp(code=280, data='toto'))
assert(p == '/code=280')
m = Msg(avps=[Avp(code=280, data='toto'), Avp(code=280, data='toto'), Avp(code=280, data='tata')])
a = m.eval_path('/code=280')
assert(a == Avp(code=280, data='toto'))
a = m.eval_path('/code=280[1]')
assert(a == Avp(code=280, data='toto'))
a = m.eval_path('/code=280,vendor=0[1]')
assert(a == Avp(code=280, data='toto'))
a = m.eval_path('/code=280[2]')
assert(a == Avp(code=280, data='tata'))
a = Avp(code=257, v4='127.0.0.1')
assert(a.encode() == ux('000001010000000e00017f0000010000'))
|
StarcoderdataPython
|
11289188
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import (
PuntoVenditaViewSet,
)
# Create a router and register our viewsets with it.
ROUTER = DefaultRouter()
ROUTER.register('punti-vendita', PuntoVenditaViewSet)
# The API URLs are now determined automatically by the router.
# Additionally, we include the login URLs for the browsable API.
urlpatterns = [
path('', include(ROUTER.urls)),
]
|
StarcoderdataPython
|
5025006
|
<filename>test/command_line/test_refine_bravais_settings.py
from __future__ import absolute_import, division, print_function
import json
import os
import pytest
from cctbx import sgtbx, uctbx
from dxtbx.serialize import load
from dials.command_line import refine_bravais_settings
def test_refine_bravais_settings_i04_weak_data(dials_regression, tmpdir):
data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
pickle_path = os.path.join(data_dir, "indexed.pickle")
experiments_path = os.path.join(data_dir, "experiments.json")
with tmpdir.as_cwd():
refine_bravais_settings.run(
[
pickle_path,
experiments_path,
"reflections_per_degree=5",
"minimum_sample_size=500",
"beam.fix=all",
"detector.fix=all",
"prefix=tst_",
]
)
for i in range(1, 10):
assert tmpdir.join("tst_bravais_setting_%i.expt" % i).check()
experiments_list = load.experiment_list(
tmpdir.join("tst_bravais_setting_9.expt").strpath, check_format=False
)
assert len(experiments_list) == 1
assert (
experiments_list[0]
.crystal.get_unit_cell()
.is_similar_to(uctbx.unit_cell((57.782, 57.782, 150.011, 90, 90, 90)))
)
assert experiments_list[0].crystal.get_space_group().type().hall_symbol() == " P 4"
assert tmpdir.join("tst_bravais_summary.json").check()
with tmpdir.join("tst_bravais_summary.json").open("rb") as fh:
bravais_summary = json.load(fh)
assert set(bravais_summary) == {"1", "2", "3", "4", "5", "6", "7", "8", "9"}
assert set(bravais_summary["9"]).issuperset(
{"bravais", "max_angular_difference", "unit_cell", "rmsd", "nspots"}
)
assert bravais_summary["9"]["unit_cell"] == pytest.approx(
[57.78, 57.78, 150.0, 90.0, 90.0, 90.0], abs=1e-1
)
assert bravais_summary["9"]["bravais"] == "tP"
assert bravais_summary["9"]["recommended"] is True
assert bravais_summary["9"]["rmsd"] == pytest.approx(0.047, abs=1e-2)
def test_refine_bravais_settings_multi_sweep(dials_regression, tmpdir):
data_dir = os.path.join(dials_regression, "indexing_test_data", "multi_sweep")
pickle_path = os.path.join(data_dir, "indexed.pickle")
experiments_path = os.path.join(data_dir, "experiments.json")
with tmpdir.as_cwd():
refine_bravais_settings.run([pickle_path, experiments_path])
for i in range(1, 10):
assert tmpdir.join("bravais_setting_%i.expt" % i).check()
experiments_list = load.experiment_list(
tmpdir.join("bravais_setting_9.expt").strpath, check_format=False
)
assert len(experiments_list) == 4
assert len(experiments_list.crystals()) == 1
assert (
experiments_list[0]
.crystal.get_unit_cell()
.is_similar_to(uctbx.unit_cell((7.31, 7.31, 6.82, 90.00, 90.00, 90.00)))
)
assert experiments_list[0].crystal.get_space_group().type().hall_symbol() == " I 4"
assert tmpdir.join("bravais_summary.json").check()
with tmpdir.join("bravais_summary.json").open("rb") as fh:
bravais_summary = json.load(fh)
for i in range(1, 23):
assert str(i) in bravais_summary
assert bravais_summary["9"]["unit_cell"] == pytest.approx(
[7.31, 7.31, 6.82, 90.00, 90.00, 90.00], abs=1e-1
)
assert bravais_summary["9"]["bravais"] == "tI"
assert bravais_summary["9"]["rmsd"] == pytest.approx(0.103, abs=1e-2)
assert bravais_summary["9"]["recommended"] is True
def test_refine_bravais_settings_trypsin(dials_regression, tmpdir):
data_dir = os.path.join(dials_regression, "indexing_test_data", "trypsin")
pickle_path = os.path.join(data_dir, "indexed.pickle")
experiments_path = os.path.join(data_dir, "experiments.json")
with tmpdir.as_cwd():
refine_bravais_settings.run([pickle_path, experiments_path, "crystal_id=1"])
for i in range(1, 10):
assert tmpdir.join("bravais_setting_%i.expt" % i).check()
experiments_list = load.experiment_list(
tmpdir.join("bravais_setting_5.expt").strpath, check_format=False
)
assert len(experiments_list) == 1
assert (
experiments_list[0]
.crystal.get_unit_cell()
.is_similar_to(uctbx.unit_cell((54.37, 58.29, 66.51, 90.00, 90.00, 90.00)))
)
assert (
experiments_list[0].crystal.get_space_group().type().hall_symbol() == " P 2 2"
)
assert tmpdir.join("bravais_summary.json").check()
with tmpdir.join("bravais_summary.json").open("rb") as fh:
bravais_summary = json.load(fh)
assert set(bravais_summary) == {"1", "2", "3", "4", "5", "6", "7", "8", "9"}
assert bravais_summary["5"]["unit_cell"] == pytest.approx(
[54.37, 58.29, 66.51, 90.00, 90.00, 90.00], abs=1e-1
)
assert bravais_summary["5"]["bravais"] == "oP"
assert bravais_summary["5"]["rmsd"] == pytest.approx(0.1200, abs=1e-2)
assert bravais_summary["5"]["recommended"] is True
assert bravais_summary["9"]["recommended"] is False
def test_refine_bravais_settings_554(dials_regression, tmpdir):
data_dir = os.path.join(dials_regression, "dials-554")
pickle_path = os.path.join(data_dir, "indexed.pickle")
experiments_path = os.path.join(data_dir, "experiments.json")
with tmpdir.as_cwd():
refine_bravais_settings.run([pickle_path, experiments_path])
for i in range(1, 5):
assert tmpdir.join("bravais_setting_%i.expt" % i).check()
experiments_list = load.experiment_list(
tmpdir.join("bravais_setting_5.expt").strpath, check_format=False
)
assert len(experiments_list) == 7
assert len(experiments_list.crystals()) == 1
crystal = experiments_list.crystals()[0]
assert crystal.get_unit_cell().is_similar_to(
uctbx.unit_cell((4.75863, 4.75863, 12.9885, 90, 90, 120))
)
assert crystal.get_space_group().type().hall_symbol() == " R 3"
# assert all of the detectors are different
for expt in experiments_list[1:]:
assert expt.detector != experiments_list[0].detector
for i in (0, 1, 6):
assert experiments_list[i].detector[0].get_origin() == pytest.approx(
(-41, 5.5, -135), abs=1
)
for i in (2, 3, 4, 5):
assert experiments_list[i].detector[0].get_origin() == pytest.approx(
(-41, 91, -99), abs=1
)
assert tmpdir.join("bravais_summary.json").check()
with tmpdir.join("bravais_summary.json").open("rb") as fh:
bravais_summary = json.load(fh)
for i in range(1, 5):
assert str(i) in bravais_summary
assert bravais_summary["5"]["unit_cell"] == pytest.approx(
[4.75863, 4.75863, 12.9885, 90, 90, 120], abs=1e-1
)
assert bravais_summary["5"]["bravais"] == "hR"
assert bravais_summary["5"]["rmsd"] == pytest.approx(0.104, abs=1e-2)
assert bravais_summary["5"]["recommended"] is True
@pytest.mark.parametrize(
"best_monoclinic_beta,expected_space_group,expected_unit_cell",
[
(True, "I 1 2 1", (44.47, 52.85, 111.46, 90.00, 99.91, 90.00)),
(False, "C 1 2 1", (112.67, 52.85, 44.47, 90.00, 102.97, 90.00)),
],
)
def test_setting_c2_vs_i2(
best_monoclinic_beta,
expected_space_group,
expected_unit_cell,
dials_data,
tmpdir,
capsys,
):
data_dir = dials_data("mpro_x0305_processed")
refl_path = data_dir.join("indexed.refl")
experiments_path = data_dir.join("indexed.expt")
with tmpdir.as_cwd():
refine_bravais_settings.run(
[
experiments_path.strpath,
refl_path.strpath,
"best_monoclinic_beta=%s" % best_monoclinic_beta,
]
)
expts_orig = load.experiment_list(experiments_path.strpath, check_format=False)
expts = load.experiment_list(
tmpdir.join("bravais_setting_2.expt").strpath, check_format=False
)
    assert (
        expts[0].crystal.get_space_group().type().lookup_symbol()
        == expected_space_group
    )
assert expts[0].crystal.get_unit_cell().parameters() == pytest.approx(
expected_unit_cell, abs=1e-2
)
with tmpdir.join("bravais_summary.json").open("rb") as fh:
bravais_summary = json.load(fh)
# Verify that the cb_op converts from the input setting to the refined setting
cb_op = sgtbx.change_of_basis_op(str(bravais_summary["2"]["cb_op"]))
assert (
expts_orig[0]
.crystal.change_basis(cb_op)
.get_unit_cell()
.is_similar_to(
expts[0].crystal.get_unit_cell(),
relative_length_tolerance=0.1,
absolute_angle_tolerance=1,
)
)
captured = capsys.readouterr()
assert bravais_summary["2"]["cb_op"] in captured.out
|
StarcoderdataPython
|
1606025
|
<reponame>diegushko/utils
import torch
import torch.nn as nn
from math import ceil
base_model = [
# expand_ratio, channels, repeats, stride, kernel_size
[1, 16, 1, 1, 3],
[6, 24, 2, 2, 3],
[6, 40, 2, 2, 5],
[6, 80, 3, 2, 3],
[6, 112, 3, 1, 5],
[6, 192, 4, 2, 5],
[6, 320, 1, 1, 3]
]
phi_values = {
    # phi_value, resolution, drop_rate
    "b0": (0, 224, 0.2),  # alpha (depth), beta (width), gamma (resolution)
    "b1": (0.5, 240, 0.2),
    "b2": (1, 260, 0.3),
    "b3": (2, 300, 0.3),
    "b4": (3, 380, 0.4),
    "b5": (4, 456, 0.4),
    "b6": (5, 528, 0.5),
    "b7": (6, 600, 0.5)
}
class CNNBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups=1):
super(CNNBlock, self).__init__()
self.cnn = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=groups, # If we set group=1 as we did by default, then this is a normal conv \
bias=False # if we set it to groups=in_channels, then it is a Depthwise convolution.
)
self.bn = nn.BatchNorm2d(out_channels)
self.silu = nn.SiLU() # SiLU > Swish
def forward(self, x):
return self.silu(self.bn(self.cnn(x)))
class SqueezeExcitation(nn.Module):
def __init__(self, in_channels, reduced_dim):
super(SqueezeExcitation, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1), # C x H x W -> C x 1 x 1
nn.Conv2d(in_channels, reduced_dim, 1),
nn.SiLU(),
nn.Conv2d(reduced_dim, in_channels, 1),
nn.Sigmoid()
)
def forward(self, x):
return x * self.se(x)
class InvertedResidualBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
expand_ratio,
reduction=4, # squeeze excitation
survival_prob=0.8 # for stochastic depth
):
super(InvertedResidualBlock, self).__init__()
        self.survival_prob = survival_prob
self.use_residual = in_channels == out_channels and stride == 1
hidden_dim = in_channels * expand_ratio
self.expand = in_channels != hidden_dim
reduced_dim = int(in_channels / reduction)
if self.expand:
self.expand_conv = CNNBlock(
in_channels, hidden_dim, kernel_size=3, stride=1, padding=1
)
        self.conv = nn.Sequential(
            CNNBlock(
                hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim
            ),
            SqueezeExcitation(hidden_dim, reduced_dim),
            # pointwise projection back to out_channels
            nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
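    def stochastic_depth(self, x):
        # Sketch (assumption): mirrors the standard MBConv stochastic-depth
        # step; this method was not part of the original truncated file.
        # Randomly drops the whole residual branch during training.
        if not self.training:
            return x
        binary_tensor = (
            torch.rand(x.shape[0], 1, 1, 1, device=x.device) < self.survival_prob
        )
        return torch.div(x, self.survival_prob) * binary_tensor
    def forward(self, inputs):
        # Standard MBConv forward pass (sketch): expand, depthwise conv + SE +
        # projection, then an optional stochastic-depth residual connection.
        x = self.expand_conv(inputs) if self.expand else inputs
        if self.use_residual:
            return self.stochastic_depth(self.conv(x)) + inputs
        return self.conv(x)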
class EfficientNet(nn.Module):
pass
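# Sketch (assumption, not part of the original file): in the EfficientNet
# compound-scaling scheme, the phi values above are typically converted into
# depth/width factors with alpha=1.2 and beta=1.1, roughly as follows.
def calculate_factors(version, alpha=1.2, beta=1.1):
    phi, resolution, drop_rate = phi_values[version]
    depth_factor = alpha ** phi   # scales the number of layers per stage
    width_factor = beta ** phi    # scales the number of channels per layer
    return width_factor, depth_factor, drop_rate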
|
StarcoderdataPython
|
11271216
|
<filename>replicable/spec.py<gh_stars>0
from __future__ import print_function, unicode_literals, division, generators
import contextlib
from functools import reduce
from itertools import product
import numpy as np
import xxhash
try:
import itertools.imap as map
except ImportError:
pass
try:
import itertools.izip as zip
except ImportError:
pass
@contextlib.contextmanager
def state(seed):
rng_state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(rng_state)
def dict_hash(d):
h = xxhash.xxh64()
    stuff = ''.join(sorted(''.join(list(map(str, d.keys())) + list(map(str, d.values())))))
    h.update(stuff)
return h
class Parameters(dict):
@property
def hash(self):
return dict_hash(self)
class Variable(object):
def __init__(self, names):
self.names = names
def __add__(self, other):
return Specification(self, other)
@property
def shape(self):
return len(self.names),
@property
def size(self):
return np.prod(*self.shape)
def iterate(self, seed=0):
return Specification(self).iterate(seed)
class Constant(Variable):
def __init__(self, names, values):
super(Constant, self).__init__(names)
self.values = np.atleast_1d(values)
def __repr__(self):
return "<ConstantParameter({})>".format({n:v for n, v in zip(self.names, self.values)})
def __getitem__(self, item):
try:
return self.values[self.names.index(item)]
except ValueError:
raise KeyError("key {} is not present".format(item))
def __eq__(self, other):
if self.size != other.size:
return False
try:
return all((other[i] == self[i]) and (type(other[i]) == type(self[i])) for i in self.names)
except KeyError:
return False
@property
def shape(self):
return self.values.shape
class Stochastic(Variable):
def __init__(self, names, sampler, n):
super(Stochastic, self).__init__(names)
self.sampler = sampler
self.n = n
@property
def shape(self):
return self.n,
def sample(self, rng, n=1):
yield {name: values for name, values in zip(self.names, self.sampler(rng, n))}
class IntegrityError(Exception):
pass
class Specification(object):
def __init__(self, *parameters):
self.parameters = parameters
self.gridded = [p for p in self.parameters if isinstance(p, Constant)]
self.stochastic = [p for p in self.parameters if isinstance(p, Stochastic)]
self.unpacked_gridded = [(name, value) for p in self.gridded for name, value in zip(p.names, p.values)]
assert len(set(self.names)) == len(self.names), "Unique parameter names must be used"
@property
def names(self):
return [p for params in self.parameters for p in params.names]
@property
def size(self):
return np.prod([p.size for p in self.parameters])
def __len__(self):
return self.size
@property
def shape(self):
return reduce(lambda a, b: a + b, [p.shape for p in self.parameters])
def __call__(self, directory, seed, mode='r'):
"""use a directory for storing simulations together with a seed to create them"""
return PersistedSpecificationIndex(directory, self, seed, mode)
def iterate(self, seed=0):
"""
Iterate over all parameters
:param seed: int: Seed for stochastic components
:return: generator
"""
rng = np.random.RandomState(seed)
names, ranges = zip(*self.unpacked_gridded)
prod = product(*ranges)
griddeds = ({n: p for n, p in zip(names, ps)} for ps in prod)
iterators = [p.sample(rng, 1) for p in self.stochastic] + [griddeds]
        while True:
            parameters = {}
            for partial in map(next, iterators):
                parameters.update(partial)
            yield Parameters(**parameters)
# def __enter__(self):
# self.index_fname = os.path.join(self.directory, 'index-{}.h5'.format(self.hash_name))
# if not os.path.exists(self.index_fname):
# self.overwrite_index()
# else:
# self.validate_integrity(verbose=True)
# return self
#
# def __exit__(self, exc_type, exc_val, exc_tb):
# self.directory, self.seed = None, None
# def overwrite_index(self):
# with h5py.File(self.index_fname, 'w', libver='latest') as f:
# pass
# store = pd.HDFStore(self.index_fname, 'r+')
# for paramset, hsh in tqdm(self.iterate(), total=self.size, desc='building index'):
# df = pd.DataFrame(paramset)
# df['hash'] = hsh.hexdigest()
# store.append('index', df, format='table', data_columns=True)
# @property
# def files(self):
# _files = []
# for root, dirs, _files in os.walk(self.directory):
# pass
# _files = [os.path.join(self.directory, f) for f in _files if f != self.index_fname]
# return _files
# def _create_virtual_link(self, dataset_names, verbose=True):
# parameter_generator = tqdm(self._iterate(), total=self.size, dsec='linking', disable=not verbose)
# first_fname = next(parameter_generator)[0]
# with h5py.File(os.path.join(self.directory, first_fname), 'r') as first:
# layouts = [h5py.VirtualLayout(shape=(self.size, )+first[ds].shape, dtype=first[ds].dtype) for ds in dataset_names]
#
# for i, (file, hash) in enumerate(parameter_generator):
# vsources = [h5py.VirtualSource(file, ds, shape=first.shape, dtype=first.dtype)]
# layout[i] = vsource
#
# # Add virtual dataset to output file
# with h5py.File(self.index_fname, 'a', libver='latest') as f:
# f.create_virtual_dataset('data', layout, fillvalue=np.nan)
#
# def read_parameter(self, parameter):
# with h5py.File(self.index_fname, 'r') as f:
# return f['parameters'][parameter]
#
# def validate_integrity(self, verbose=True):
# """
# Validates the integrity of the index:
# Are all files present?
# Does the total hash for the files match that which is expected by the specification?
# :return: True if valid
# """
# nmissing = len(self) - len(self.files)
# if nmissing > 0:
# raise IntegrityError("Missing {} files, run `integrity_audit` to identify them".format(nmissing))
# elif nmissing < 0:
# raise IntegrityError("There are {} more valid files than were expected, run `integrity_audit` "
# "to identify them.".format(-nmissing))
# hsh = xxhash.xxh64()
# for f in tqdm(self.files, desc='Hashing files', disable=not verbose):
# hsh.update(os.path.basename(f).strip('.h5'))
# file_hash = hsh.hexdigest()
# hsh = xxhash.xxh64()
# for paramset, hsh in tqdm(self.iterate(), total=self.size, desc='Hashing parameters', disable=not verbose):
# hsh.update(hsh)
# param_hash = hsh.hexdigest()
# if file_hash != param_hash:
# raise IntegrityError("Hash mismatch: files are corrupted or mislabelled, run `integrity_audit` to identify"
# "the problematic ones")
# return True
# def integrity_audit(self, test_existence=True, test_read=False, verbose=True):
# missing = []
# for i, (paramset, hash) in enumerate(tqdm(self.iterate(), total=self.size, desc='Hashing parameters',
# disable=not verbose)):
# fname = os.path.join(self.directory, '{}.h5')
# if test_existence:
# if not os.path.exists(fname):
# missing.append((i, hash))
# def save(self, results, outnames, params, param_hash):
# """
# Save results from a function mapped to a simulation dataset
# :param results: The list of outputs from the function
# :param outnames: Names for each output in the results list
# :param params: The simulation parameters used to create the results
# :param param_hash: The hash of the parameters
# :return:
# """
# h = param_hash.hexdigest()
# fname = os.path.join(self.directory, h+'.h5')
# with h5py.File(fname, 'a', libver='latest') as f:
# f.attrs['hash'] = h
# parameters = f.require_group('parameters')
# outputs = f.require_group('output')
# for key, value in params.items():
# parameters.require_dataset(key, value.shape, value.dtype, exact=True)
# for result, outname in zip(results, outnames):
# outputs.require_dataset(outname, dtype=result.dtype, shape=result.shape, exact=True, data=result)
# def map(self, function, outnames, verbose=True):
# for paramset, hsh in tqdm(self.iterate(), total=self.size, disable=not verbose):
# results = function(**paramset)
# assert len(results) == len(outnames), "Length of `outnames` must be the same as length of function output"
# self.save(results, outnames, paramset, hsh)
|
StarcoderdataPython
|
5192107
|
<filename>src/creationals/scraper_factory.py
'''
@author: oluiscabral
'''
from scrapers.url import URL
from actioners.interfaces.i_login_control import ILoginControl
from scrapers.composites.compare_scraper import CompareScraper
from creationals.stockreport_factory import StockReportScraperFactory
from creationals.compare_factory import CompareScraperFactory
from creationals.balance_factory import BalanceScraperFactory
from creationals.income_factory import IncomeScraperFactory
from creationals.cashflow_factory import CashflowScraperFactory
from scrapers.names import STOCKREPORT, COMPARE, BALANCE, INCOME, CASHFLOW
class ScraperFactory:
@staticmethod
def create_compare_scraper(login_control):
compare_scraper = CompareScraper(COMPARE, URL('https://app.stockopedia.com/compare?tickers=${}'), login_control)
compare_scraper.create_stockreport_scraper(ScraperFactory)
return compare_scraper
@staticmethod
def create(t:str, login_control:ILoginControl):
if t == STOCKREPORT:
return StockReportScraperFactory.create(login_control)
if t == COMPARE:
return CompareScraperFactory.create(login_control)
if t == BALANCE:
return BalanceScraperFactory.create(login_control)
if t == INCOME:
return IncomeScraperFactory.create(login_control)
if t == CASHFLOW:
return CashflowScraperFactory.create(login_control)
|
StarcoderdataPython
|
8106115
|
"""
Very thin wrapper around Fabric. We basically re-implement
the ``fab`` executable.
We use this when we need to create PyCharm run configurations that run Fabric tasks.
"""
if __name__ == '__main__':
from fabric.main import main
main()
|
StarcoderdataPython
|
6687934
|
import matplotlib.pyplot as plt
import numpy as np
import csv
from learningModels.process import LearningProcess, compute_avg_return, points_history
from learningModels.GameEnv import SnakeGameEnv
def write_data(file, data):
"""Save the data in csv file.
file (str): Path where the file will be saved.
data (array): data to be saved.
"""
with open(file, 'w') as f:
data_writer = csv.writer(f, delimiter=',')
for row in data:
data_writer.writerow(row)
def read_data(file):
"""Read and load the data from csv file.
file (str): Path where the file will be saved.
"""
data = []
with open(file, 'r') as f:
data_reader = csv.reader(f, delimiter=',')
for row in data_reader:
data.append([float(i) for i in row])
return data
def training_loops(snakes_games, rewards, redundance):
"""Total training loop for multiple types of games and rewards, each type of game
will be proved with all the rewards an n number of iterations.
snake_games (list): List of objects to be train.
rewards (list): List of dictionaries (rewards of the snake game).
redundance (int): Number of iterations.
"""
for snake in snakes_games:
for num, reward in enumerate(rewards):
agent_environment = SnakeGameEnv(snake, reward, len(snake.state()))
Lp = LearningProcess(agent_environment)
for i in range(redundance):
print('*-*'*15)
print('iteration {} using the reward {} with the rules/game {} and input {}'.format(i, reward, snake.__class__.__name__, len(snake.state())))
Lp.pre_learning_process()
returns, losses = Lp.training()
path = Lp.policy_saver(num, i)
save_returns = path + '/returns.csv'
save_losses = path + '/losses.csv'
write_data(save_returns, returns)
write_data(save_losses, losses)
def sampler(snake, reward, num_reward=0, iteration=0, num_episodes=30, points=False):
"""Evaluates n samples of a trained network.
snake (object): type of game.
reward (dictionary): reward of the snake game environment.
num_reward (int): Number wich indentifies the reward.
iteration (int): Number of the iteration.
num_episodes (int): amount of samples to be taken.
points (bool): If its true then the return will be the a list of lists wich
contains the amount of points per step. The lists have a length
accord with the number of steps the snake 'survived'.
"""
agent_environment = SnakeGameEnv(snake, reward, len(snake.state()))
Lp = LearningProcess(agent_environment)
policy = Lp.load_previous_policy(num_reward, iteration)
if points:
return points_history(Lp.sample_env, policy, num_episodes)
else:
return compute_avg_return(Lp.sample_env, policy, num_episodes)
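# Minimal, self-contained check of the csv helpers above (illustrative only;
# 'example_returns.csv' is an arbitrary throwaway file name).
if __name__ == '__main__':
    sample_rows = [[0.0, 1.5], [2.0, 3.5]]
    write_data('example_returns.csv', sample_rows)
    print(read_data('example_returns.csv'))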
|
StarcoderdataPython
|
88587
|
<reponame>astro-projects/astro
from astro.files.base import File, get_files # noqa: F401 # skipcq: PY-W2000
|
StarcoderdataPython
|
6437261
|
<filename>run.py
#! /usr/bin/env python
import argparse
from tensorflow.keras import models
from hts.preprocess import *
from hts.visualize import predict_plot
from hts.utils import merge_data, load_raw_data, parse
from hts.model import Model
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--type', type=str, default='lstm',
choices=['lstm', 'gru', 'mlp', 'tcn'],
                    help='Model architecture type.')
parser.add_argument('--activation', type=str, default=None,
choices=['tanh', 'elu', 'relu'],
help='Activation function.')
parser.add_argument('--optimizer', type=str, default='adam',
choices=['sgd', 'rmsprop', 'adam'],
help='Algorithm for the minimization of loss function.')
parser.add_argument('--loss_fn', type=str, default='mse',
choices=['mse', 'mae', 'msle'],
help='Loss function.')
parser.add_argument('--num_layers', type=int, default=2,
help='Number of hidden layers.')
parser.add_argument('--num_neurons', type=int, default=50,
help='Number of neurons per hidden layer.')
parser.add_argument('--learning_rate', type=float, default=0.01,
help='Learning rate for the optimizer.')
parser.add_argument('--epochs', type=int, default=100,
help='Number of training epochs.')
parser.add_argument('--batch_size', type=int, default=64,
help='Batch size.')
parser.add_argument('--dataset', type=str, default='deep',
choices=['deep', 'shallow'],
                    help='Two available datasets, generated by the deep and shallow LoRa sensors.')
parser.add_argument('--split_ratio', type=float, default=0.8,
help='Ratio for train-test split.')
parser.add_argument('--step', type=int, default=18,
help='Value for timestamp.')
parser.add_argument('--sensor_test', action='store_true',
help='Test the model on other sensor.')
parser.add_argument('--derivate', action='store_true',
                    help='Use derivatives of the variables.')
parser.add_argument('--save_checkpoint', action='store_true',
help='Save the best model after the training is done.')
args = parser.parse_args()
model_name_prefix = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
air_path = 'hts/data/Senzor_zraka.csv'
pressure_path = 'hts/data/DHMZ.csv'
if args.dataset == 'deep':
soil_path = 'hts/data/Senzor_zemlje_2.csv'
test_soil_path = 'hts/data/sensor_earth1.csv' # TEST SENSOR
save_dir = f'saved_models/{model_name_prefix}-deep.h5'
elif args.dataset == 'shallow':
soil_path = 'hts/data/Senzor_zemlje.csv'
save_dir = f'saved_models/{model_name_prefix}-shallow.h5'
soil_raw, pressure_raw, air_raw = load_raw_data(soil_path, pressure_path, air_path)
soil = clean_soil(soil_raw, absolute=False)
pressure = clean_air(pressure_raw)
air = clean_air(air_raw)
data = merge_data(pressure, air, soil, drop_duplicate_time=True)
# TEST SENSOR (only available for the 'deep' dataset; fall back to the
# training sensor otherwise so that --sensor_test does not crash on 'shallow')
if args.dataset == 'deep':
    test_soil_raw, pressure_raw, air_raw = load_raw_data(test_soil_path, pressure_path, air_path)
    test_soil = parse_json_data(test_soil_raw)
    test_soil = clean_soil(test_soil, absolute=False)
    test_data = merge_data(pressure, air, test_soil, drop_duplicate_time=True)
else:
    test_data = data
#data, means = parse(data)
#test_data, test_means = parse(test_data)
""" For adding derivation to data """
if args.derivate:
data = additional_processing(data)
test_data = additional_processing(test_data)
if args.type == 'lstm' or args.type == 'gru' or args.type == 'tcn':
if args.sensor_test:
x_train, y_train, x_valid, y_valid, x_test, y_test, \
scaler = process_data_rnn(data, args.step, args.split_ratio, test_data)
else:
x_train, y_train, x_valid, y_valid, x_test, y_test, \
scaler = process_data_rnn(data, args.step, args.split_ratio)
elif args.type == 'mlp':
data_reframed = series_to_supervised(data.values, n_in=1)
data_reframed.drop('var6(t-1)', axis=1, inplace=True)
if args.sensor_test:
data_test_reframed = series_to_supervised(test_data.values, n_in=1)
data_test_reframed.drop('var6(t-1)', axis=1, inplace=True)
        x_train, y_train, x_valid, y_valid, x_test, y_test, scaler = \
            process_data_mlp(data, args.split_ratio, test_data)  # mirror the RNN branch: pass the test-sensor data
else:
x_train, y_train, x_valid, y_valid, x_test, y_test, scaler = \
process_data_mlp(data, args.split_ratio)
if args.type == 'lstm' or args.type == 'gru' or args.type == 'tcn':
net = Model(
type=args.type,
input_shape=(x_train.shape[1], x_train.shape[2]),
num_layers=args.num_layers,
num_neurons=args.num_neurons
)
elif args.type == 'mlp':
net = Model(
type=args.type,
input_shape=(x_train.shape[1],),
num_layers=args.num_layers,
num_neurons=args.num_neurons
)
net.build(
optimizer=args.optimizer,
learning_rate=args.learning_rate,
loss_fn=args.loss_fn,
activation=args.activation
)
print('\nTrain set shape: Input {} Target {}'.format(x_train.shape, y_train.shape))
print('Valid set shape: Input {} Target {}'.format(x_valid.shape, y_valid.shape))
print('Test set shape: Input {} Target {}\n'.format(x_test.shape, y_test.shape))
model, losses = net.train(
x_train=x_train,
y_train=y_train,
x_valid=x_valid,
y_valid=y_valid,
epochs=args.epochs,
batch_size=args.batch_size,
save_checkpoint=args.save_checkpoint,
save_dir=save_dir
)
if args.save_checkpoint:
model = models.load_model(save_dir)
print('\n---Loaded model checkpoint---\n')
predict_plot(model, x_train, y_train, x_valid, y_valid, x_test, y_test, scaler, losses=losses, nn_type=args.type,
mean_list=None, test_mean_list=None)
#predict_plot(model, x_train, y_train, x_valid, y_valid, x_test, y_test, scaler, losses=losses)
if not args.save_checkpoint:
decision = input("\nSave model? [y,n] ")
if decision == "y":
model.save(save_dir)
else:
pass
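# Example invocations (illustrative):
#   python run.py --type lstm --dataset deep --epochs 50 --save_checkpoint
#   python run.py --type mlp --dataset shallow --num_layers 3 --num_neurons 100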
|
StarcoderdataPython
|
6496458
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
import webtest
import main
@pytest.fixture
def app(testbed):
return webtest.TestApp(main.app)
def test_get(app):
main.Greeting(
parent=main.guestbook_key('default_guestbook'),
author='123',
content='abc'
).put()
response = app.get('/')
# Let's check if the response is correct.
assert response.status_int == 200
def test_post(app):
with mock.patch('main.images') as mock_images:
mock_images.resize.return_value = 'asdf'
response = app.post('/sign', {'content': 'asdf'})
mock_images.resize.assert_called_once_with(mock.ANY, 32, 32)
# Correct response is a redirect
assert response.status_int == 302
def test_img(app):
greeting = main.Greeting(
parent=main.guestbook_key('default_guestbook'),
id=123
)
greeting.author = 'asdf'
greeting.content = 'asdf'
greeting.avatar = b'123'
greeting.put()
response = app.get('/img?img_id=%s' % greeting.key.urlsafe())
assert response.status_int == 200
def test_img_missing(app):
# Bogus image id, should get error
app.get('/img?img_id=123', status=500)
def test_post_and_get(app):
with mock.patch('main.images') as mock_images:
mock_images.resize.return_value = 'asdf'
app.post('/sign', {'content': 'asdf'})
response = app.get('/')
assert response.status_int == 200
|
StarcoderdataPython
|
3598920
|
import pygame
class Label(pygame.sprite.Sprite):
    msg: str
    position: tuple
def __init__(self, font_size, position):
pygame.sprite.Sprite.__init__(self)
self.position = position
self.font = pygame.font.Font("28_Days_Later.ttf", font_size)
self.msg = ""
self.image = self.font.render(self.msg, 1, (0, 0, 0))
self.rect = self.image.get_rect()
self.rect.center = position
self.color = (0, 0, 0)
def set_color(self, color):
self.color = color
def set_position(self, position):
self.rect.center = position
def set_msg(self, msg: str):
self.msg = msg
self.image = self.font.render(self.msg, 1, self.color)
self.image.get_rect()
self.rect.center = self.position
class Timer(Label):
minute: int
second: int
frame_rate: int
def __init__(self, font_size, position, minute, second, frame_rate):
Label.__init__(self, font_size, position)
self.frame_rate = frame_rate
self.minute, self.second = self.convert(minute, second)
def convert(self, minute, second):
minute += int(second / 60)
second = (second % 60) * self.frame_rate
return minute, second
    def get_real_second(self):
        return int(self.second / self.frame_rate)
def time_up(self):
return self.minute <= 0 and self.get_real_second() <= 0
    def update(self):
        if self.second < 0:
            self.minute -= 1
            self.second = 60 * self.frame_rate
        if self.minute < 10:
            real_minute = "0" + str(self.minute)
        else:
            real_minute = str(self.minute)
        if self.get_real_second() < 10:
            real_second = "0" + str(self.get_real_second())
        else:
            real_second = str(self.get_real_second())
        time_string = "%s %s" % (real_minute, real_second)
        self.set_msg(time_string)
        self.second -= 1
class TempLabel(Label):
def __init__(self, font_size, position, real_time, frame_rate, color=(0, 255, 0)):
Label.__init__(self, font_size, position)
self.frame_time = real_time * frame_rate
self.set_color(color)
def update(self):
self.frame_time -= 1
if self.frame_time < 0:
self.kill()
class Pointer(Label):
def __init__(self, font_size, positions, values):
Label.__init__(self, font_size, positions[0])
self.positions = positions
self.values = values
self.index = 0
self.set_msg("*")
def move_next(self):
if self.index == len(self.positions)-1:
self.index = 0
else:
self.index += 1
self.rect.center = self.positions[self.index]
def move_previous(self):
if self.index == 0:
self.index = len(self.positions)-1
else:
self.index -= 1
self.rect.center = self.positions[self.index]
def get_value(self):
return self.values[self.index]
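# Minimal usage sketch (assumes pygame is installed and the "28_Days_Later.ttf"
# font file used by Label is available next to this module; all other values
# below are arbitrary demo parameters).
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    clock = pygame.time.Clock()
    timer = Timer(font_size=24, position=(160, 120), minute=0, second=5, frame_rate=30)
    sprites = pygame.sprite.Group(timer)
    while not timer.time_up():
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                timer.minute, timer.second = 0, 0  # force the countdown to finish
        sprites.update()
        screen.fill((255, 255, 255))
        sprites.draw(screen)
        pygame.display.flip()
        clock.tick(30)
    pygame.quit()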
|
StarcoderdataPython
|
167768
|
import sys
def check_leap_year(year):
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
return 1
else:
return 0
else:
return 1
else:
return 0
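# Sanity checks (century years are leap only when divisible by 400):
#   check_leap_year(2000) -> 1, check_leap_year(1900) -> 0, check_leap_year(2024) -> 1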
def get_next_date(dd, mm, yy):
    days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if check_leap_year(yy):
        days_in_month[1] = 29
    if dd == days_in_month[mm - 1]:
        # last day of the month: roll over to the first day of the next month
        dd = 1
        mm += 1
        if mm == 13:
            # last day of the year: roll over to January of the next year
            mm = 1
            yy += 1
    else:
        dd += 1
    print("{0}/{1}/{2}".format(dd, mm, yy))
def validate_input(dd, mm, yy):
    if mm < 1 or mm > 12:
        print("Invalid month")
        sys.exit(0)
    days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if check_leap_year(yy):
        days_in_month[1] = 29
    if dd < 1 or dd > days_in_month[mm - 1]:
        print("Invalid date")
        sys.exit(0)
dd = int(input("Enter date"))
mm = int(input("Enter month"))
yy = int(input("Enter year"))
validate_input(dd,mm,yy)
get_next_date(dd,mm,yy)
|
StarcoderdataPython
|
5188027
|
<filename>mall/superadmin/models.py
#coding=utf-8
from mall.database import Column, Model, SurrogatePK, db, reference_col, relationship
import datetime as dt
# System update version record
class SystemVersion(SurrogatePK,Model):
    __tablename__ = 'system_versions'
    # Version number
    number = Column(db.String(20))
    # Title
    title = Column(db.String(100))
    # Summary
    summary = Column(db.String(200))
    # Content
    context = Column(db.UnicodeText)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.now)
# Base product data: when a product name is entered it is matched automatically,
# saving the shop owner the input; maintained by administrators
class BaseProducts(SurrogatePK, Model):
    __tablename__ = 'base_products'
    # Product name
    title = Column(db.String(255))
    # Original price
    original_price = Column(db.Numeric(15,2))
    # Special (discounted) price
    special_price = Column(db.Numeric(15,2))
    # Details
    note = Column(db.UnicodeText())
    # Category
    category_id = Column(db.Integer())
    # Extra field keys
    attach_key = Column(db.String(200))
    # Extra field values
    attach_value = Column(db.String(500))
    # Main display image for the home page
    main_photo = Column(db.String(200))
    # Barcode
    ean = Column(db.String(50))
    # Unit / specification
    unit = Column(db.Integer,default=1)
# Product category
class Category(SurrogatePK,Model):
    __tablename__ = 'categorys'
    #: parent category; self-reference enables unlimited nesting levels
    parent_id = reference_col('categorys')
    children = relationship("Category",lazy="joined",join_depth=2)
    goods_id = relationship('Goods', backref='category')
    # Category name
    name = Column(db.String(100))
    # Category icon
    ico = Column(db.String(100))
    # Sort order
    sort = Column(db.Integer(),default=100)
    # Status
    status = Column(db.Integer(),default=1)
    # Whether enabled
    active = Column(db.Boolean,default=True)
|
StarcoderdataPython
|
318356
|
<reponame>dmrib/linguicator-predictor<gh_stars>0
import asyncio
import websockets
import logging
from linguicator_predictor.websocket import handle_websocket_connection
from linguicator_predictor.models.en.distilgpt2 import DistilGPT2
PORT = 8765
HOST = '0.0.0.0'
LOGGING_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
LOGGING_DATE_FORMAT = '%m/%d/%Y %I:%M:%S %p'
def main():
"""
I start the server.
:returns: nothing
:rtype: None
"""
# configure predictor
global model
model = DistilGPT2()
# configure logger
logging.basicConfig(format=LOGGING_FORMAT,
datefmt=LOGGING_DATE_FORMAT,
level=logging.INFO)
# start server
logging.info('Starting server...')
start_server = websockets.serve(handle_websocket_connection,
HOST,
PORT)
    asyncio.get_event_loop().run_until_complete(start_server)
    asyncio.get_event_loop().run_forever()


if __name__ == '__main__':
    main()
|
StarcoderdataPython
|
3443242
|
<reponame>YuriSpiridonov/LeetCode<gh_stars>10-100
"""
Design a parking system for a parking lot. The parking lot has three kinds
of parking spaces: big, medium, and small, with a fixed number of slots for
each size.
Implement the ParkingSystem class:
- ParkingSystem(int big, int medium, int small) Initializes object of
the ParkingSystem class. The number of slots for each parking space
are given as part of the constructor.
- bool addCar(int carType) Checks whether there is a parking space of
carType for the car that wants to get into the parking lot. carType
can be of three kinds: big, medium, or small, which are represented
by 1, 2, and 3 respectively. A car can only park in a parking space
of its carType. If there is no space available, return false, else
park the car in that size space and return true.
Example:
Input
["ParkingSystem", "addCar", "addCar", "addCar", "addCar"]
[[1, 1, 0], [1], [2], [3], [1]]
Output
[null, true, true, false, false]
Explanation
ParkingSystem parkingSystem = new ParkingSystem(1, 1, 0);
parkingSystem.addCar(1); // return true because there is 1
available slot for a big car
parkingSystem.addCar(2); // return true because there is 1
available slot for a medium car
parkingSystem.addCar(3); // return false because there is no
available slot for a small car
parkingSystem.addCar(1); // return false because there is no
available slot for a big car. It is already occupied.
Constraints:
- 0 <= big, medium, small <= 1000
- carType is 1, 2, or 3
- At most 1000 calls will be made to addCar
"""
#Difficulty: Easy
#102 / 102 test cases passed.
#Runtime: 136 ms
#Memory Usage: 14.5 MB
#Runtime: 136 ms, faster than 79.04% of Python3 online submissions for Design Parking System.
#Memory Usage: 14.5 MB, less than 69.90% of Python3 online submissions for Design Parking System.
class ParkingSystem:
def __init__(self, big: int, medium: int, small: int):
self.parking = {1 : big, 2 : medium, 3 : small}
def addCar(self, carType: int) -> bool:
self.parking[carType] -= 1
return self.parking[carType] >= 0
# Your ParkingSystem object will be instantiated and called as such:
# obj = ParkingSystem(big, medium, small)
# param_1 = obj.addCar(carType)
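# Worked example from the problem statement above (runnable sanity check):
if __name__ == "__main__":
    parking_system = ParkingSystem(1, 1, 0)
    print(parking_system.addCar(1))  # True  - one big slot is available
    print(parking_system.addCar(2))  # True  - one medium slot is available
    print(parking_system.addCar(3))  # False - there are no small slots
    print(parking_system.addCar(1))  # False - the big slot is already occupied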
|
StarcoderdataPython
|
3271890
|
<reponame>alex-evans/fgolf
from django.db import models
from bs4 import BeautifulSoup
import requests
class Player(models.Model):
name = models.CharField(max_length=200, unique=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Person(models.Model):
name = models.CharField(max_length=200)
email = models.CharField(max_length=200)
total_winnings = models.IntegerField(blank=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Tournament(models.Model):
name = models.CharField(max_length=200)
file_name = models.CharField(max_length=200, blank=True)
start_date = models.DateField('start date')
end_date = models.DateField('end date')
leaderboard_url = models.CharField(max_length=200, blank=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class TournamentPlayer(models.Model):
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
player = models.ForeignKey(Player, on_delete=models.CASCADE)
group = models.CharField(max_length=1, blank=True)
winnings = models.IntegerField(blank=True)
class Meta:
ordering = ['tournament','player']
def __str__(self):
return f'{self.tournament} - {self.player} - {self.group}'
class TournamentPick(models.Model):
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
pick_a = models.ForeignKey(TournamentPlayer, on_delete=models.CASCADE, related_name='pick_a')
pick_b = models.ForeignKey(TournamentPlayer, on_delete=models.CASCADE, related_name='pick_b')
pick_c = models.ForeignKey(TournamentPlayer, on_delete=models.CASCADE, related_name='pick_c')
pick_d = models.ForeignKey(TournamentPlayer, on_delete=models.CASCADE, related_name='pick_d')
total_winnings = models.IntegerField(blank=True)
def save(self, *args, **kwargs):
self.total_winnings = self.pick_a.winnings + self.pick_b.winnings + self.pick_c.winnings + self.pick_d.winnings
super().save(*args, **kwargs)
def __str__(self):
return self.tournament.name + " - " + self.person.name + " - " + self.pick_a.player.name + ", " + self.pick_b.player.name + ", " + self.pick_c.player.name + ", " + self.pick_d.player.name
|
StarcoderdataPython
|
397261
|
# The MIT License (MIT)
# Copyright (c) 2022 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections.abc
import fnmatch
from typing import Dict, Tuple, Hashable, Optional, Mapping, Union
import numpy as np
import xarray as xr
from deprecated import deprecated
from xcube.util.assertions import assert_instance, assert_in, assert_true
AGG_METHODS = 'auto', 'first', 'min', 'max', 'mean', 'median'
DEFAULT_INT_AGG_METHOD = 'first'
DEFAULT_FLOAT_AGG_METHOD = 'mean'
AggMethod = Union[None, str]
AggMethods = Union[AggMethod, Mapping[str, AggMethod]]
def subsample_dataset(
dataset: xr.Dataset,
step: int,
xy_dim_names: Optional[Tuple[str, str]] = None,
agg_methods: AggMethods = None
) -> xr.Dataset:
"""
Subsample *dataset* with given integer subsampling *step*.
Only data variables with spatial dimensions given by
*xy_dim_names* are subsampled.
:param dataset: the dataset providing the variables
:param step: the integer subsampling step size in pixels in
the x and y directions.
For aggregation methods other than "first" it defines the
window size for the aggregation.
:param xy_dim_names: the spatial dimension names
:param agg_methods: Optional aggregation methods.
May be given as string or as mapping from variable name pattern
to aggregation method. Valid aggregation methods are
"auto", "first", "min", "max", "mean", "median".
If "auto", the default, "first" is used for integer variables
and "mean" for floating point variables.
"""
assert_instance(dataset, xr.Dataset, name='dataset')
assert_instance(step, int, name='step')
assert_valid_agg_methods(agg_methods)
x_name, y_name = xy_dim_names or ('y', 'x')
new_data_vars = dict()
new_coords = None # used to collect coordinates from coarsen
for var_name, var in dataset.data_vars.items():
if x_name in var.dims or y_name in var.dims:
agg_method = find_agg_method(agg_methods, var_name, var.dtype)
if agg_method == 'first':
slices = get_variable_subsampling_slices(
var, step, xy_dim_names=xy_dim_names
)
assert slices is not None
new_var = var[slices]
else:
dim = dict()
if x_name in var.dims:
dim[x_name] = step
if y_name in var.dims:
dim[y_name] = step
var_coarsen = var.coarsen(dim=dim,
boundary='pad',
coord_func='min')
new_var: xr.DataArray = getattr(var_coarsen, agg_method)()
if new_var.dtype != var.dtype:
# We don't want, e.g. "mean", to turn data
# from dtype unit16 into float64
new_var = new_var.astype(var.dtype)
new_var.attrs.update(var.attrs)
new_var.encoding.update(var.encoding)
# coarsen() recomputes spatial coordinates.
# Collect them, so we can later apply them to the
# variables that are subsampled by "first"
# (= slice selection).
if new_coords is None:
new_coords = dict(new_var.coords)
else:
new_coords.update(new_var.coords)
else:
new_var = var
new_data_vars[var_name] = new_var
if not new_data_vars:
return dataset
if new_coords:
# Make sure all variables use the same modified
# spatial coordinates from coarsen
new_data_vars = {
k: v.assign_coords({
d: new_coords[d]
for d in v.dims if d in new_coords
})
for k, v in new_data_vars.items()
}
return xr.Dataset(data_vars=new_data_vars,
attrs=dataset.attrs)
def assert_valid_agg_methods(agg_methods: AggMethods):
"""Assert that the given *agg_methods* are valid."""
assert_instance(agg_methods,
(type(None), str, collections.abc.Mapping),
name='agg_methods')
if isinstance(agg_methods, str):
assert_in(
agg_methods, AGG_METHODS,
name='agg_methods'
)
elif agg_methods is not None:
enum = (None, *AGG_METHODS)
for k, v in agg_methods.items():
assert_true(
isinstance(k, str),
message='keys in agg_methods must be strings'
)
assert_true(
v in enum,
message=f'values in agg_methods must be one of {enum}'
)
def find_agg_method(agg_methods: AggMethods,
var_name: Hashable,
var_dtype: np.dtype) -> str:
"""
Find aggregation method in *agg_methods*
for given *var_name* and *var_dtype*.
"""
assert_valid_agg_methods(agg_methods)
if isinstance(agg_methods, str) and agg_methods != 'auto':
return agg_methods
if isinstance(agg_methods, collections.abc.Mapping):
for var_name_pat, agg_method in agg_methods.items():
if var_name == var_name_pat or fnmatch.fnmatch(str(var_name),
var_name_pat):
if agg_method in (None, 'auto'):
break
return agg_method
# here: agg_method is either None or 'auto'
if np.issubdtype(var_dtype, np.integer):
return 'first'
else:
return 'mean'
_FULL_SLICE = slice(None, None, None)
@deprecated(version='0.10.3', reason='no longer in use')
def get_dataset_subsampling_slices(
dataset: xr.Dataset,
step: int,
xy_dim_names: Optional[Tuple[str, str]] = None
) -> Dict[Hashable, Optional[Tuple[slice, ...]]]:
"""
Compute subsampling slices for variables in *dataset*.
Only data variables with spatial dimensions given by
*xy_dim_names* are considered.
:param dataset: the dataset providing the variables
:param step: the integer subsampling step
:param xy_dim_names: the spatial dimension names
"""
assert_instance(dataset, xr.Dataset, name='dataset')
assert_instance(step, int, name='step')
slices_dict: Dict[Tuple[Hashable, ...], Tuple[slice, ...]] = dict()
vars_dict: Dict[Hashable, Optional[Tuple[slice, ...]]] = dict()
for var_name, var in dataset.data_vars.items():
var_index = slices_dict.get(var.dims)
if var_index is None:
var_index = get_variable_subsampling_slices(
var, step, xy_dim_names=xy_dim_names
)
if var_index is not None:
slices_dict[var.dims] = var_index
if var_index is not None:
vars_dict[var_name] = var_index
return vars_dict
def get_variable_subsampling_slices(
variable: xr.DataArray,
step: int,
xy_dim_names: Optional[Tuple[str, str]] = None
) -> Optional[Tuple[slice, ...]]:
"""
Compute subsampling slices for *variable*.
Return None, if *variable* does not contain spatial
dimensions.
:param variable: the dataset providing the variables
:param step: the integer subsampling step
:param xy_dim_names: the spatial dimension names
"""
assert_instance(variable, xr.DataArray, name='variable')
assert_instance(step, int, name='step')
x_dim_name, y_dim_name = xy_dim_names or ('x', 'y')
var_index = None
for index, dim_name in enumerate(variable.dims):
if dim_name == x_dim_name or dim_name == y_dim_name:
if var_index is None:
var_index = index * [_FULL_SLICE]
var_index.append(slice(None, None, step))
elif var_index is not None:
var_index.append(_FULL_SLICE)
return tuple(var_index) if var_index is not None else None
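# Illustrative usage (not part of the original module): subsample a tiny
# synthetic dataset by a factor of 2. With the default "auto" aggregation the
# float variable is averaged while the integer variable keeps every 2nd pixel.
if __name__ == '__main__':
    demo = xr.Dataset(
        data_vars=dict(
            temperature=(('y', 'x'), np.arange(16, dtype='float64').reshape(4, 4)),
            mask=(('y', 'x'), np.ones((4, 4), dtype='uint8')),
        ),
        coords=dict(y=np.arange(4), x=np.arange(4)),
    )
    subsampled = subsample_dataset(demo, step=2, xy_dim_names=('x', 'y'))
    print(subsampled.temperature.shape)  # (2, 2)
    print(subsampled.mask.shape)         # (2, 2)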
|
StarcoderdataPython
|
128780
|
#-*- coding: utf-8
import json
import time
import requests
from bs4 import BeautifulSoup
# pixiv url and login url.
PIXIV = 'https://www.pixiv.net'
LOGIN_URL = 'https://accounts.pixiv.net/login'
LOGIN_POST_URL = 'https://accounts.pixiv.net/api/login?lang=zh_tw'
# user-agnet.
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
# login data.
LOGIN_PARAM = { 'lang' : 'zh_tw',
'source' : 'pc',
'view_type' : 'page',
'ref' : 'wwwtop_accounts_index',
}
LOGIN_POST_DATA = {
'pixiv_id' : '',
'captcha' : '',
'g_recaptcha_response' : '',
'password' : '',
'post_key' : '',
'source' : 'pc',
'ref' : 'wwwtop_accounts_index',
'return_to' : 'https://www.pixiv.net/',
}
'''
PixivApiException: deal with PixivApi Exception.
'''
class PixivApiException(Exception):
def __init__(self, error_message):
self.error_message = error_message
def __str__(self):
return self.error_message
class PixivApi(object):
'''
    set your pixiv_id and password so that you can fetch all images (including R-18).
pixiv = PixivApi(pixiv_id, password)
'''
def __init__(self, pixiv_id, password):
self.pixiv_id = pixiv_id
self.password = password
self.session = requests.Session()
self.session.headers.update(headers)
self.login()
'''
Input:
image_url : image's url.
file_name : store file name.
Output:
None, download the image.
'''
def download(self, image_url, file_name=None):
if file_name is None:
file_name = image_url.split('/')[-1]
else:
file_type = image_url.split('.')[-1]
            file_name += '.' + file_type
response = self.session.get(image_url, stream=True)
# check whether can download.
if response.status_code != 200:
raise PixivApiException('Download {} fail, {}.'.format(image_url, response.status_code))
with open(file_name, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
'''
login your acount.
'''
def login(self):
response = self.session.get(LOGIN_URL, params=LOGIN_PARAM)
parser = BeautifulSoup(response.text, 'html.parser')
post_key = parser.select('[name=post_key]')[0]['value']
# prevent to fast.
time.sleep(0.5)
LOGIN_POST_DATA.update({'pixiv_id' : self.pixiv_id,
                                'password' : self.password,
'post_key' : post_key,})
self.session.post(LOGIN_POST_URL, data=LOGIN_POST_DATA)
        # use the R-18 ranking page to check whether the login succeeded.
check_login_url = 'https://www.pixiv.net/ranking.php?mode=daily_r18&content=illust'
response = self.session.get(check_login_url)
if response.status_code != 200:
raise PixivApiException('Login fail, {}.'.format(response.status_code))
'''
Input:
page : which page that you want to fetch.
Output:
list of image link {'url' : image_url, 'id' : image_id}.
'''
def get_follow(self, page=1):
        assert page > 0, 'page must be > 0.'
target_url = 'https://www.pixiv.net/bookmark_new_illust.php?p={}'
imagePool = []
response = self.session.get(target_url.format(page))
parser = BeautifulSoup(response.text, 'html.parser')
for block in parser.select('#js-mount-point-latest-following'):
data = json.loads(block['data-items'])
for image_item in data:
imagePool.append( { 'url' : image_item['url'].replace('\\',''), 'id' : image_item['illustId'] })
return imagePool
'''
Input:
author_id : author's pixiv id.
page : which page your want to fetch.
Output:
image link list {'url' : image_url, 'id' : image_id}.
'''
def get_author_images(self, author_id, page=1):
        assert page > 0, 'page must be > 0.'
target_url = 'https://www.pixiv.net/member_illust.php?id={}&type=all&p={}'
response = self.session.get(target_url.format(author_id, page))
# check whether author exits.
if response.status_code != 200:
raise PixivApiException('Author id {} doesn\'t exist, {}.'.format(author_id, response.status_code))
parser = BeautifulSoup(response.text, 'html.parser')
imagePool = []
for item in parser.select('._layout-thumbnail'):
imagePool.append( { 'url' : item.img['data-src'], 'id' : item.img['data-id'] })
return imagePool
'''
Input:
images : numbers of image you want to crawl.
Output:
list content rank male image's link and author's link.
[ {'url': image_link, 'author': author_link, 'id' : image_id} ]
'''
def get_rank(self, page=1, male=True, daily=False, r18=False):
mode = None
# decide whether use daily.
if daily:
mode = 'daily_r18' if r18 else 'daily'
else:
mode = 'male' if male else 'female'
if r18:
mode += '_r18'
target_url = 'https://www.pixiv.net/ranking.php?mode={}&p={}&format=json'.format(mode, page)
response = self.session.get(target_url)
# check whether can get page.
if response.status_code != 200:
raise PixivApiException('Get rank {} fail, {}.'.format(target_url, response.status_code))
imagePool = []
pixiv_json = response.json()
if pixiv_json.get('error', None) is None:
for item in pixiv_json['contents']:
imagePool.append({'url' : item['url'], 'author_id' : item['user_id'], 'id' : item['illust_id']})
return imagePool
def __del__(self):
self.session.close()
def close(self):
self.session.close()
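# Usage sketch (illustrative): requires network access and a real Pixiv
# account; the credentials below are placeholders that must be replaced.
if __name__ == '__main__':
    pixiv = PixivApi('your_pixiv_id', 'your_password')
    for image in pixiv.get_rank(page=1, daily=True)[:5]:
        print(image['id'], image['url'])
    pixiv.close()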
|
StarcoderdataPython
|
333912
|
import os
import sys
_PWEG_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/')
sys.path.insert(0, '%s/thirdparty_packages' % _PWEG_ROOT) # to include QtPy package
from qtpy.QtWidgets import QApplication
class PluginBase(object):
def __init__(self):
pass
def _connect_to_app(self, plugin_name, app_call_js_op, debug, info, warning, error, is_modal_dialog):
self.plugin_name = plugin_name
self.app_functions = {
'debug': debug,
'info': info,
'warning': warning,
'error': error,
'call_js_op': app_call_js_op,
}
self.is_modal_dialog = is_modal_dialog
def debug(self, msg):
self.app_functions['debug'](msg)
def info(self, msg):
self.app_functions['info'](msg)
def warning(self, msg):
self.app_functions['warning'](msg)
def error(self, msg):
self.app_functions['error'](msg)
    def critical(self, msg):
        # 'critical' is not registered in _connect_to_app, so fall back to the 'error' handler
        self.app_functions.get('critical', self.app_functions['error'])(msg)
def call_plugin_js_op(self, plugin_js_function_name, op_data):
plugin_op = 'Plugin|%s|%s' % (self.plugin_name, plugin_js_function_name)
self.app_functions['call_js_op'](plugin_op, op_data)
def process_events(self):
if self.is_modal_dialog:
QApplication.instance().processEvents()
|
StarcoderdataPython
|
1955896
|
import numpy as np
from node import Node
def main():
nodes=[]
num_pwr_cycles = 10
t_on_sunlight = 0.5 # t_s (or t_on) when nodes in sleep mode under sunlight
t_off_sunlight = 0.5 # t_off when nodes under sunlight
t_on_shadow = 0.1 # t_s (or t_on) when nodes in sleep mode in shadow
t_off_shadow = 2 # t_off when nodes in shadow
nodes_in_shadow = 0.2
for i in np.arange(num_pwr_cycles):
if np.random.uniform() < nodes_in_shadow:
nodes.append(Node(num_pwr_cycles, t_on_shadow, t_off_shadow))
else:
nodes.append(Node(num_pwr_cycles, t_on_sunlight, t_off_sunlight))
print(time_span(nodes))
# for node in nodes:
# print(node)
def time_span(nodes):
span=0
sorted_nodes = sorted(nodes, key=lambda node:node.wake_up_time)
num_nodes = len(sorted_nodes)
print("Number of nodes: ", num_nodes)
# removing completely overlapping nodes
idx = 0
while idx < num_nodes-1:
if (sorted_nodes[idx].wake_up_time+sorted_nodes[idx].on_time) > \
sorted_nodes[idx+1].wake_up_time+(sorted_nodes[idx+1].on_time):
del sorted_nodes[idx+1] # do not advance the counter because you delete a node
num_nodes-=1 # reduce the total number of nodes
# print("Remaining number of nodes: ", num_nodes)
else:
idx+=1
# calculate the total time span
for i in np.arange(num_nodes-1):
dif = sorted_nodes[i+1].wake_up_time - sorted_nodes[i].wake_up_time
if dif > sorted_nodes[i].on_time:
span+=sorted_nodes[i].on_time
else:
span+=dif
span+=(sorted_nodes[-1].on_time) # add the on-time of the last node
return span
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
202404
|
<reponame>Syntle/PythonBot
import discord
class Embeds:
    @staticmethod
    def cooldown(h, m, s):
        h = round(h)
        m = round(m)
        s = round(s)
        if h == 0 and m == 0:
            description = f'⏱️ Please wait {s} seconds before trying this command again!'
        elif h == 0 and m != 0:
            description = f'⏱️ Please wait {m} minutes & {s} seconds before trying this command again!'
        else:
            description = f'⏱️ Please wait {h} hours, {m} minutes & {s} seconds before trying this command again!'
        embed = discord.Embed(
            description=description,
            color=discord.Colour.red()
        )
        return embed
    @staticmethod
    def failure(error):
embed = discord.Embed(
description=error,
color=discord.Colour.red()
)
return embed
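# Usage sketch (illustrative): the embeds can be built without a running bot,
# e.g. before passing them to ctx.send(embed=...) in a discord.py command.
if __name__ == '__main__':
    cooldown_embed = Embeds.cooldown(0, 1, 30)
    failure_embed = Embeds.failure('Something went wrong.')
    print(cooldown_embed.description)
    print(failure_embed.description)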
|
StarcoderdataPython
|
1809457
|
import os
os.chdir('./data/')
for data_file in os.listdir():
    with open(data_file) as f:
        lines = f.readlines()
        header = lines[0]
    if "v_{y}" not in header:
        # rewrite the file without its first line; 'w' truncates the old contents
        with open(data_file, "w") as f:
            for line in lines[1:]:
                f.write(line)
    input("go to hell headers!!!")
|
StarcoderdataPython
|
5116135
|
<filename>dashboard/current.py
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash_core_components as dcc
EVEN_COLOR = "white"
ODD_COLOR = "aliceblue"
def plot_current(data, sort, ascending):
df = data.get_dataset("countries_total")
df_pivot = pd.pivot_table(
df, index=["country", "updated"], columns="record", values="total"
)
df_pivot.reset_index(inplace=True)
df_pivot.sort_values(by=sort, ascending=ascending, inplace=True)
df_pivot.rename(columns=dict(updated="last update"), inplace=True)
header = [
f"<b>{c}</b>"
for c in [
"Country",
"Last Update",
"Confirmed",
"Active",
"Recovered",
"Deaths",
]
]
cells = [
df_pivot["country"],
df_pivot["last update"].dt.strftime("%d.%m.%Y %H:%M"),
df_pivot["confirmed"],
df_pivot["active"],
df_pivot["recovered"],
df_pivot["deaths"],
]
fill_color = [
EVEN_COLOR if x % 2 else ODD_COLOR for x in np.arange(df_pivot.shape[0])
]
table = go.Table(
header=dict(values=header),
cells=dict(values=cells, fill_color=[fill_color * 6]),
)
fig = dict(data=[table])
return dcc.Graph(figure=fig)
|
StarcoderdataPython
|