hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a5e4666915212b8f6b0b15dc2449a686ce496e42 | 5,633 | py | Python | stackdriver/restapi.py | MarkMarine/stackdriver-client-python | 7e5e5806d02fcf4b8633d19adbce6d64f3082083 | ["Apache-2.0"] | null | null | null | stackdriver/restapi.py | MarkMarine/stackdriver-client-python | 7e5e5806d02fcf4b8633d19adbce6d64f3082083 | ["Apache-2.0"] | null | null | null | stackdriver/restapi.py | MarkMarine/stackdriver-client-python | 7e5e5806d02fcf4b8633d19adbce6d64f3082083 | ["Apache-2.0"] | null | null | null | """
restapi - base for calling rest resources
Stackdriver Public API, Copyright Stackdriver 2014
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import requests
import copy
import types
import json
import logging
logger = logging.getLogger(__name__)
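# Wraps a transport method so that every call is routed through the
# user-supplied transport_controller together with its userdata and the call
# arguments. A controller therefore has, for example, this shape:
#
#     def controller(transport_func, userdata, func_args, func_kwargs):
#         # inspect, log or retry as needed, then perform the real call
#         return transport_func(*func_args, **func_kwargs)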
def _wrap_transport_decorator(transport_func, wrapper, userdata):
def inner(*args, **kwargs):
return wrapper(transport_func, userdata=userdata, func_args=args, func_kwargs=kwargs)
return inner
def transport_func(func):
""" Decorates each of the transport functions that can get wrapped by a transport_controller """
func._is_transport_func = True
return func
class RestApi(object):
def __init__(self, entrypoint_uri, version=None, apikey=None, username=None, password=None, useragent=None, transport_controller=None, transport_userdata=None):
"""
Base class for accessing REST services
        :param entrypoint_uri: The http or https uri to the api
:param version: version of the api we support
:param apikey: the stackdriver apikey to use for authentication
:param username: username for basic auth - this is here for completeness but for the stackdriver apis auth should be done using the apikey
:param password: password for basic auth - this is here for completeness but for the stackdriver apis auth should be done using the apikey
:param transport_controller: if defined run this function before each network call
:param transport_userdata: data to send to the transport_controller
"""
# always end with a slash
entrypoint_uri = entrypoint_uri.strip()
if entrypoint_uri[-1] != '/':
entrypoint_uri += '/'
self._entrypoint_uri = entrypoint_uri
self._apikey = apikey
self._username = username
self._password = password
self._version = version
self._useragent = useragent
if transport_controller:
self._decorate_transport_funcs(transport_controller, transport_userdata)
def _decorate_transport_funcs(self, controller, userdata):
""" decorate all methods that have an attribute of _is_transport_func set to True
skip any methods that start with an underscore (_)
SEE @transport_func decorator
"""
for method_name in dir(self):
if method_name.startswith('_'):
continue
method = getattr(self, method_name, None)
if isinstance(method, types.MethodType):
setattr(self, method_name, _wrap_transport_decorator(method, controller, userdata))
def _merge_headers(self, extra, is_post=False):
headers = {}
if extra is not None:
headers = copy.copy(extra)
if self._apikey:
headers['x-stackdriver-apikey'] = self._apikey
headers['x-stackdriver-version'] = self._version
if is_post:
headers['accept'] = 'application/json, text/plain, */*'
headers['content-type'] = 'application/json'
if self._useragent:
headers['user-agent'] = self._useragent
return headers
def _gen_full_endpoint(self, endpoint_path):
if endpoint_path.startswith('/'):
endpoint_path = endpoint_path[1:]
return '%s%s' % (self._entrypoint_uri, endpoint_path)
@transport_func
def get(self, endpoint, params=None, headers=None):
headers = self._merge_headers(headers)
uri = self._gen_full_endpoint(endpoint)
logger.debug('GET %s', uri, extra={'params': params})
r = requests.get(uri, params=params, headers=headers)
r.raise_for_status()
return r.json()
@transport_func
def post(self, endpoint, data=None, headers=None):
headers = self._merge_headers(headers, is_post=True)
uri = self._gen_full_endpoint(endpoint)
logger.debug('POST %s', uri, extra={'data': data})
r = requests.post(uri, data=json.dumps(data), headers=headers)
r.raise_for_status()
return r.json()
@transport_func
def put(self, endpoint, data=None, headers=None):
headers = self._merge_headers(headers, is_post=True)
uri = self._gen_full_endpoint(endpoint)
logger.debug('PUT %s', uri, extra={'data': data})
r = requests.put(uri, data=json.dumps(data), headers=headers)
r.raise_for_status()
return r.json()
@transport_func
def delete(self, endpoint, headers=None):
headers = self._merge_headers(headers, is_post=True)
uri = self._gen_full_endpoint(endpoint)
logger.debug('DELETE %s', uri)
r = requests.delete(uri, headers=headers)
r.raise_for_status()
return r.json()
@property
def api_version(self):
return self._version
@property
def entrypoint(self):
return self._entrypoint_uri
| 35.427673 | 164 | 0.680987 | 4,275 | 0.758921 | 0 | 0 | 1,516 | 0.269128 | 0 | 0 | 2,098 | 0.372448 |
a5e7acf2b322f72151a720e8d6b6a7577bf377de | 13,896 | py | Python | ventana_perceptron.py | musicbiker/ANNT | 301f1090925c8937f0fd3b4955ec68ff772022ce | ["MIT"] | null | null | null | ventana_perceptron.py | musicbiker/ANNT | 301f1090925c8937f0fd3b4955ec68ff772022ce | ["MIT"] | null | null | null | ventana_perceptron.py | musicbiker/ANNT | 301f1090925c8937f0fd3b4955ec68ff772022ce | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 15:05:41 2019
@author: jrodriguez119
"""
import tkinter as tk
from tkinter import ttk
import crearcapas
import perceptron_multicapa
from threading import Thread
import sys
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from tkinter import filedialog as fd
from tkinter import messagebox as mb
import menu
import sklearn
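# Display redirects sys.stdout into a Tkinter Text widget, so anything printed
# while the model trains is shown inside the GUI instead of the console.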
class Display(tk.Frame):
def __init__(self,parent=0):
tk.Frame.__init__(self,parent)
self.output = tk.Text(self, width=80, height=15)
self.output.pack(padx = 30, pady = 5,)
sys.stdout = self
self.pack()
def flush(self):
pass
def write(self, txt):
self.output.insert(tk.END,str(txt))
self.output.see("end")
self.update_idletasks()
#Function that builds the parameter window for the multilayer perceptron
def Ventana_perceptron(ventana_seleccion,X_train,Y_train,X_test,Y_test,ventana_inicio):
    #Create the window
ventana_perceptron = tk.Toplevel(ventana_seleccion)
ventana_perceptron.geometry('725x600+500+200')
    #Insert the menu
menu.menu(ventana_perceptron,ventana_inicio)
    #Hide the previous window
ventana_seleccion.withdraw()
    #Title
labeltitulo = ttk.Label(ventana_perceptron,text = "Parámetros necesarios para el Perceptrón",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo.pack(pady=10)
    #Frame that holds the input widgets
lframe = ttk.Frame(ventana_perceptron)
lframe.pack()
    #------------------------ data input ---------------------------------
    #Batch size
tamlot = tk.IntVar()
lbtamlote = ttk.Label(lframe,text = "Tamaño lote: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbtamlote.grid(column=0, row=0 ,pady=5,sticky=tk.W)
etamlot = ttk.Entry(lframe,width=5, textvariable = tamlot)
etamlot.grid(column=1, row=0,pady=5,sticky=tk.E)
    #Optimizer
opt =tk.StringVar()
lbopt = ttk.Label(lframe, text="Optimizador: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbopt.grid(column=0, row=1,pady=5,sticky=tk.W)
cbopt=ttk.Combobox(lframe,width=9,state="readonly",textvariable = opt)
cbopt["values"] = ["SGD", "RMSProp","Adam","Adagrad"]
cbopt.grid(column = 1 ,row = 1,pady=5,columnspan=2)
cbopt.current(0)
    #Validation proportion
pv = tk.DoubleVar()
pv.set(0.2)
lbpv = ttk.Label(lframe,text = "Proporción de Validación :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbpv.grid(column=0, row=2 ,pady=5,sticky=tk.W)
epv = ttk.Entry(lframe,width=5, textvariable = pv)
epv.grid(column=1, row=2,pady=5,sticky=tk.E)
    #Number of hidden layers
nco = tk.IntVar()
lbnco = ttk.Label(lframe,text = "Número capas ocultas :",
foreground = "#054FAA",font=("Arial Bold", 12))
lbnco.grid(column=0, row=3 ,pady=5,sticky=tk.W)
enco = ttk.Entry(lframe,width=5, textvariable = nco)
enco.grid(column=1, row=3,pady=5,sticky=tk.E)
    #Loss function
fl =tk.StringVar()
lbfl = ttk.Label(lframe, text="Función Loss: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbfl.grid(column=0, row=4,pady=5,sticky=tk.W)
cbfl=ttk.Combobox(lframe,width=21,state="readonly",textvariable = fl)
cbfl["values"] = ["kullback_leibler_divergence","mean_squared_error", "categorical_hinge",
"categorical_crossentropy","binary_crossentropy","poisson","cosine_proximity"]
cbfl.grid(column = 1 ,row = 4,pady=5,columnspan=2,sticky=tk.E)
cbfl.current(3)
    #Stopping method
labeltitulo1 = ttk.Label(ventana_perceptron,text = "Método de parada",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo1.pack(pady=10)
lframe1 = ttk.Frame(ventana_perceptron)
lframe1.pack()
    #Type of stopping
    #Stop after a fixed number of iterations
mp=tk.IntVar()
bat1= ttk.Radiobutton(lframe1, value=0,variable=mp)
bat1.grid(column=0, row=0)
nui=tk.IntVar()
lbnui = ttk.Label(lframe1, text="Número de iteraciones: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbnui.grid(column=1, row=0,pady=5,sticky=tk.W)
enui = ttk.Entry(lframe1,width=5, textvariable = nui)
enui.grid(column=2, row=0,pady=5,sticky=tk.E)
    #Stop by monitoring a parameter
bat2 = ttk.Radiobutton(lframe1, value=1,variable=mp)
bat2.grid(column=0, row=1)
lbparada = ttk.Label(lframe1, text="Parada temprana: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbparada.grid(column = 1, row = 1,sticky=tk.W )
    #Parameter to monitor
lbcon = ttk.Label(lframe1, text=" Parámetro a controlar: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbcon.grid(column = 1, row = 2,pady=5,sticky=tk.W )
con =tk.StringVar()
cbcon=ttk.Combobox(lframe1,width=9,state="readonly",textvariable = con)
cbcon["values"] = ["loss","val_loss", "acc","val_acc"]
cbcon.grid(column = 2 ,row = 2,pady=5,sticky=tk.E)
cbcon.current(0)
    #Minimum delta of improvement
delt =tk.DoubleVar()
delt.set(0.001)
lbdelt = ttk.Label(lframe1, text=" Delta min: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbdelt.grid(column=1, row=3,pady=5,sticky=tk.W)
edelt = ttk.Entry(lframe1,width=5, textvariable = delt)
edelt.grid(column=2, row=3,pady=5,sticky=tk.E)
    #Patience before triggering the stop
pat =tk.IntVar()
pat.set(3)
lbpat = ttk.Label(lframe1, text=" Paciencia: ",
foreground = "#054FAA",font=("Arial Bold", 12))
lbpat.grid(column=1, row=4,pady=5,sticky=tk.W)
epat = ttk.Entry(lframe1,width=5, textvariable = pat)
epat.grid(column=2, row=4,pady=5,sticky=tk.E)
    #Function that opens an external window where the model is built by editing the hidden layers
def crearmodelo():
global NO,AC,BA,DR,numero_capas
numero_capas = int(nco.get())
NO,AC,BA,DR = crearcapas.capas(numero_capas, ventana_perceptron)
btnmodelo = ttk.Button(ventana_perceptron, text = "Crear modelo",style='my.TButton', command=crearmodelo)
btnmodelo.pack(pady=50)
lframe2 = ttk.Frame(ventana_perceptron)
lframe2.pack(side= "bottom")
def entrenar():
lote = tamlot.get()
optimizador = opt.get()
prop_val = pv.get()
numero_capas_ocultas = int(nco.get())
loss = fl.get()
parada = mp.get()
iteraciones = nui.get()
control = con.get()
delta = delt.get()
paciencia = pat.get()
        #Input validation
if lote == 0:
mb.showerror("Error", "Variable tamaño del lote = 0 ")
return
if prop_val == 0:
mb.showerror("Error", "El algoritmo necesita una parte del conjunto de entrenamiento para su validación ")
return
if prop_val > 1:
mb.showerror("Error", "Proporción de validación no válida ")
return
if numero_capas_ocultas == 0:
mb.showerror("Error", "Variable numero de capas ocultas = 0 ")
return
if parada == 0 and iteraciones==0:
mb.showerror("Error", "No se ha indicado el número de iteraciones requeridas ")
return
if parada == 1 and delta==0.0:
mb.showerror("Error", "No se ha indicado el mínimo delta para controlar la evolución ")
return
while True:
try:
NO
break
except NameError:
mb.showerror("Error", "No se ha creado el modelo, haga click en crear modelo ")
return
for i in range(numero_capas_ocultas) :
if NO[i].get()==0:
mb.showerror("Error", "No es posible tener capas con 0 neuronas, asegurese de haber creado el modelo correctamente ")
return
for i in range(numero_capas_ocultas) :
if DR[i].get() > 1:
mb.showerror("Error", "Valor Dropout no válido ")
return
        #Window that shows the training output and the buttons to save the model
ventana_display = tk.Toplevel(ventana_perceptron)
labeltitulo1 = ttk.Label(ventana_display,text = "Entrenamiento",
foreground = "#054FAA",font=("Arial Bold", 15))
labeltitulo1.pack(pady=5)
        #Function that plots the training evolution
def plot():
ventana_plot = tk.Toplevel(ventana_perceptron)
ventana_plot.geometry('900x600')
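            # Embed the accuracy and loss curves in the new window using a
            # matplotlib FigureCanvasTkAgg canvas plus the standard toolbar.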
f = Figure(figsize = (5,5),dpi = 100)
a = f.add_subplot(121)
b = f.add_subplot(122)
            #Summarize and plot the training history
a.plot(entrenamiento.history['acc'])
a.plot(entrenamiento.history['val_acc'])
a.set_title('Precisión del modelo')
a.set_ylabel('Precisión')
a.set_xlabel('Iteraciones')
a.legend(['Entrenamiento', 'Validación'], loc='upper left')
# summarize history for loss
b.plot(entrenamiento.history['loss'])
b.plot(entrenamiento.history['val_loss'])
b.set_title('Loss del modelo')
b.set_ylabel('Loss')
b.set_xlabel('Iteraciones')
b.legend(['Entrenamiento', 'Validación'], loc='upper left')
canvas1 = FigureCanvasTkAgg(f,ventana_plot)
canvas1.get_tk_widget().pack(side = tk.TOP,fill = tk.BOTH, expand = True)
toolbar = NavigationToolbar2Tk(canvas1,ventana_plot)
toolbar.update()
canvas1._tkcanvas.pack(side = tk.TOP,fill = tk.BOTH, expand = True)
def guardarcompl():
nombrearch=fd.asksaveasfilename(initialdir = "/",title = "Guardar como",defaultextension = 'h5')
model.save(nombrearch)
mb.showinfo("Información", "Los datos fueron guardados.")
def guardarpesos():
nombrearch=fd.asksaveasfilename(initialdir = "/",title = "Guardar como",defaultextension = 'h5')
model.save_weights(nombrearch)
mb.showinfo("Información", "Los datos fueron guardados.")
def atras():
ventana_display.destroy()
framebotones = ttk.Frame(ventana_display)
framebotones.pack(side= "bottom")
btnguardarcompl = ttk.Button(framebotones, text="Modelo completo",
command=guardarcompl,style='my.TButton',width = 15)
btnguardarcompl.grid(row = 0, column = 0, padx = 10, pady = 5,sticky=tk.W)
btnguardarpesos = ttk.Button(framebotones, text="Pesos",
command=guardarpesos,style='my.TButton',width = 15)
btnguardarpesos.grid(row = 0, column = 1, padx = 10, pady = 5,sticky=tk.W)
btnplot = ttk.Button(framebotones, text="Plot",
command=plot,style='my.TButton',width = 15)
btnplot.grid(row = 1, column = 0, padx = 10, pady = 5,sticky=tk.W)
btnatras = ttk.Button(framebotones, text="Atrás",
command=atras,style='my.TButton',width = 15)
btnatras.grid(row = 1, column = 1, padx = 10, pady = 5,sticky=tk.W)
def pantalla():
global Display
Display(ventana_display)
def run():
global model, entrenamiento
while True:
try:
model, entrenamiento = perceptron_multicapa.Perceptron_multicapa(ventana_perceptron,ventana_display,X_train,Y_train,X_test,Y_test,
lote,optimizador,prop_val,numero_capas_ocultas,loss,
parada,iteraciones,control,delta,paciencia,NO,AC,BA,DR)
break
except tk.TclError:
mb.showerror("Error desconocido", "Por favor vuelva a intentarlo ")
ventana_display.destroy()
return
except RuntimeError:
mb.showerror("Error desconocido", "Por favor reinicie la aplicación ")
ventana_display.destroy()
return
except sklearn.metrics.classification.UndefinedMetricWarning:
mb.showerror("Error ", "Algo salió mal con los datos, reinicie la aplicación y vuelva a intentarlo ")
ventana_display.destroy()
return
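        # Run the output console and the training itself in separate threads so
        # the GUI stays responsive while the model trains.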
t1=Thread(target=pantalla)
t2=Thread(target=run)
t1.start()
t2.start()
btntrain = ttk.Button(lframe2, text = "Entrenar",style='my.TButton', command=entrenar)
btntrain.grid(row = 0, column = 1, padx = 20, pady=15)
def atras():
ventana_perceptron.destroy()
ventana_seleccion.deiconify()
btnatras = ttk.Button(lframe2, text = "Atras",style='my.TButton', command=atras)
btnatras.grid(row=0,column=0, padx = 20, pady=15)
| 40.750733 | 151 | 0.573258 | 441 | 0.031633 | 0 | 0 | 0 | 0 | 0 | 0 | 3,049 | 0.218707 |
a5e93ad8745db2b82f7503c050a79a9fd3c06143 | 419 | py | Python | tests/search/test_search_onedrive.py | theodoriss/Office365-REST-Python-Client | 3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e | [
"MIT"
]
| 544 | 2016-08-04T17:10:16.000Z | 2022-03-31T07:17:20.000Z | tests/search/test_search_onedrive.py | theodoriss/Office365-REST-Python-Client | 3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e | [
"MIT"
]
| 438 | 2016-10-11T12:24:22.000Z | 2022-03-31T19:30:35.000Z | tests/search/test_search_onedrive.py | theodoriss/Office365-REST-Python-Client | 3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e | [
"MIT"
]
| 202 | 2016-08-22T19:29:40.000Z | 2022-03-30T20:26:15.000Z | from tests.graph_case import GraphTestCase
class TestSearchOneDrive(GraphTestCase):
@classmethod
def setUpClass(cls):
super(TestSearchOneDrive, cls).setUpClass()
@classmethod
def tearDownClass(cls):
pass
def test1_search_files(self):
result = self.client.search.query("Guide.docx", entity_types=["driveItem"]).execute_query()
self.assertIsNotNone(result.value)
| 23.277778 | 99 | 0.706444 | 372 | 0.887828 | 0 | 0 | 142 | 0.338902 | 0 | 0 | 23 | 0.054893 |
a5ea06e0a07718613f62378639588110228f7035 | 728 | py | Python | secu/tests/user_post_test.py | wancy86/tornado-seed | bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3 | ["MIT"] | null | null | null | secu/tests/user_post_test.py | wancy86/tornado-seed | bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3 | ["MIT"] | null | null | null | secu/tests/user_post_test.py | wancy86/tornado-seed | bea842f4ba6b23dda53ec9ae9f1349e1d2b54fd3 | ["MIT"] | null | null | null | import requests
from ..base.test import BaseTestCase, AuthorizedTestCase
import uuid
import common
class T(AuthorizedTestCase):
@property
def path(self):
return '/service/secu/user'
def setUp(self):
super().setUp()
self.data = {
'username': 'myao',
'email': '[email protected]',
'mobile': '18665369920',
'pwd': '123456',
'fullname': '姚贯伟',
'roles': ''
}
def test_by_correct_info(self):
response = requests.post(self.url, json=self.data)
self.assertNotEqual(response.text, '', '返回值为空!')
resp = response.json()
self.assertEqual('000', resp['code']) | 26.962963 | 59 | 0.542582 | 641 | 0.859249 | 0 | 0 | 67 | 0.089812 | 0 | 0 | 159 | 0.213137 |
a5ea9efb676efad8c603777a80d368a57ffbe7ba | 2,204 | py | Python | arbeitsplan/management/commands/meldungConsistent.py | hkarl/svpb | 29aab0065ff69c7c4d52812508167514d635cab9 | ["Apache-2.0"] | 3 | 2015-02-20T14:53:17.000Z | 2020-12-01T19:29:14.000Z | arbeitsplan/management/commands/meldungConsistent.py | hkarl/svpb | 29aab0065ff69c7c4d52812508167514d635cab9 | ["Apache-2.0"] | 67 | 2015-01-06T19:48:59.000Z | 2022-03-20T16:56:22.000Z | arbeitsplan/management/commands/meldungConsistent.py | hkarl/svpb | 29aab0065ff69c7c4d52812508167514d635cab9 | ["Apache-2.0"] | 2 | 2015-12-07T09:21:10.000Z | 2015-12-30T18:36:53.000Z | """
Define a command that should be run from a crontab.
This one should check consistency of Meldungen:
at most one Meldung per Aufgabe, per User.
"""
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.utils import translation
from django.conf import settings
from django.core.mail import send_mail
import arbeitsplan.models as models
import datetime
import pprint
from collections import defaultdict
class Command(BaseCommand):
"""Go through all Users and Aufgaben.
Check whether at most one Meldung exist.
"""
args = ""
help = "Check Meldung consistency, send out warning emails"
emailTemplate = "upcomingJob"
def handle(self, *args, **options):
# set the locale right, to get the dates represented correctly
translation.activate(settings.LANGUAGE_CODE)
self.stdout.write('meldungConsistent: Checking on ' +
str(datetime.date.today()))
inconsistent_users = defaultdict(list)
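        # Collect, per user, every Aufgabe for which more than one Meldung exists.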
for u in models.User.objects.all():
for a in models.Aufgabe.objects.all():
mqs = models.Meldung.objects.filter(melder=u, aufgabe=a)
c = mqs.count()
if c > 1:
inconsistent_users[u].append(a)
print inconsistent_users
if inconsistent_users:
subject = "SVPB: PROBLEM with Meldungenkonsistenz"
body = pprint.pformat(inconsistent_users)
else:
subject = "SVPB: Meldungen all good"
body = "rechoice"
send_mail(subject,
body,
"[email protected]",
['[email protected]'],
fail_silently=False)
## for kontakt, liste in kontaktKontext.iteritems():
## if kontakt.email:
## mail.send(
## [kontakt.email],
## template="upcomingJob-Kontakt",
## context={'liste': liste,
## 'verantwortlich': kontakt,
## },
## )
translation.deactivate()
| 29.783784 | 72 | 0.587114 | 1,720 | 0.780399 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.375681 |
a5ece29e8598f0696a2017f159b9027891f278ea | 1,215 | py | Python | app/migrations/0013_auto_20200907_1056.py | mapoetto/group2_CTFLab | 5b492ce46875ea37a57701686897bd9613e2dd13 | ["MIT"] | 1 | 2021-10-15T14:37:33.000Z | 2021-10-15T14:37:33.000Z | app/migrations/0013_auto_20200907_1056.py | mapoetto/group2_CTFLab | 5b492ce46875ea37a57701686897bd9613e2dd13 | ["MIT"] | null | null | null | app/migrations/0013_auto_20200907_1056.py | mapoetto/group2_CTFLab | 5b492ce46875ea37a57701686897bd9613e2dd13 | ["MIT"] | null | null | null | # Generated by Django 2.1.15 on 2020-09-07 10:56
import app.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0012_user_id_ctfd'),
]
operations = [
migrations.CreateModel(
name='CTFd_configs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url_API', models.CharField(max_length=220)),
('token_API', models.CharField(max_length=64)),
('port_API', models.IntegerField(default=8000, validators=[app.models.validate_flag])),
],
),
migrations.AddField(
model_name='lab',
name='categoria',
field=models.CharField(default='', max_length=120),
),
migrations.AddField(
model_name='lab',
name='flag',
field=models.CharField(default='', max_length=220),
),
migrations.AddField(
model_name='lab',
name='valore_flag',
field=models.IntegerField(default=10, validators=[app.models.validate_flag]),
),
]
| 31.153846 | 114 | 0.568724 | 1,103 | 0.907819 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.142387 |
a5ef7047358651b5620e1896751f01c69ce61941 | 6,404 | py | Python | products_and_services_client/models/monthly_price.py | pitzer42/opbk-br-quickstart | b3f86b2e5f82a6090aaefb563614e174a452383c | ["MIT"] | 2 | 2021-02-07T23:58:36.000Z | 2021-02-08T01:03:25.000Z | products_and_services_client/models/monthly_price.py | pitzer42/opbk-br-quickstart | b3f86b2e5f82a6090aaefb563614e174a452383c | ["MIT"] | null | null | null | products_and_services_client/models/monthly_price.py | pitzer42/opbk-br-quickstart | b3f86b2e5f82a6090aaefb563614e174a452383c | ["MIT"] | null | null | null | # coding: utf-8
"""
    Open Banking Brasil OpenData APIs
    The APIs described in this document are the APIs of the OpenData phase of Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MonthlyPrice(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'interval': 'PriceIntervals',
'monthly_fee': 'str',
'currency': 'Currency',
'customers': 'Customer'
}
attribute_map = {
'interval': 'interval',
'monthly_fee': 'monthlyFee',
'currency': 'currency',
'customers': 'customers'
}
def __init__(self, interval=None, monthly_fee=None, currency=None, customers=None): # noqa: E501
"""MonthlyPrice - a model defined in Swagger""" # noqa: E501
self._interval = None
self._monthly_fee = None
self._currency = None
self._customers = None
self.discriminator = None
self.interval = interval
self.monthly_fee = monthly_fee
self.currency = currency
self.customers = customers
@property
def interval(self):
"""Gets the interval of this MonthlyPrice. # noqa: E501
:return: The interval of this MonthlyPrice. # noqa: E501
:rtype: PriceIntervals
"""
return self._interval
@interval.setter
def interval(self, interval):
"""Sets the interval of this MonthlyPrice.
:param interval: The interval of this MonthlyPrice. # noqa: E501
:type: PriceIntervals
"""
if interval is None:
raise ValueError("Invalid value for `interval`, must not be `None`") # noqa: E501
self._interval = interval
@property
def monthly_fee(self):
"""Gets the monthly_fee of this MonthlyPrice. # noqa: E501
        Median value of each band for the offered service, reported for the period, as per Res nº 32 BCB, 2020. E.g. ''45.00'' (represents a monetary value, e.g. 1547368.92; assuming the currency is BRL, this means R$ 1,547,368.92. The only separator present must be the ''.'' (dot) marking the decimal place. There must be no thousands separator) # noqa: E501
:return: The monthly_fee of this MonthlyPrice. # noqa: E501
:rtype: str
"""
return self._monthly_fee
@monthly_fee.setter
def monthly_fee(self, monthly_fee):
"""Sets the monthly_fee of this MonthlyPrice.
        Median value of each band for the offered service, reported for the period, as per Res nº 32 BCB, 2020. E.g. ''45.00'' (represents a monetary value, e.g. 1547368.92; assuming the currency is BRL, this means R$ 1,547,368.92. The only separator present must be the ''.'' (dot) marking the decimal place. There must be no thousands separator) # noqa: E501
:param monthly_fee: The monthly_fee of this MonthlyPrice. # noqa: E501
:type: str
"""
if monthly_fee is None:
raise ValueError("Invalid value for `monthly_fee`, must not be `None`") # noqa: E501
self._monthly_fee = monthly_fee
@property
def currency(self):
"""Gets the currency of this MonthlyPrice. # noqa: E501
:return: The currency of this MonthlyPrice. # noqa: E501
:rtype: Currency
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this MonthlyPrice.
:param currency: The currency of this MonthlyPrice. # noqa: E501
:type: Currency
"""
if currency is None:
raise ValueError("Invalid value for `currency`, must not be `None`") # noqa: E501
self._currency = currency
@property
def customers(self):
"""Gets the customers of this MonthlyPrice. # noqa: E501
:return: The customers of this MonthlyPrice. # noqa: E501
:rtype: Customer
"""
return self._customers
@customers.setter
def customers(self, customers):
"""Sets the customers of this MonthlyPrice.
:param customers: The customers of this MonthlyPrice. # noqa: E501
:type: Customer
"""
if customers is None:
raise ValueError("Invalid value for `customers`, must not be `None`") # noqa: E501
self._customers = customers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MonthlyPrice, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MonthlyPrice):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.841026 | 392 | 0.600562 | 6,037 | 0.940782 | 0 | 0 | 3,276 | 0.510519 | 0 | 0 | 3,542 | 0.551971 |
a5f29eaf88394cd5b49f0dfad22427d8e4654b7c | 481 | py | Python | sqlalchemy_i18n/expressions.py | EdwardBetts/sqlalchemy-i18n | 9cf515be75be6e319416579b32528e1a096c03cf | ["BSD-3-Clause"] | 31 | 2015-02-26T11:08:43.000Z | 2022-03-18T11:53:30.000Z | sqlalchemy_i18n/expressions.py | EdwardBetts/sqlalchemy-i18n | 9cf515be75be6e319416579b32528e1a096c03cf | ["BSD-3-Clause"] | 13 | 2015-01-05T09:40:59.000Z | 2022-01-18T23:57:28.000Z | sqlalchemy_i18n/expressions.py | EdwardBetts/sqlalchemy-i18n | 9cf515be75be6e319416579b32528e1a096c03cf | ["BSD-3-Clause"] | 13 | 2015-01-08T08:24:15.000Z | 2022-02-05T01:59:41.000Z | import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.functions import GenericFunction
class current_locale(GenericFunction):
type = sa.types.String()
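# The @compiles hook below tells SQLAlchemy how to render current_locale() in
# SQL: it is emitted as a bound parameter carrying the currently active locale.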
@compiles(current_locale)
def compile_current_locale(element, compiler, **kw):
# Lazy import get_locale so that it can be overridden
from sqlalchemy_utils.i18n import get_locale
return '%s' % compiler.process(
sa.bindparam('current_locale', str(get_locale()))
)
| 26.722222 | 57 | 0.756757 | 67 | 0.139293 | 0 | 0 | 286 | 0.594595 | 0 | 0 | 73 | 0.151767 |
a5f2ce8d23f3ea07c4d73928966352c760c23c7e | 47 | py | Python | scripts/portal/OutElfKingRoom.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/OutElfKingRoom.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/OutElfKingRoom.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # 101050010
sm.warp(101050000, 7)
sm.dispose()
| 11.75 | 21 | 0.723404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.234043 |
a5f31b512ae3b988c292e1211f6d15cfb61624fc | 839 | py | Python | suppy/simulator/atomics/divergence_atomic.py | bmaris98/suppy | 8450c6d25ffa492cdedfbbb4c111d22e7f2788a7 | ["BSD-3-Clause"] | null | null | null | suppy/simulator/atomics/divergence_atomic.py | bmaris98/suppy | 8450c6d25ffa492cdedfbbb4c111d22e7f2788a7 | ["BSD-3-Clause"] | null | null | null | suppy/simulator/atomics/divergence_atomic.py | bmaris98/suppy | 8450c6d25ffa492cdedfbbb4c111d22e7f2788a7 | ["BSD-3-Clause"] | null | null | null | from suppy.utils.stats_constants import DIVERGENCE, TYPE
from typing import Any, Dict
from suppy.simulator.atomics.atomic import Atomic
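# Divergence node: takes the resource currently loaded from its input and
# pushes it onto the first downstream output stream that has no pending input.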
class DivergenceAtomic(Atomic):
def __init__(self, uid: str, seh, name: str):
Atomic.__init__(self, uid, seh, name, 0, 0)
def get_stats(self) -> Dict[str, Any]:
stats = Atomic.get_stats(self)
stats[TYPE] = DIVERGENCE
return stats
def _all_output_clear(self) -> bool:
for output_stream in self._output_streams:
if not output_stream.has_input:
return True
return False
def _do_process(self) -> None:
resource = self._loaded_input[0]
for output_stream in self._output_streams:
if not output_stream.has_input:
output_stream.try_load(resource)
return | 31.074074 | 56 | 0.647199 | 701 | 0.835518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a5f71728f2d90dfa8913ba58e0714da23be50b98 | 2,673 | py | Python | train/compute/python/pytorch_benchmark.py | sazanovd/param | 595b81ceb64d8d106d05ab67d2c73e8465d06921 | ["MIT"] | null | null | null | train/compute/python/pytorch_benchmark.py | sazanovd/param | 595b81ceb64d8d106d05ab67d2c73e8465d06921 | ["MIT"] | null | null | null | train/compute/python/pytorch_benchmark.py | sazanovd/param | 595b81ceb64d8d106d05ab67d2c73e8465d06921 | ["MIT"] | null | null | null | import logging
from .lib.init_helper import init_logging, load_modules
# Initialize logging format before loading all other modules
logger = init_logging(logging.INFO)
import argparse
from .lib import pytorch as lib_pytorch
from .lib.config import BenchmarkConfig
from .lib.pytorch.benchmark import (
make_default_benchmark,
ExecutionPass,
get_benchmark_options,
)
from .workloads import pytorch as workloads_pytorch
def main():
parser = argparse.ArgumentParser(description="Microbenchmarks")
parser.add_argument(
"-c", "--config", type=str, required=True, help="The benchmark config file."
)
parser.add_argument(
"-w", "--warmup", type=int, default=5, help="Number of warm up iterations."
)
parser.add_argument(
"-i", "--iteration", type=int, default=1, help="Number of benchmark iterations."
)
parser.add_argument(
"-b", "--backward", action="store_true", help="Include backward pass."
)
parser.add_argument(
"-d", "--device", type=str, default="cpu", help="Target device for benchmark."
)
parser.add_argument(
"-o",
"--output-prefix",
type=str,
default="benchmark_result",
help="File name prefix to write benchmark results.",
)
parser.add_argument(
"-a", "--append", action="store_true", help="Append to output file, rather than overwrite."
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Increase log output verbosity."
)
args = parser.parse_args()
if args.verbose:
init_logging(logging.DEBUG)
# Load PyTorch implementations for data generator and operators.
load_modules(lib_pytorch)
# Load PyTorch operator workloads.
load_modules(workloads_pytorch)
run_options = get_benchmark_options()
run_options["warmup"] = args.warmup
run_options["iteration"] = args.iteration
run_options["device"] = args.device
if args.backward:
run_options["pass_type"] = ExecutionPass.BACKWARD
logger.info("Pass: FORWARD and BACKWARD")
else:
run_options["pass_type"] = ExecutionPass.FORWARD
logger.info("Pass: FORWARD")
out_file_name = f"{args.output_prefix}.json"
write_option = "a" if args.append else "w"
with open(out_file_name, write_option) as out_file:
run_options["out_stream"] = out_file
bench_config = BenchmarkConfig(run_options)
bench_config.load_json_file(args.config)
benchmark = make_default_benchmark(bench_config)
benchmark.run()
logger.info(f"Log written to {out_file_name}")
if __name__ == "__main__":
main()
| 29.7 | 99 | 0.674523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.303404 |
a5f7a4ecfa05bf78a585981771c76de8e093cf7a | 5,180 | py | Python | database/BuildDatabase.py | chanzuckerberg/scoreboard | 7ebf783819d0f5b4dd54092201f709b8644c85a4 | ["MIT"] | 8 | 2017-11-28T22:36:37.000Z | 2020-10-20T06:46:19.000Z | database/BuildDatabase.py | chanzuckerberg/scoreboard | 7ebf783819d0f5b4dd54092201f709b8644c85a4 | ["MIT"] | 25 | 2017-12-27T19:05:41.000Z | 2022-03-15T18:35:22.000Z | database/BuildDatabase.py | chanzuckerberg/scoreboard | 7ebf783819d0f5b4dd54092201f709b8644c85a4 | ["MIT"] | 1 | 2018-04-23T11:16:41.000Z | 2018-04-23T11:16:41.000Z | from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Boolean, String, DateTime, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
import datetime
import os
import json
database = {
'pg_user': os.environ['SCOREBOARD_PG_USERNAME'],
'pg_pass': os.environ['SCOREBOARD_PG_PASSWORD'],
'pg_host': os.environ.get('SCOREBOARD_PG_HOST', 'localhost'),
'pg_port': os.environ.get('SCOREBOARD_PG_PORT', 5432),
'pg_database': os.environ.get('SCOREBOARD_PG_DATABASE', 'scoreboard')
}
# Build database
engine = create_engine(
"postgresql://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_database}".format(**database))
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True, nullable=False)
github_username = Column(String, nullable=False)
name = Column(String)
email = Column(String)
is_admin = Column(Boolean, nullable=False)
create_date = Column(DateTime, nullable=False, server_default=func.now())
class Challenge(Base):
__tablename__ = 'challenges'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String, nullable=False)
description = Column(String)
docker_container = Column(String, nullable=False)
image = Column(String)
data_path = Column(String)
data_size = Column(String)
color = Column(String)
about = Column(String)
example_file = Column(String)
submission_header = Column(JSONB)
submission_separator = Column(String, default=",")
scores = Column(JSONB)
subscores = Column(JSONB)
start_date = Column(DateTime, nullable=False, server_default=func.now())
end_date = Column(DateTime)
is_open = Column(Boolean, nullable=False, default=True)
create_date = Column(DateTime, nullable=False, server_default=func.now())
class Dataset(Base):
__tablename__ = 'datasets'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String, nullable=False)
description = Column(String)
tree = Column(JSONB)
challenge_id = Column(Integer, ForeignKey("challenges.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
create_date = Column(DateTime, nullable=False, server_default=func.now())
class Submission(Base):
__tablename__ = 'submissions'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
challenge_id = Column(Integer, ForeignKey("challenges.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
name = Column(String, nullable=False)
repository = Column(String, nullable=False)
is_private = Column(Boolean, nullable=False)
institution = Column(String)
publication = Column(String)
is_accepted = Column(Boolean, nullable=False)
create_date = Column(DateTime, nullable=False, server_default=func.now())
class Result(Base):
__tablename__ = 'results'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
submission_id = Column(Integer, ForeignKey("submissions.id", onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
results_path = Column(String, nullable=False)
score_data = Column(JSONB)
is_current = Column(Boolean, nullable=False)
submission_date = Column(DateTime, nullable=False, server_default=func.now())
create_date = Column(DateTime, nullable=False, server_default=func.now())
class AdminEmailSettings(Base):
__tablename__ = 'email_settings'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True, nullable=False)
email_provider = Column(String, nullable=False)
email_address = Column(String, nullable=False)
email_pass = Column(String, nullable=False)
Base.metadata.create_all(engine)
# Load Data
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
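# Seed the database from initialize.json, which is expected to provide the
# challenges (each optionally carrying nested datasets), the admin usernames
# and the admin email settings used below.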
with open("initialize.json") as fh:
initialize_data = json.load(fh)
for challenge in initialize_data["challenges"]:
datasets = challenge.pop('datasets', [])
new_challenge = Challenge(**challenge)
session.add(new_challenge)
session.flush()
session.refresh(new_challenge)
challenge_id = new_challenge.id
for dataset in datasets:
dataset["challenge_id"] = challenge_id
new_dataset = Dataset(**dataset)
session.add(new_dataset)
for admin in initialize_data["admins"]:
new_user = User(github_username=admin, is_admin=True)
session.add(new_user)
email_settings = initialize_data["email_settings"]
settings = AdminEmailSettings(email_provider=email_settings["email_provider"],
email_address= email_settings["admin_email"],
email_pass=email_settings["admin_pass"])
session.add(settings)
session.commit()
| 37 | 121 | 0.72471 | 3,287 | 0.634556 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.134556 |
a5f7a9890ad3c832bc0b2c81569f369cddac6df1 | 6,767 | py | Python | docs/tutorials_source/generate_images_tutorial.py | XavierXinweiWang/UnrealCV_Ubuntu | 4fdbb32f2096a4c95c0912d018ff4adb4801fb8b | ["MIT"] | null | null | null | docs/tutorials_source/generate_images_tutorial.py | XavierXinweiWang/UnrealCV_Ubuntu | 4fdbb32f2096a4c95c0912d018ff4adb4801fb8b | ["MIT"] | null | null | null | docs/tutorials_source/generate_images_tutorial.py | XavierXinweiWang/UnrealCV_Ubuntu | 4fdbb32f2096a4c95c0912d018ff4adb4801fb8b | ["MIT"] | null | null | null | # Need at least 20 # characters
"""
===============
Generate Images
===============
This ipython notebook demonstrates how to generate an image dataset with rich
ground truth from a virtual environment.
"""
####################
import time; print(time.strftime("The last update of this file: %Y-%m-%d %H:%M:%S", time.gmtime()))
####################
# Load some python libraries
# The dependencies for this tutorials are
# PIL, Numpy, Matplotlib
from __future__ import division, absolute_import, print_function
import os, sys, time, re, json
import numpy as np
import matplotlib.pyplot as plt
imread = plt.imread
def imread8(im_file):
    ''' Read image as an 8-bit numpy array '''
    from PIL import Image
    im = np.asarray(Image.open(im_file))
    return im
def read_png(res):
import StringIO, PIL.Image
img = PIL.Image.open(StringIO.StringIO(res))
return np.asarray(img)
def read_npy(res):
import StringIO
return np.load(StringIO.StringIO(res))
###############################
# Connect to the game
# ===================
# Load unrealcv python client, do :code:`pip install unrealcv` first.
from unrealcv import client
client.connect()
if not client.isconnected():
print('UnrealCV server is not running. Run the game downloaded from http://unrealcv.github.io first.')
sys.exit(-1)
###############################
# Make sure the connection works well
res = client.request('vget /unrealcv/status')
# The image resolution and port is configured in the config file.
print(res)
##############################
# Load a camera trajectory
# ========================
traj_file = './camera_traj.json' # Relative to this python script
import json; camera_trajectory = json.load(open(traj_file))
# We will show how to record a camera trajectory in another tutorial
##############################
# Render an image
# ===============
idx = 1
loc, rot = camera_trajectory[idx]
# Set position of the first camera
client.request('vset /camera/0/location {x} {y} {z}'.format(**loc))
client.request('vset /camera/0/rotation {pitch} {yaw} {roll}'.format(**rot))
# Get image
res = client.request('vget /camera/0/lit lit.png')
print('The image is saved to %s' % res)
# It is also possible to get the png directly without saving to a file
res = client.request('vget /camera/0/lit png')
im = read_png(res)
print(im.shape)
# Visualize the image we just captured
plt.imshow(im)
##############################
# Ground truth generation
# =======================
# Generate ground truth from this virtual scene
res = client.request('vget /camera/0/object_mask png')
object_mask = read_png(res)
res = client.request('vget /camera/0/normal png')
normal = read_png(res)
# Visualize the captured ground truth
plt.imshow(object_mask)
plt.figure()
plt.imshow(normal)
###############################
# Depth is retrieved as a numpy array
# For UnrealCV < v0.3.8, the depth is saved as an exr file, but this has two issues. 1. Exr is not well supported in Linux 2. It depends on OpenCV to read exr file, which is hard to install
res = client.request('vget /camera/0/depth npy')
depth = read_npy(res)
plt.imshow(depth)
##############################
# Get object information
# ======================
# List all the objects of this virtual scene
scene_objects = client.request('vget /objects').split(' ')
print('Number of objects in this scene:', len(scene_objects))
# TODO: replace this with a better implementation
class Color(object):
''' A utility class to parse color value '''
regexp = re.compile('\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
def __init__(self, color_str):
self.color_str = color_str
match = self.regexp.match(color_str)
(self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1,5)]
def __repr__(self):
return self.color_str
id2color = {} # Map from object id to the labeling color
for obj_id in scene_objects:
color = Color(client.request('vget /object/%s/color' % obj_id))
id2color[obj_id] = color
# print('%s : %s' % (obj_id, str(color)))
#############################
# Parse the segmentation mask
def match_color(object_mask, target_color, tolerance=3):
match_region = np.ones(object_mask.shape[0:2], dtype=bool)
for c in range(3): # r,g,b
min_val = target_color[c] - tolerance
max_val = target_color[c] + tolerance
channel_region = (object_mask[:,:,c] >= min_val) & (object_mask[:,:,c] <= max_val)
match_region &= channel_region
if match_region.sum() != 0:
return match_region
else:
return None
id2mask = {}
for obj_id in scene_objects:
color = id2color[obj_id]
mask = match_color(object_mask, [color.R, color.G, color.B], tolerance = 3)
if mask is not None:
id2mask[obj_id] = mask
# This may take a while
# TODO: Need to find a faster implementation for this
##############################
# Print statistics of this virtual scene and this image
# =====================================================
# Load information of this scene
with open('object_category.json') as f:
id2category = json.load(f)
categories = set(id2category.values())
# Show statistics of this frame
image_objects = id2mask.keys()
print('Number of objects in this image:', len(image_objects))
print('%20s : %s' % ('Category name', 'Object name'))
for category in categories:
objects = [v for v in image_objects if id2category.get(v) == category]
if len(objects) > 6: # Trim the list if too long
objects[6:] = ['...']
if len(objects) != 0:
print('%20s : %s' % (category, objects))
##############################
# Show the annotation color of some objects
ids = ['SM_Couch_1seat_5', 'SM_Vase_17', 'SM_Shelving_6', 'SM_Plant_8']
# for obj_id in ids:
obj_id = ids[0]
color = id2color[obj_id]
print('%s : %s' % (obj_id, str(color)))
# color_block = np.zeros((100,100, 3)) + np.array([color.R, color.G, color.B]) / 255.0
# plt.figure(); plt.imshow(color_block); plt.title(obj_id)
##############################
# Plot only one object
mask = id2mask['SM_Plant_8']
plt.figure(); plt.imshow(mask)
##############################
# Show all sofas in this image
couch_instance = [v for v in image_objects if id2category.get(v) == 'Couch']
mask = sum(id2mask[v] for v in couch_instance)
plt.figure(); plt.imshow(mask)
##############################
# Change the annotation color, fixed in v0.3.9
# You can use this to make objects you don't care the same color
client.request('vset /object/SM_Couch_1seat_5/color 255 0 0') # Change to pure red
client.request('vget /object/SM_Couch_1seat_5/color')
res = client.request('vget /camera/0/object_mask png')
object_mask = read_png(res)
plt.imshow(object_mask)
##############################
# Clean up resources
# ==================
client.disconnect()
| 33.171569 | 189 | 0.632186 | 383 | 0.056598 | 0 | 0 | 0 | 0 | 0 | 0 | 3,574 | 0.528151 |
a5f94be0d65ac72db51e3348feb82e21a6da2f05 | 458 | py | Python | blogs/migrations/0003_auto_20161006_1654.py | fenglb/mysite | a5b68ebdb23f4d5c55c5490ffebaa97780a5d6ab | ["CC0-1.0"] | null | null | null | blogs/migrations/0003_auto_20161006_1654.py | fenglb/mysite | a5b68ebdb23f4d5c55c5490ffebaa97780a5d6ab | ["CC0-1.0"] | null | null | null | blogs/migrations/0003_auto_20161006_1654.py | fenglb/mysite | a5b68ebdb23f4d5c55c5490ffebaa97780a5d6ab | ["CC0-1.0"] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-06 08:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0002_auto_20161006_1652'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='md5',
field=models.CharField(blank=True, max_length=32),
),
]
| 21.809524 | 62 | 0.617904 | 302 | 0.659389 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.253275 |
a5fa4568c42ee2ffd61fc83bd38a5e610ae98af1 | 1,660 | py | Python | examples/ieffileupdate.py | duncan-r/SHIP | 2c4c22c77f9c18ea545d3bce70a36aebbd18256a | ["MIT"] | 6 | 2016-04-10T17:32:44.000Z | 2022-03-13T18:41:21.000Z | examples/ieffileupdate.py | duncan-r/SHIP | 2c4c22c77f9c18ea545d3bce70a36aebbd18256a | ["MIT"] | 19 | 2017-06-23T08:21:53.000Z | 2017-07-26T08:23:03.000Z | examples/ieffileupdate.py | duncan-r/SHIP | 2c4c22c77f9c18ea545d3bce70a36aebbd18256a | ["MIT"] | 6 | 2016-10-26T16:04:38.000Z | 2019-04-25T23:55:06.000Z | """
Summary:
Example use of the fmp package to update file paths in an .ief file
and save the ief file under a new name.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
import os
from ship.utils.fileloaders import fileloader as fl
# Contains functions for updating file paths and reading/writing files
from ship.utils import filetools
def iefExample():
"""update some key file paths in an ief file.
Updates the .dat file, .tcf file, and results file paths referenced by
the ief file and save it under a new ief file name.
"""
# Load the tuflow model with a tcf file
ief_file = r'C:\path\to\an\isis\ieffile.ief'
loader = fl.FileLoader()
ief = loader.loadFile(ief_file)
# Get the referenced fmp .dat and .tcf files
dat_path = ief.getValue('Datafile')
tcf_path = ief.getValue('2DFile')
results_path = ief.getValue('Results')
# Update the dat, results and tcf file names
root, ext = os.path.splitext(dat_path)
new_dat = root + '_Updated' + ext
root, ext = os.path.splitext(results_path)
new_results = root + '_Updated' + ext
root, ext = os.path.splitext(tcf_path)
new_tcf = root + '_Updated' + ext
ief.setValue('Datafile', new_dat)
ief.setValue('Results', new_results)
ief.setValue('2DFile', new_tcf)
# Update the filename and write contents to disk
ief.path_holder.filename += '_Updated'
ief_path = ief.path_holder.absolutePath()
ief.write(ief_path)
if __name__ == '__main__':
iefExample()
| 26.349206 | 75 | 0.654217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 870 | 0.524096 |
a5fbd11dbc9a0e80007cdb92a40b5c8dd7191ce7 | 8,387 | py | Python | packages/w3af/w3af/core/data/url/HTTPRequest.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | ["Apache-2.0"] | null | null | null | packages/w3af/w3af/core/data/url/HTTPRequest.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | ["Apache-2.0"] | null | null | null | packages/w3af/w3af/core/data/url/HTTPRequest.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | ["Apache-2.0"] | null | null | null | """
HTTPRequest.py
Copyright 2010 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import copy
import socket
import urllib2
from w3af.core.data.dc.headers import Headers
from w3af.core.data.dc.utils.token import DataToken
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.request.request_mixin import RequestMixIn
from w3af.core.data.url.constants import MAX_HTTP_RETRIES
class HTTPRequest(RequestMixIn, urllib2.Request):
def __init__(self, url,
data=None,
headers=None,
origin_req_host=None,
unverifiable=False,
cookies=True,
session=None,
cache=False,
method=None,
error_handling=True,
retries=MAX_HTTP_RETRIES,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
new_connection=False,
follow_redirects=False,
use_basic_auth=True,
use_proxy=True,
debugging_id=None,
binary_response=False):
"""
This is a simple wrapper around a urllib2 request object which helps
with some common tasks like serialization, cache, etc.
:param method: None means choose the default (POST if data is not None)
:param data: The post_data as a string
"""
headers = headers or Headers()
#
# Save some information for later access in an easier way
#
self.url_object = url
self.cookies = cookies
self.session = session
self.get_from_cache = cache
self.error_handling = error_handling
self.retries_left = retries
self.timeout = timeout
self.new_connection = new_connection
self.follow_redirects = follow_redirects
self.use_basic_auth = use_basic_auth
self.use_proxy = use_proxy
self.debugging_id = debugging_id
self._binary_response = binary_response
self.method = method
if self.method is None:
self.method = 'POST' if data else 'GET'
if isinstance(headers, Headers):
headers.tokens_to_value()
headers = dict(headers)
# Call the base class constructor
urllib2.Request.__init__(self, url.url_encode(), data,
headers, origin_req_host, unverifiable)
RequestMixIn.__init__(self)
def __eq__(self, other):
return (self.get_method() == other.get_method() and
self.get_uri() == other.get_uri() and
self.get_headers() == other.get_headers() and
self.get_data() == other.get_data() and
self.get_timeout() == other.get_timeout())
def with_binary_response(self):
return self._binary_response
def set_data(self, data):
self.data = data
def add_header(self, key, val):
"""
Override mostly to avoid having header values of DataToken type
:param key: The header name as a string
:param val: The header value (a string of a DataToken)
:return: None
"""
if isinstance(val, DataToken):
val = val.get_value()
self.headers[key.capitalize()] = val
def get_method(self):
return self.method
def set_method(self, method):
self.method = method
def get_netloc(self):
uri = self.get_uri()
return '%s:%s' % (uri.get_domain(), uri.get_port())
def get_domain(self):
return self.get_uri().get_domain()
def get_uri(self):
return self.url_object
def set_uri(self, url_object):
self.url_object = url_object
def get_headers(self):
headers = Headers(self.headers.items())
headers.update(self.unredirected_hdrs.items())
return headers
def set_headers(self, headers):
self.headers = dict(headers)
def get_timeout(self):
return self.timeout
def set_timeout(self, timeout):
self.timeout = timeout
def set_new_connection(self, new_connection):
self.new_connection = new_connection
def get_new_connection(self):
return self.new_connection
def to_dict(self):
serializable_dict = {}
sdict = serializable_dict
sdict['method'] = self.get_method()
sdict['uri'] = self.get_uri().url_string
sdict['headers'] = dict(self.get_headers())
sdict['data'] = self.get_data()
sdict['cookies'] = self.cookies
sdict['session'] = self.session
sdict['cache'] = self.get_from_cache
sdict['timeout'] = None if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT else self.timeout
sdict['new_connection'] = self.new_connection
sdict['follow_redirects'] = self.follow_redirects
sdict['use_basic_auth'] = self.use_basic_auth
sdict['use_proxy'] = self.use_proxy
sdict['debugging_id'] = self.debugging_id
sdict['binary_response'] = self._binary_response
return serializable_dict
@classmethod
def from_fuzzable_request(cls, fuzzable_request):
"""
:param fuzzable_request: The FuzzableRequest
:return: An instance of HTTPRequest with all the information contained
in the FuzzableRequest passed as parameter
"""
host = fuzzable_request.get_url().get_domain()
data = fuzzable_request.get_data()
headers = fuzzable_request.get_headers()
headers.tokens_to_value()
return cls(fuzzable_request.get_uri(), data=data, headers=headers,
origin_req_host=host)
@classmethod
def from_dict(cls, unserialized_dict):
"""
* msgpack is MUCH faster than cPickle,
* msgpack can't serialize python objects,
* I have to create a dict representation of HTTPRequest to serialize it,
* and a from_dict to have the object back
:param unserialized_dict: A dict just as returned by to_dict()
"""
udict = unserialized_dict
method, uri = udict['method'], udict['uri']
headers, data = udict['headers'], udict['data']
cookies = udict['cookies']
session = udict['session']
cache = udict['cache']
timeout = socket.getdefaulttimeout() if udict['timeout'] is None else udict['timeout']
new_connection = udict['new_connection']
follow_redirects = udict['follow_redirects']
use_basic_auth = udict['use_basic_auth']
use_proxy = udict['use_proxy']
debugging_id = udict['debugging_id']
binary_response = udict['binary_response']
headers_inst = Headers(headers.items())
url = URL(uri)
return cls(url, data=data, headers=headers_inst,
cookies=cookies, session=session,
cache=cache, method=method,
timeout=timeout, new_connection=new_connection,
follow_redirects=follow_redirects,
use_basic_auth=use_basic_auth, use_proxy=use_proxy,
debugging_id=debugging_id, binary_response=binary_response)
def copy(self):
return copy.deepcopy(self)
def __repr__(self):
fmt = '<HTTPRequest "%s" (cookies:%s, cache:%s, did:%s, timeout:%.2f, new_connection:%s)>'
timeout = 3 if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT else self.timeout
return fmt % (self.url_object.url_string,
self.cookies,
self.get_from_cache,
self.debugging_id,
timeout,
self.new_connection)
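# --- Usage sketch (editor's addition, illustrative only) ----------------------
# Round-trip a request through its plain-dict form (the representation intended
# for msgpack transport, see from_dict above). The URL below is hypothetical.
if __name__ == '__main__':
    _request = HTTPRequest(URL('http://example.com/'), method='GET')
    _restored = HTTPRequest.from_dict(_request.to_dict())
    print('%s %s' % (_restored.get_method(), _restored.get_uri()))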
| 34.514403 | 99 | 0.623465 | 7,368 | 0.878502 | 0 | 0 | 2,113 | 0.251938 | 0 | 0 | 2,253 | 0.26863 |
a5ff1935416c4a799dc3631e3b180db7559793bf | 817 | py | Python | moldesign/_notebooks/nbscripts/gen_toc.py | Autodesk/molecular-design-toolkit | 5f45a47fea21d3603899a6366cb163024f0e2ec4 | [
"Apache-2.0"
]
| 147 | 2016-07-15T18:53:55.000Z | 2022-01-30T04:36:39.000Z | moldesign/_notebooks/nbscripts/gen_toc.py | cherishyli/molecular-design-toolkit | 5f45a47fea21d3603899a6366cb163024f0e2ec4 | [
"Apache-2.0"
]
| 151 | 2016-07-15T21:35:11.000Z | 2019-10-10T08:57:29.000Z | moldesign/_notebooks/nbscripts/gen_toc.py | cherishyli/molecular-design-toolkit | 5f45a47fea21d3603899a6366cb163024f0e2ec4 | [
"Apache-2.0"
]
| 33 | 2016-08-02T00:04:51.000Z | 2021-09-02T10:05:04.000Z | #!/usr/bin/env python
from __future__ import print_function
import sys, os
from nbformat import v4
def parse_line(line):
if not line.startswith('#'):
return None
ilevel = 0
for char in line:
if char == '#': ilevel += 1
else: break
name = line[ilevel:].strip()
return ilevel, name
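# Example (editor's note): parse_line('## Setup') returns (2, 'Setup'); lines that
# do not start with '#' return None.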
if __name__ == '__main__':
with open(sys.argv[1], 'r') as nbfile:
nb = v4.reads(nbfile.read())
print('Contents\n=======\n---')
for cell in nb.cells:
if cell['cell_type'] == 'markdown':
for line in cell['source'].splitlines():
header = parse_line(line)
if header is None: continue
ilevel, name = header
print(' '*(ilevel-1) + ' - [%s](#%s)'%(name, name.replace(' ','-')))
| 19.452381 | 85 | 0.532436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.143207 |
570013af797984af8b152ded17b276b76de011a7 | 1,102 | py | Python | server/gaiaApi/gaia/serializer.py | JawaBaliIBM/Gaia | 12572330c637cec559f8f122ecc2bd3af3dcf64e | [
"Apache-2.0"
]
| 6 | 2021-07-31T10:52:36.000Z | 2022-03-19T17:10:55.000Z | server/gaiaApi/gaia/serializer.py | JawaBaliIBM/Gaia | 12572330c637cec559f8f122ecc2bd3af3dcf64e | [
"Apache-2.0"
]
| 3 | 2021-07-24T08:17:53.000Z | 2021-08-10T14:41:46.000Z | server/gaiaApi/gaia/serializer.py | JawaBaliIBM/Gaia | 12572330c637cec559f8f122ecc2bd3af3dcf64e | [
"Apache-2.0"
]
| 1 | 2021-07-31T10:15:45.000Z | 2021-07-31T10:15:45.000Z | from rest_framework import serializers
class ArticleSerializer(serializers.Serializer):
date = serializers.CharField(allow_blank=False)
sentiment = serializers.CharField(allow_blank=False)
title = serializers.CharField(allow_blank=False)
description = serializers.CharField(allow_blank=False)
article_id = serializers.CharField(allow_blank=False)
url = serializers.URLField(allow_blank=False)
brand = serializers.CharField(allow_blank=False)
class ArticlesSerializer(serializers.Serializer):
prev = serializers.URLField()
next = serializers.URLField()
results = serializers.ListField(child=ArticleSerializer())
class ArticlesWithIndexSerializer(serializers.Serializer):
total = serializers.IntegerField()
prev = serializers.URLField()
next = serializers.URLField()
results = serializers.ListField(child=ArticleSerializer())
class BrandSerializer(serializers.Serializer):
name = serializers.CharField(allow_blank=False)
score = serializers.FloatField(max_value=1.0, min_value=0)
sentiment = serializers.CharField(allow_blank=False) | 42.384615 | 62 | 0.784029 | 1,056 | 0.958258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5701301232a4492ca4d517aea40acc01301fe2f8 | 329 | py | Python | aoc2020/d01_report_repair/methods.py | sflis/aoc2020 | ef6ee81c18b6ec8332b150638b3d78772fe8327a | [
"Unlicense"
]
| null | null | null | aoc2020/d01_report_repair/methods.py | sflis/aoc2020 | ef6ee81c18b6ec8332b150638b3d78772fe8327a | [
"Unlicense"
]
| null | null | null | aoc2020/d01_report_repair/methods.py | sflis/aoc2020 | ef6ee81c18b6ec8332b150638b3d78772fe8327a | [
"Unlicense"
]
| null | null | null | import numpy as np
def sum_pair_equals(values, val_eq):
values = np.array(values)
return np.where(values + values[:, None] == val_eq)[0]
def sum_triad_equals(values, val_eq):
values = np.array(values)
sum_ = values + values[:, None] + values[:, None, None]
return np.array(np.where(sum_ == val_eq))[:, 0]
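# Usage sketch (editor's addition): broadcasting `values + values[:, None]` builds
# the full pairwise-sum matrix, so np.where picks out the indices whose entries hit
# the target; the triad version adds one more broadcast axis. Sample data is made up.
if __name__ == "__main__":
    expenses = [1721, 979, 366, 299, 675, 1456]
    print(sum_pair_equals(expenses, 2020))   # -> [0 3], the entries 1721 and 299
    print(sum_triad_equals(expenses, 2020))  # -> [1 2 4], the entries 979, 366 and 675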
| 25.307692 | 59 | 0.653495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
570168b655cd4c5fe01f67c0408794d1cfd928aa | 2,306 | py | Python | tests/test_persona.py | holnburger/persine | cb26d1e275f7ed7e1048bc1e6b66b71386c3e602 | [
"MIT"
]
| 84 | 2020-12-20T20:39:19.000Z | 2022-02-02T01:01:12.000Z | tests/test_persona.py | holnburger/persine | cb26d1e275f7ed7e1048bc1e6b66b71386c3e602 | [
"MIT"
]
| 1 | 2020-12-25T01:07:09.000Z | 2020-12-25T04:05:19.000Z | tests/test_persona.py | holnburger/persine | cb26d1e275f7ed7e1048bc1e6b66b71386c3e602 | [
"MIT"
]
| 9 | 2020-12-23T03:10:35.000Z | 2021-09-08T14:44:18.000Z | import pytest
from persine import Persona
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from unittest.mock import Mock
@pytest.fixture
def engine():
def launch_chrome(user_data_dir):
options = Options()
options.add_argument("--headless")
return webdriver.Chrome(options=options)
eng = Mock()
eng.data_dir = "/tmp/data_dir"
eng.history_path = "/tmp/history.json"
eng.launch = launch_chrome
eng.run = lambda driver, action: { 'action': action }
return eng
def test_context(engine):
with Persona(engine=engine) as persona:
assert persona.driver is not None
assert persona.driver is None
def test_history(engine):
persona = Persona(engine=engine)
assert len(persona.history) == 0
persona.update_history(
{
"key": "test-key-1",
"url": "sample",
"action": "test:sample",
"recommendations": [{"number": 1}, {"number": 2}, {"number": 3}],
}
)
persona.update_history(
{
"key": "test-key-2",
"url": "sample2",
"action": "test:sample",
"recommendations": [{"number": 3}, {"number": 2}, {"number": 1}],
}
)
assert len(persona.history) == 2
assert len(persona.recommendations) == 6
def test_history_notes(engine):
persona = Persona(engine=engine)
assert len(persona.history) == 0
persona.update_history(
{
"key": "test-key-1",
"url": "sample",
"action": "test:sample",
"recommendations": [{"number": 1}, {"number": 2}, {"number": 3}],
},
{
"note_key": "note_value"
}
)
assert persona.history[-1]['note_key'] == 'note_value'
def test_run_notes(engine):
with Persona(engine=engine) as persona:
persona.run('http://jonathansoma.com', {'note_key': 'note_value'})
assert persona.history[-1]['action'] == 'http://jonathansoma.com'
assert persona.history[-1]['note_key'] == 'note_value'
def test_startup_shutdown(engine):
persona = Persona(engine=engine)
assert persona.driver is None
persona.launch()
assert persona.driver is not None
persona.quit()
assert persona.driver is None
| 26.204545 | 77 | 0.598439 | 0 | 0 | 0 | 0 | 388 | 0.168257 | 0 | 0 | 477 | 0.206852 |
57018df18d3cbc94d73679782950464b4f793c17 | 26,556 | py | Python | inference.py | QuPengfei/learnable-triangulation-pytorch | 861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2 | [
"MIT"
]
| null | null | null | inference.py | QuPengfei/learnable-triangulation-pytorch | 861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2 | [
"MIT"
]
| null | null | null | inference.py | QuPengfei/learnable-triangulation-pytorch | 861d9ccf8b06bd2f130697cd40b7ac57d7f7d9f2 | [
"MIT"
]
| null | null | null | import torch
import numpy as np
import cv2
import os
import h5py
from collections import defaultdict
from mvn.models.triangulation import RANSACTriangulationNet, AlgebraicTriangulationNet, VolumetricTriangulationNet
from mvn.models.loss import KeypointsMSELoss, KeypointsMSESmoothLoss, KeypointsMAELoss, KeypointsL2Loss, VolumetricCELoss
from mvn.utils import img, multiview, op, vis, misc, cfg
from mvn.utils.img import get_square_bbox, resize_image, crop_image, normalize_image, scale_bbox
from mvn.utils.multiview import Camera
from mvn.utils.multiview import project_3d_points_to_image_plane_without_distortion as project
from mvn.datasets import utils as dataset_utils
from mvn.utils.img import image_batch_to_torch
retval = {
'subject_names': ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11'],
'camera_names': ['54138969', '55011271', '58860488', '60457274'],
'action_names': [
'Directions-1', 'Directions-2',
'Discussion-1', 'Discussion-2',
'Eating-1', 'Eating-2',
'Greeting-1', 'Greeting-2',
'Phoning-1', 'Phoning-2',
'Posing-1', 'Posing-2',
'Purchases-1', 'Purchases-2',
'Sitting-1', 'Sitting-2',
'SittingDown-1', 'SittingDown-2',
'Smoking-1', 'Smoking-2',
'TakingPhoto-1', 'TakingPhoto-2',
'Waiting-1', 'Waiting-2',
'Walking-1', 'Walking-2',
'WalkingDog-1', 'WalkingDog-2',
'WalkingTogether-1', 'WalkingTogether-2']
}
h5_file="data/human36m/extra/una-dinosauria-data/h36m/cameras.h5"
bbox_file="data/human36m/extra/bboxes-Human36M-GT.npy"
def square_the_bbox(bbox):
top, left, bottom, right = bbox
width = right - left
height = bottom - top
if height < width:
center = (top + bottom) * 0.5
top = int(round(center - width * 0.5))
bottom = top + width
else:
center = (left + right) * 0.5
left = int(round(center - height * 0.5))
right = left + height
return top, left, bottom, right
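# Example (editor's note): square_the_bbox((10, 20, 110, 260)) pads the shorter
# vertical side of the 100x240 TLBR box and returns (-60, 20, 180, 260), a
# 240x240 square centred on the original box.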
def fill_bbox(bb_file):
# Fill bounding boxes TLBR
bboxes = np.load(bb_file, allow_pickle=True).item()
for subject in bboxes.keys():
for action in bboxes[subject].keys():
for camera, bbox_array in bboxes[subject][action].items():
for frame_idx, bbox in enumerate(bbox_array):
bbox[:] = square_the_bbox(bbox)
return bboxes
def fill_bbox_subject_action(bb_file, subject, action):
# Fill bounding boxes TLBR
bboxes = np.load(bb_file, allow_pickle=True).item()
bboxes_subject_action = bboxes[subject][action]
for camera, bbox_array in bboxes_subject_action.items():
for frame_idx, bbox in enumerate(bbox_array):
bbox[:] = square_the_bbox(bbox)
return bboxes_subject_action
def get_bbox_subject_action(bboxes, idx):
bbox = {}
for (camera_idx, camera) in enumerate(retval['camera_names']):
bbox[camera] = bboxes[camera][idx]
return bbox
def fill_cameras(h5_cameras):
info = np.empty(
(len(retval['subject_names']), len(retval['camera_names'])),
dtype=[
('R', np.float32, (3,3)),
('t', np.float32, (3,1)),
('K', np.float32, (3,3)),
('dist', np.float32, 5)
]
)
cameras_params = h5py.File(h5_cameras, 'r')
# Fill retval['cameras']
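    # The loop below converts the una-dinosauria H36M parameters to this layout:
    # the stored rotation is transposed and t = -R @ T (world-to-camera extrinsics),
    # K is rebuilt from the focal lengths 'f' and principal point 'c', and the
    # distortion vector is packed as [k1, k2, p1, p2, k3].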
for subject_idx, subject in enumerate(retval['subject_names']):
for camera_idx, camera in enumerate(retval['camera_names']):
assert len(cameras_params[subject.replace('S', 'subject')]) == 4
camera_params = cameras_params[subject.replace('S', 'subject')]['camera%d' % (camera_idx+1)]
camera_retval = info[subject_idx][camera_idx]
def camera_array_to_name(array):
return ''.join(chr(int(x[0])) for x in array)
assert camera_array_to_name(camera_params['Name']) == camera
camera_retval['R'] = np.array(camera_params['R']).T
camera_retval['t'] = -camera_retval['R'] @ camera_params['T']
camera_retval['K'] = 0
camera_retval['K'][:2, 2] = camera_params['c'][:, 0]
camera_retval['K'][0, 0] = camera_params['f'][0]
camera_retval['K'][1, 1] = camera_params['f'][1]
camera_retval['K'][2, 2] = 1.0
camera_retval['dist'][:2] = camera_params['k'][:2, 0]
camera_retval['dist'][2:4] = camera_params['p'][:, 0]
camera_retval['dist'][4] = camera_params['k'][2, 0]
return info
def fill_cameras_subject(h5_cameras,subject):
info = np.empty(
len(retval['camera_names']),
dtype=[
('R', np.float32, (3,3)),
('t', np.float32, (3,1)),
('K', np.float32, (3,3)),
('dist', np.float32, 5)
]
)
cameras = {}
subject_idx = retval['subject_names'].index(subject)
cameras_params = h5py.File(h5_cameras, 'r')
# Fill retval['cameras']
for camera_idx, camera in enumerate(retval['camera_names']):
assert len(cameras_params[subject.replace('S', 'subject')]) == 4
camera_params = cameras_params[subject.replace('S', 'subject')]['camera%d' % (camera_idx+1)]
camera_retval = info[camera_idx]
def camera_array_to_name(array):
return ''.join(chr(int(x[0])) for x in array)
assert camera_array_to_name(camera_params['Name']) == camera
camera_retval['R'] = np.array(camera_params['R']).T
camera_retval['t'] = -camera_retval['R'] @ camera_params['T']
camera_retval['K'] = 0
camera_retval['K'][:2, 2] = camera_params['c'][:, 0]
camera_retval['K'][0, 0] = camera_params['f'][0]
camera_retval['K'][1, 1] = camera_params['f'][1]
camera_retval['K'][2, 2] = 1.0
camera_retval['dist'][:2] = camera_params['k'][:2, 0]
camera_retval['dist'][2:4] = camera_params['p'][:, 0]
camera_retval['dist'][4] = camera_params['k'][2, 0]
cameras[camera] = camera_retval
return cameras
#retval['bboxes'] = fill_bbox(bbox_file)
#retval['cameras'] = fill_cameras(h5_file)
class Detector:
def __init__(self, config, device = "cuda:0"):
super().__init__()
self.model = {
"ransac": RANSACTriangulationNet,
"alg": AlgebraicTriangulationNet,
"vol": VolumetricTriangulationNet
}[config.model.name](config, device=device).to(device)
if config.model.init_weights:
            # strip the "module." prefix that DataParallel checkpoints add, then load
            state_dict = torch.load(config.model.checkpoint)
            for key in list(state_dict.keys()):
                new_key = key.replace("module.", "")
                state_dict[new_key] = state_dict.pop(key)
            self.model.load_state_dict(state_dict, strict=True)
print("Successfully loaded pretrained weights for whole model")
def infer(self, batch, model_type, device, config):
"""
For a single image inference
"""
outputBatch = {}
inputBatch = {}
images_batch = []
image_batch = image_batch_to_torch(batch['images'])
image_batch = image_batch.to(device)
images_batch.append(image_batch)
images_batch = torch.stack(images_batch, dim=0)
#proj_matricies_batch = [torch.from_numpy(camera.projection).float().to(device) for camera in batch['cameras']]
proj_matricies_batch = torch.stack([torch.from_numpy(camera.projection) for camera in batch['cameras']], dim=0)
proj_matricies_batch = proj_matricies_batch.float().to(device)
proj_matricies_batchs = [] # shape (batch_size, n_views, 3, 4)
proj_matricies_batchs.append(proj_matricies_batch)
proj_matricies_batchs = torch.stack(proj_matricies_batchs,dim=0)
#print(proj_matricies_batchs,proj_matricies_batchs.shape,len(batch),images_batch.shape)
keypoints_2d_pred, cuboids_pred, base_points_pred, volumes_pred, coord_volumes_pred = None, None, None, None, None
if model_type == "alg" or model_type == "ransac":
keypoints_3d_pred, keypoints_2d_pred, heatmaps_pred, confidences_pred = self.model(images_batch, proj_matricies_batchs, batch)
elif model_type == "vol":
keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred, cuboids_pred, coord_volumes_pred, base_points_pred = self.model(images_batch, proj_matricies_batchs, batch)
outputBatch["keypoints_3d_pred"] = keypoints_3d_pred
outputBatch["heatmaps_pred"] = heatmaps_pred
outputBatch["volumes_pred"] = volumes_pred
outputBatch["confidences_pred"] = confidences_pred
outputBatch["cuboids_pred"] = confidences_pred
outputBatch["coord_volumes_pred"] = coord_volumes_pred
outputBatch["base_points_pred"] = base_points_pred
inputBatch["images_batch"] = images_batch
return outputBatch, inputBatch
def inferHuman36Data(self, batch, model_type, device, config, randomize_n_views,
min_n_views,
max_n_views):
"""
For batch inferences
"""
outputBatch = {}
inputBatch = {}
collatFunction = dataset_utils.make_collate_fn(randomize_n_views,
min_n_views,
max_n_views)
batch = collatFunction(batch)
images_batch, keypoints_3d_gt, keypoints_3d_validity_gt, proj_matricies_batch = dataset_utils.prepare_batch(batch, device, config)
#print(proj_matricies_batch,proj_matricies_batch.shape,len(batch),images_batch.shape)
keypoints_2d_pred, cuboids_pred, base_points_pred, volumes_pred, coord_volumes_pred = None, None, None, None, None
if model_type == "alg" or model_type == "ransac":
keypoints_3d_pred, keypoints_2d_pred, heatmaps_pred, confidences_pred = self.model(images_batch, proj_matricies_batch, batch)
elif model_type == "vol":
keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred, cuboids_pred, coord_volumes_pred, base_points_pred = self.model(images_batch, proj_matricies_batch, batch)
outputBatch["keypoints_3d_pred"] = keypoints_3d_pred
outputBatch["heatmaps_pred"] = heatmaps_pred
outputBatch["volumes_pred"] = volumes_pred
outputBatch["confidences_pred"] = confidences_pred
outputBatch["cuboids_pred"] = confidences_pred
outputBatch["coord_volumes_pred"] = coord_volumes_pred
outputBatch["base_points_pred"] = base_points_pred
inputBatch["images_batch"] = images_batch
inputBatch["proj_matricies_batch"] = proj_matricies_batch
return outputBatch, inputBatch
def viewSample(sample,idx=0):
camera_idx = 0
image = sample['images'][camera_idx]
camera = sample['cameras'][camera_idx]
subject = sample['subject'][camera_idx]
action = sample['action'][camera_idx]
display = image.copy()
keypoints_2d = project(camera.projection, sample['keypoints_3d'][:, :3])
for i,(x,y) in enumerate(keypoints_2d):
cv2.circle(display, (int(x), int(y)), 3, (0,0,255), -1)
file = f"./result/{subject}-{action}-{camera.name}-{idx}.png"
cv2.imwrite(file, display)
def viewHeatmaps(sample, idx, prediction, inputBatch, config):
# TODO get the visualization done
images_batch = []
for image_batch in sample['images']:
images_batch.append(image_batch)
heatmaps_vis = vis.visualize_heatmaps(
inputBatch["images_batch"], prediction["heatmaps_pred"],
kind=config.kind,
batch_index=0, size=5,
max_n_rows=10, max_n_cols=10)
heatmaps_vis = heatmaps_vis.transpose(2, 0, 1)
for i in range(0,4):
cv2.imwrite(f"./result/heatmaps_test_{idx}_{i}.png", heatmaps_vis[i,:,:])
def viewVideo(sample):
displays = []
# Project and draw keypoints on images
for camera_idx in range(len(sample['cameras'])): #camera_indexes_to_show:
# import ipdb; ipdb.set_trace()
display = sample['images'][camera_idx]
cv2.putText(display, f"Cam {camera_idx:02}", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
displays.append(display)
# Fancy stacked images
for j, display in enumerate(displays):
if j == 0:
combined = display
else:
combined = np.concatenate((combined, display), axis=1)
return combined
def viewVideoResult(sample,idx, prediction,config,size=(384,384)):
displays = []
keypoints3d_pred = prediction['keypoints_3d_pred'].cpu()
keypoints_3d_pred = keypoints3d_pred[0,:, :3].detach().numpy()
# Project and draw keypoints on images
for camera_idx in range(len(sample['cameras'])): #camera_indexes_to_show:
camera = sample['cameras'][camera_idx]
keypoints_2d_pred = project(camera.projection, keypoints_3d_pred)
# import ipdb; ipdb.set_trace()
img = sample['images'][camera_idx]
pred_kind = config.pred_kind if hasattr(config, "pred_kind") else config.kind
display = vis.draw_2d_pose_cv2(keypoints_2d_pred, img, kind=pred_kind)
cv2.putText(display, f"Cam {camera_idx:02}", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
displays.append(display)
display3 = vis.draw_3d_pose_image(keypoints_3d_pred,kind=pred_kind,radius=450)
display3 = cv2.cvtColor(display3,cv2.COLOR_RGBA2RGB)
display3 = cv2.resize(display3, size, interpolation=cv2.INTER_AREA)
cv2.putText(display3, f"3D prediction", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
displays.append(display3)
# Fancy stacked images
for j, display in enumerate(displays):
if j == 0:
combined = display
else:
combined = np.concatenate((combined, display), axis=1)
return combined
def viewResult(sample,idx,prediction,config,save_images_instead=1,size=(384,384)):
displays = []
camera_idx = 0
camera = sample['cameras'][camera_idx]
subject = sample['subject'][camera_idx]
action = sample['action'][camera_idx]
keypoints3d_pred = prediction['keypoints_3d_pred'].cpu()
keypoints_3d_pred = keypoints3d_pred[0,:, :3].detach().numpy()
keypoints_3d_gt = sample['keypoints_3d'][:, :3]
# Project and draw keypoints on images
for camera_idx in range(len(sample['cameras'])): #camera_indexes_to_show:
camera = sample['cameras'][camera_idx]
keypoints_2d_pred = project(camera.projection, keypoints_3d_pred)
keypoints_2d_gt = project(camera.projection, keypoints_3d_gt)
# import ipdb; ipdb.set_trace()
img = sample['images'][camera_idx]
pred_kind = config.pred_kind if hasattr(config, "pred_kind") else config.kind
display = vis.draw_2d_pose_cv2(keypoints_2d_pred, img, kind=pred_kind)
#display = vis.draw_2d_pose_cv2(keypoints_2d_gt, img, kind=config.kind)
cv2.putText(display, f"Cam {camera_idx:02}", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
displays.append(display)
display3 = vis.draw_3d_pose_image(keypoints_3d_pred,kind=pred_kind,radius=450)
display3 = cv2.cvtColor(display3,cv2.COLOR_RGBA2RGB)
display3 = cv2.resize(display3, size, interpolation=cv2.INTER_AREA)
cv2.putText(display3, f"3D prediction", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
displays.append(display3)
display3_gt = vis.draw_3d_pose_image(sample['keypoints_3d'][:, :3],kind=pred_kind,radius=450)
display3_gt = cv2.cvtColor(display3_gt,cv2.COLOR_RGBA2RGB)
display3_gt = cv2.resize(display3_gt, size, interpolation=cv2.INTER_AREA)
cv2.putText(display3_gt, f"3D GT", (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
displays.append(display3_gt)
# Fancy stacked images
for j, display in enumerate(displays):
if j == 0:
combined = display
else:
combined = np.concatenate((combined, display), axis=1)
# Load
if save_images_instead:
file = f"./result/result-{subject}-{action}-{camera.name}-{idx}.png"
cv2.imwrite(file, combined)
else:
cv2.imshow('w', combined)
cv2.setWindowTitle('w', f"Index {idx}")
c = cv2.waitKey(0) % 256
if c == ord('q') or c == 27:
print('Quitting...')
cv2.destroyAllWindows()
def prepareSample(idx, labels, human36mRoot, keyPoint3d = None , imageShape = None, scaleBox = 1.0, crop = True, normImage = False):
sample = defaultdict(list) # return value
shot = labels['table'][idx]
subject = labels['subject_names'][shot['subject_idx']]
action = labels['action_names'][shot['action_idx']]
frame_idx = shot['frame_idx']
for camera_idx, camera_name in enumerate(labels['camera_names']):
bbox = shot['bbox_by_camera_tlbr'][camera_idx][[1,0,3,2]] # TLBR to LTRB
bbox_height = bbox[2] - bbox[0]
if bbox_height == 0:
# convention: if the bbox is empty, then this view is missing
continue
# scale the bounding box
bbox = scale_bbox(bbox, scaleBox)
# load image
image_path = os.path.join(human36mRoot, subject, action, 'imageSequence', camera_name, 'img_%06d.jpg' % (frame_idx+1))
assert os.path.isfile(image_path), '%s doesn\'t exist' % image_path
image = cv2.imread(image_path)
# load camera
shot_camera = labels['cameras'][shot['subject_idx'], camera_idx]
#print(shot_camera)
retval_camera = Camera(shot_camera['R'], shot_camera['t'], shot_camera['K'], shot_camera['dist'], camera_name)
if crop:
# crop image
image = crop_image(image, bbox)
retval_camera.update_after_crop(bbox)
if imageShape is not None:
# resize
image_shape_before_resize = image.shape[:2]
image = resize_image(image, imageShape)
retval_camera.update_after_resize(image_shape_before_resize, imageShape)
sample['image_shapes_before_resize'].append(image_shape_before_resize)
if normImage:
image = normalize_image(image)
sample['images'].append(image)
sample['detections'].append(bbox + (1.0,)) # TODO add real confidences
sample['cameras'].append(retval_camera)
sample['proj_matrices'].append(retval_camera.projection)
sample["action"].append(action)
sample["subject"].append(subject)
sample["frameId"].append(frame_idx)
# 3D keypoints
# add dummy confidences
sample['keypoints_3d'] = np.pad(
shot['keypoints'][:17],
((0,0), (0,1)), 'constant', constant_values=1.0)
# build cuboid
# base_point = sample['keypoints_3d'][6, :3]
# sides = np.array([self.cuboid_side, self.cuboid_side, self.cuboid_side])
# position = base_point - sides / 2
# sample['cuboids'] = volumetric.Cuboid3D(position, sides)
# save sample's index
sample['indexes'] = idx
if keyPoint3d is not None:
sample['pred_keypoints_3d'] = keyPoint3d[idx]
sample.default_factory = None
return sample
def prepareVideoSample(info, images, cameras, bboxes, subject = 'S1', imageShape = [384, 384], scaleBox = 1.0, crop = True, normImage = False):
sample = defaultdict(list) # return value
subject_idx = info['subject_names'].index(subject)
for camera_idx, camera_name in enumerate(info['camera_names']):
bbox = bboxes[camera_name][[1,0,3,2]] # TLBR to LTRB
bbox_height = bbox[2] - bbox[0]
if bbox_height == 0:
# convention: if the bbox is empty, then this view is missing
continue
# scale the bounding box
bbox = scale_bbox(bbox, scaleBox)
# load camera
shot_camera = cameras[camera_name]
image = images[camera_name]
#print(shot_camera)
retval_camera = Camera(shot_camera['R'], shot_camera['t'], shot_camera['K'], shot_camera['dist'], camera_name)
if crop:
# crop image
image = crop_image(image, bbox)
retval_camera.update_after_crop(bbox)
if imageShape is not None:
# resize
image_shape_before_resize = image.shape[:2]
image = resize_image(image, imageShape)
retval_camera.update_after_resize(image_shape_before_resize, imageShape)
sample['images'].append(image)
sample['cameras'].append(retval_camera)
sample['proj_matrices'].append(retval_camera.projection)
# projection matricies
#print(sample['proj_matrices'])
sample.default_factory = None
return sample
def loadHuman36mLabel(path,train = True, withDamageAction=True, retain_every_n_frames_in_test=1):
"""
    This loads the labels, including bounding boxes and camera matrices.
"""
test = not train
labels = np.load(path, allow_pickle=True).item()
train_subjects = ['S1', 'S5', 'S6', 'S7', 'S8']
test_subjects = ['S9', 'S11']
train_subjects = list(labels['subject_names'].index(x) for x in train_subjects)
test_subjects = list(labels['subject_names'].index(x) for x in test_subjects)
indices = []
if train:
mask = np.isin(labels['table']['subject_idx'], train_subjects, assume_unique=True)
indices.append(np.nonzero(mask)[0])
if test:
mask = np.isin(labels['table']['subject_idx'], test_subjects, assume_unique=True)
if not withDamageAction:
mask_S9 = labels['table']['subject_idx'] == labels['subject_names'].index('S9')
damaged_actions = 'Greeting-2', 'SittingDown-2', 'Waiting-1'
damaged_actions = [labels['action_names'].index(x) for x in damaged_actions]
mask_damaged_actions = np.isin(labels['table']['action_idx'], damaged_actions)
mask &= ~(mask_S9 & mask_damaged_actions)
indices.append(np.nonzero(mask)[0][::retain_every_n_frames_in_test])
labels['table'] = labels['table'][np.concatenate(indices)]
return labels
def loadPrePelvis(path):
pred_results = np.load(path, allow_pickle=True)
keypoints_3d_pred = pred_results['keypoints_3d'][np.argsort(pred_results['indexes'])]
return keypoints_3d_pred
def infer(model_type="alg",max_num=5, save_images_instead=1, crop=True):
if model_type == "alg":
config = cfg.load_config("./experiments/human36m/train/human36m_alg.yaml")
elif model_type == "vol":
config = cfg.load_config("./experiments/human36m/train/human36m_vol_softmax.yaml")
pelvis3d = loadPrePelvis(config.dataset.train.pred_results_path)
device = torch.device(0)
labels = loadHuman36mLabel(config.dataset.train.labels_path)
detector = Detector(config, device=device)
for idx in range(max_num):
sample = [prepareSample(100+idx, labels, config.dataset.train.h36m_root, keyPoint3d=None, crop=crop, imageShape=config.image_shape)]
viewSample(sample[0],idx)
prediction, inputBatch = detector.inferHuman36Data(sample, model_type, device, config,
randomize_n_views=config.dataset.val.randomize_n_views,
min_n_views=config.dataset.val.min_n_views,
max_n_views=config.dataset.val.max_n_views)
viewResult(sample[0],idx,prediction,config,save_images_instead=save_images_instead)
def infer_videos(model_type="alg",subject="S1", action="Sitting-1", max_num=5, save_images_instead=True, crop=True):
if model_type == "alg":
config = cfg.load_config("./experiments/human36m/train/human36m_alg.yaml")
elif model_type == "vol":
config = cfg.load_config("./experiments/human36m/train/human36m_vol_softmax.yaml")
pelvis3d = loadPrePelvis(config.dataset.train.pred_results_path)
device = torch.device(0)
detector = Detector(config, device=device)
bboxes = fill_bbox_subject_action(bbox_file, subject, action)
cameras = fill_cameras_subject(h5_file,subject)
cap = {}
wri = None
human36mRoot = "/dataset/experiment-dataset/extracted/"
video_path = os.path.join(human36mRoot, subject, 'Videos')
for (camera_idx, camera) in enumerate(retval['camera_names']):
video_name = video_path+'/'+action.replace("-"," ")+'.'+camera+'.mp4'
assert os.path.isfile(video_name), '%s doesn\'t exist' % video_name
cap[camera] = cv2.VideoCapture(video_name)
size = (int(cap[camera].get(cv2.CAP_PROP_FRAME_WIDTH)),int(cap[camera].get(cv2.CAP_PROP_FRAME_HEIGHT)))
if save_images_instead:
wri = cv2.VideoWriter(
f'./result/result-{subject}-{action}.mp4',cv2.VideoWriter_fourcc('m','p','4','v'),
30,(1920,384))
idx = 0
#while True:
while True:
frames = {}
for (camera_idx, camera) in enumerate(retval['camera_names']):
success,frames[camera] = cap[camera].read()
if success != True:
break
bbox = get_bbox_subject_action(bboxes,idx)
sample = prepareVideoSample(info=retval, images=frames, cameras=cameras, bboxes=bbox, subject = subject, imageShape = [384, 384], scaleBox = 1.0, crop = True, normImage = False)
prediction, inputBatch = detector.infer(sample, model_type, device, config)
combined = viewVideoResult(sample,idx, prediction,config)
#combined = viewVideo(sample)
idx = idx + 1
if save_images_instead:
if idx < max_num:
#file = f"./result/result-video-{subject}-{action}-{camera}-{idx}.png"
#cv2.imwrite(file, combined)
wri.write(combined)
else:
break
else:
cv2.imshow('w', combined)
cv2.setWindowTitle('w', f"Index {idx}")
c = cv2.waitKey(0) % 256
if c == ord('q') or c == 27:
print('Quitting...')
                break
cv2.destroyAllWindows()
for (camera_idx, camera) in enumerate(retval['camera_names']):
cap[camera].release()
if save_images_instead: wri.release()
if __name__ == "__main__":
#infer("alg",max_num=2, crop=True)
infer_videos("alg",max_num=1000, save_images_instead=False, crop=True)
| 42.018987 | 185 | 0.640496 | 4,653 | 0.175215 | 0 | 0 | 0 | 0 | 0 | 0 | 5,126 | 0.193026 |
57028ca06deb47d996805621dce315dc63a9dc8f | 4,846 | py | Python | survol/sources_types/Linux/tcp_sockets.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
]
| null | null | null | survol/sources_types/Linux/tcp_sockets.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
]
| null | null | null | survol/sources_types/Linux/tcp_sockets.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
"""
TCP Linux sockets with netstat
"""
import re
import sys
import socket
import lib_util
import lib_common
from lib_properties import pc
from sources_types import addr as survol_addr
# Many advantages compared to psutil:
# The Python module psutil is not needed
# psutil gives only sockets if the process is accessible.
# It is much faster.
# On the other it is necessary to run netstat in the shell.
# $ netstat -aptn
# (Not all processes could be identified, non-owned process info
# will not be shown, you would have to be root to see it all.)
# Active Internet connections (servers and established)
# Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
# tcp 0 0 192.168.0.17:8000 0.0.0.0:* LISTEN 25865/python
# tcp 0 0 127.0.0.1:427 0.0.0.0:* LISTEN -
# tcp 0 0 0.0.0.0:5900 0.0.0.0:* LISTEN 4119/vino-server
# tcp 0 0 192.168.122.1:53 0.0.0.0:* LISTEN -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:60685 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:22 192.168.0.14:60371 ESTABLISHED -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:58478 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:44634 192.168.0.15:38960 TIME_WAIT -
# tcp 0 0 192.168.0.17:44634 192.168.0.14:58658 ESTABLISHED 4118/rygel
# tcp 0 0 192.168.0.17:44634 192.168.0.14:59694 ESTABLISHED 4118/rygel
# tcp 0 0 fedora22:44634 192.168.0.14:58690 ESTABLISHED 4118/rygel
# tcp 0 0 fedora22:ssh 192.168.0.14:63599 ESTABLISHED -
# tcp 0 0 fedora22:42042 176.103.:universe_suite ESTABLISHED 23512/amule
# tcp6 0 0 [::]:wbem-http [::]:* LISTEN -
# tcp6 0 0 [::]:wbem-https [::]:* LISTEN -
# tcp6 0 0 [::]:mysql [::]:* LISTEN -
# tcp6 0 0 [::]:rfb [::]:* LISTEN 4119/vino-server
# tcp6 0 0 [::]:50000 [::]:* LISTEN 23512/amule
# tcp6 0 0 [::]:43056 [::]:* LISTEN 4125/httpd
# tcp6 0 0 [::]:http [::]:* LISTEN -
# tcp6 0 0 [::]:ssh [::]:* LISTEN -
# tcp6 0 0 localhost:ipp [::]:* LISTEN -
# tcp6 0 0 [::]:telnet [::]:* LISTEN -
#
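# Example (editor's note): for a data row such as
#   "tcp 0 0 192.168.0.17:44634 192.168.0.14:60685 ESTABLISHED 4118/rygel"
# lin.split() yields [proto, recv-q, send-q, local, foreign, state, pid/program],
# which is why Main() below reads linSplit[3] (local address), linSplit[4]
# (remote address), linSplit[5] (state) and linSplit[6] ("pid/process-name").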
def Main():
cgiEnv = lib_common.CgiEnv()
args = ["netstat", '-aptn', ]
p = lib_common.SubProcPOpen(args)
grph = cgiEnv.GetGraph()
(netstat_last_output, netstat_err) = p.communicate()
# Converts to string for Python3.
netstat_str = netstat_last_output.decode("utf-8")
netstat_lines = netstat_str.split('\n')
seenHeader = False
for lin in netstat_lines:
# By default, consecutive spaces are treated as one.
linSplit = lin.split()
if len(linSplit) == 0:
continue
if not seenHeader:
if linSplit[0] == "Proto":
seenHeader = True
continue
# TODO: "tcp6"
if linSplit[0] != "tcp":
continue
# sys.stderr.write("tcp_sockets.py lin=%s\n"%lin)
sockStatus = linSplit[5]
if sockStatus not in ["ESTABLISHED","TIME_WAIT"]:
continue
addrLocal = linSplit[3]
ipLocal, portLocal = survol_addr.SplitAddrPort(addrLocal)
# It does not use survol_addr.PsutilAddSocketToGraphOne(node_process,cnt,grph)
# because sometimes we do not have the process id.
localSocketNode = lib_common.gUriGen.AddrUri( ipLocal, portLocal )
grph.add( ( localSocketNode, pc.property_information, lib_common.NodeLiteral(sockStatus) ) )
addrRemot = linSplit[4]
# This is different for IPV6
if addrRemot != "0.0.0.0:*":
ipRemot, portRemot = survol_addr.SplitAddrPort(addrRemot)
remotSocketNode = lib_common.gUriGen.AddrUri( ipRemot, portRemot )
grph.add( ( localSocketNode, pc.property_socket_end, remotSocketNode ) )
pidCommand = linSplit[6]
if pidCommand != "-":
procPid, procNam = pidCommand.split("/")
procNode = lib_common.gUriGen.PidUri(procPid)
grph.add( ( procNode, pc.property_host, lib_common.nodeMachine ) )
grph.add( ( procNode, pc.property_pid, lib_common.NodeLiteral(procPid) ) )
grph.add( ( procNode, pc.property_has_socket, localSocketNode ) )
else:
# If the local process is not known, just link the local socket to the local machine.
grph.add( ( lib_common.nodeMachine, pc.property_host, localSocketNode ) )
cgiEnv.OutCgiRdf()
if __name__ == '__main__':
Main()
| 38.460317 | 98 | 0.577383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,079 | 0.635369 |
57033e68edf1bc714421c03684cc8349a3a89d3f | 5,832 | py | Python | models.py | JiaMingLin/residual_adapters | a3d32b4fb6c3c252f5adc1ad178b026a111c1a08 | [
"Apache-2.0"
]
| 137 | 2018-03-22T15:45:30.000Z | 2022-03-17T09:39:07.000Z | models.py | JiaMingLin/residual_adapters | a3d32b4fb6c3c252f5adc1ad178b026a111c1a08 | [
"Apache-2.0"
]
| 5 | 2018-09-25T19:44:34.000Z | 2020-12-19T11:26:41.000Z | models.py | JiaMingLin/residual_adapters | a3d32b4fb6c3c252f5adc1ad178b026a111c1a08 | [
"Apache-2.0"
]
| 40 | 2018-04-04T12:36:54.000Z | 2022-02-19T05:46:36.000Z | # models.py
# created by Sylvestre-Alvise Rebuffi [[email protected]]
# Copyright © The University of Oxford, 2017-2020
# This code is made available under the Apache v2.0 licence, see LICENSE.txt for details
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import config_task
import math
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1_fonc(in_planes, out_planes=None, stride=1, bias=False):
if out_planes is None:
return nn.Conv2d(in_planes, in_planes, kernel_size=1, stride=stride, padding=0, bias=bias)
else:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=bias)
class conv1x1(nn.Module):
def __init__(self, planes, out_planes=None, stride=1):
super(conv1x1, self).__init__()
if config_task.mode == 'series_adapters':
self.conv = nn.Sequential(nn.BatchNorm2d(planes), conv1x1_fonc(planes))
elif config_task.mode == 'parallel_adapters':
self.conv = conv1x1_fonc(planes, out_planes, stride)
else:
self.conv = conv1x1_fonc(planes)
def forward(self, x):
y = self.conv(x)
if config_task.mode == 'series_adapters':
y += x
return y
class conv_task(nn.Module):
def __init__(self, in_planes, planes, stride=1, nb_tasks=1, is_proj=1, second=0):
super(conv_task, self).__init__()
self.is_proj = is_proj
self.second = second
self.conv = conv3x3(in_planes, planes, stride)
if config_task.mode == 'series_adapters' and is_proj:
self.bns = nn.ModuleList([nn.Sequential(conv1x1(planes), nn.BatchNorm2d(planes)) for i in range(nb_tasks)])
elif config_task.mode == 'parallel_adapters' and is_proj:
self.parallel_conv = nn.ModuleList([conv1x1(in_planes, planes, stride) for i in range(nb_tasks)])
self.bns = nn.ModuleList([nn.BatchNorm2d(planes) for i in range(nb_tasks)])
else:
self.bns = nn.ModuleList([nn.BatchNorm2d(planes) for i in range(nb_tasks)])
def forward(self, x):
task = config_task.task
y = self.conv(x)
if self.second == 0:
if config_task.isdropout1:
x = F.dropout2d(x, p=0.5, training = self.training)
else:
if config_task.isdropout2:
x = F.dropout2d(x, p=0.5, training = self.training)
if config_task.mode == 'parallel_adapters' and self.is_proj:
y = y + self.parallel_conv[task](x)
y = self.bns[task](y)
return y
# No projection: identity shortcut
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, shortcut=0, nb_tasks=1):
super(BasicBlock, self).__init__()
self.conv1 = conv_task(in_planes, planes, stride, nb_tasks, is_proj=int(config_task.proj[0]))
self.conv2 = nn.Sequential(nn.ReLU(True), conv_task(planes, planes, 1, nb_tasks, is_proj=int(config_task.proj[1]), second=1))
self.shortcut = shortcut
if self.shortcut == 1:
self.avgpool = nn.AvgPool2d(2)
def forward(self, x):
residual = x
y = self.conv1(x)
y = self.conv2(y)
if self.shortcut == 1:
residual = self.avgpool(x)
residual = torch.cat((residual, residual*0),1)
y += residual
y = F.relu(y)
return y
class ResNet(nn.Module):
def __init__(self, block, nblocks, num_classes=[10]):
super(ResNet, self).__init__()
nb_tasks = len(num_classes)
blocks = [block, block, block]
factor = config_task.factor
self.in_planes = int(32*factor)
self.pre_layers_conv = conv_task(3,int(32*factor), 1, nb_tasks)
self.layer1 = self._make_layer(blocks[0], int(64*factor), nblocks[0], stride=2, nb_tasks=nb_tasks)
self.layer2 = self._make_layer(blocks[1], int(128*factor), nblocks[1], stride=2, nb_tasks=nb_tasks)
self.layer3 = self._make_layer(blocks[2], int(256*factor), nblocks[2], stride=2, nb_tasks=nb_tasks)
self.end_bns = nn.ModuleList([nn.Sequential(nn.BatchNorm2d(int(256*factor)),nn.ReLU(True)) for i in range(nb_tasks)])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.linears = nn.ModuleList([nn.Linear(int(256*factor), num_classes[i]) for i in range(nb_tasks)])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, nblocks, stride=1, nb_tasks=1):
shortcut = 0
if stride != 1 or self.in_planes != planes * block.expansion:
shortcut = 1
layers = []
layers.append(block(self.in_planes, planes, stride, shortcut, nb_tasks=nb_tasks))
self.in_planes = planes * block.expansion
for i in range(1, nblocks):
layers.append(block(self.in_planes, planes, nb_tasks=nb_tasks))
return nn.Sequential(*layers)
def forward(self, x):
x = self.pre_layers_conv(x)
task = config_task.task
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.end_bns[task](x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.linears[task](x)
return x
def resnet26(num_classes=10, blocks=BasicBlock):
return ResNet(blocks, [4,4,4],num_classes)
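# Usage sketch (editor's addition): one output head per task, with the task-specific
# adapters/batch-norms selected through config_task.task. This assumes the usual
# config_task fields (mode, factor, proj, ...) have been configured beforehand, as
# the training scripts normally do; the class counts below are made up.
if __name__ == '__main__':
    net = resnet26(num_classes=[10, 100])   # two tasks sharing the 3x3 convolutions
    config_task.task = 1                    # route through task 1's adapters and head
    logits = net(torch.randn(2, 3, 64, 64))
    print(logits.shape)                     # expected: torch.Size([2, 100])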
| 39.945205 | 133 | 0.627743 | 4,844 | 0.830447 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.060861 |
5703b571d61c82b1ad4a982fd9b8632e8dd15fd8 | 1,028 | py | Python | Parte 02/Projeto 01.py | andrewyamagata/Python | ac9baf16cd142156829ec6e977ecfcac8a4e3965 | [
"MIT"
]
| null | null | null | Parte 02/Projeto 01.py | andrewyamagata/Python | ac9baf16cd142156829ec6e977ecfcac8a4e3965 | [
"MIT"
]
| null | null | null | Parte 02/Projeto 01.py | andrewyamagata/Python | ac9baf16cd142156829ec6e977ecfcac8a4e3965 | [
"MIT"
]
 | null | null | null | # Nested conditionals
# if -> elif -> elif -> else  # you can chain as many elif blocks as needed
# Loan approval exercise
casa = float(input('Qual o valor da casa: R$ '))
salario = float(input('Qual o valor do salário: R$ '))
tempo = int(input('Quanto anos para pagar? '))
salario30 = salario * 0.30
prestacao = casa / (tempo * 12)
if salario30 >= prestacao and tempo >= 15:
print('Empréstimo não excede 30 % do seu sálario')
print('-='*30)
print('EMPRÉSTIMO APROVADO COM RESTRIÇÕES')
print('-='*30)
elif salario30 >= prestacao and tempo < 15:
print('Empréstimo não excede 30 % e pagará em ',tempo)
print('-='*30)
print('EMPRÉSTIMO APROVADO SEM RESTRIÇÕES')
print('-='*30)
else:
print('Empréstimo excede 30% do seu salário')
print('-='*30)
print('EMPRÉSTIMO NEGADO')
print('-='*30)
print('Para pagar a casa de R$ {:.2f}.\nCom o salário que recebe de R$ {:.2f}.\nEm {} anos, você deve pagar mensalmente R$ {:.2f}'.format(casa,salario,tempo,prestacao))
# \n inserts a line break
# end=' ' keeps the next print on the same line | 38.074074 | 168 | 0.657588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 611 | 0.582459 |
57053e08159134b657dc6dde4b49efc028c6a0a2 | 2,196 | py | Python | main.py | GauravP2001/courseSniperBot | c3e05d2890f10177ee847a961b957d5e63e7d0ec | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | main.py | GauravP2001/courseSniperBot | c3e05d2890f10177ee847a961b957d5e63e7d0ec | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | main.py | GauravP2001/courseSniperBot | c3e05d2890f10177ee847a961b957d5e63e7d0ec | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | import discord
import os
import requests
import asyncio
import psycopg2
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt=f"%m/%d/%Y %H:%M:%S %Z")
logger = logging.getLogger("Snipe Bot")
client = commands.Bot(command_prefix=".")
scheduler = AsyncIOScheduler()
DATABASE_URL = os.environ.get("DATABASE_URL")
conn = psycopg2.connect(DATABASE_URL, sslmode="require")
cur = conn.cursor()
# with conn:
# cur.execute("CREATE TABLE coursesToBeFound (index VARCHAR primary key);")
# cur.execute("INSERT INTO coursesToBeFound (index) VALUES (%s)", ("00150",))
# cur.execute("DELETE FROM coursesToBeFound where index = %s", ("00150",))
# cur.execute("SELECT * from coursesToBeFound;")
# for row in cur:
# print(row[0])
sectionsFound = []
@client.event
async def on_ready():
logger.info("Bot is ready")
@client.command()
async def addCourse(ctx, arg):
logger.info(arg)
await ctx.send("Successfully Added the Course to Snipe!")
with conn:
cur.execute("INSERT INTO coursesToBeFound (index) VALUES (%s)", (arg,))
async def check_courses():
logger.info("Searching")
url = "https://sis.rutgers.edu/soc/api/openSections.json?year=2022&term=1&campus=NB"
try:
dataJSON = requests.get(url).json()
except Exception as e:
logger.error(e)
return
cur.execute("SELECT * from coursesToBeFound;")
for row in cur:
logger.info(row)
for index in dataJSON:
if row[0] == index:
sectionsFound.append(index)
logger.info(f"Found index: {row[0]}")
await client.get_channel(int(os.environ.get("CHANNEL_ID"))).send(f"Found Index: {index}")
for index in sectionsFound:
cur.execute("DELETE FROM coursesToBeFound where index = %s", (index,))
conn.commit()
if __name__ == "__main__":
logger.info("Starting")
scheduler.add_job(check_courses, "interval", seconds=10)
scheduler.start()
client.run(os.environ.get("token"))
| 28.519481 | 105 | 0.658015 | 0 | 0 | 0 | 0 | 294 | 0.13388 | 1,027 | 0.467668 | 806 | 0.367031 |
57071627a3f7ead2f2e5161d076288e623b02921 | 160 | py | Python | src/utilities/grammar.py | sonishreyas/news_scraper | 7cd1bd9eb14fb903fc7b190b04191237da0a1d23 | [
"MIT"
]
| null | null | null | src/utilities/grammar.py | sonishreyas/news_scraper | 7cd1bd9eb14fb903fc7b190b04191237da0a1d23 | [
"MIT"
]
| null | null | null | src/utilities/grammar.py | sonishreyas/news_scraper | 7cd1bd9eb14fb903fc7b190b04191237da0a1d23 | [
"MIT"
]
| null | null | null | from gingerit.gingerit import GingerIt
def check_grammar(text):
parser = GingerIt()
correct_text = parser.parse(text)
return correct_text['result'] | 26.666667 | 38 | 0.74375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.05 |
57075fadbef4087df6eac236abcbc48b853a6d54 | 619 | py | Python | Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py | jbauermanncode/Curso_Em_Video_Python | 330c207d7bed4e663fe1b9ab433ab57a9828b7f1 | [
"MIT"
]
| null | null | null | Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py | jbauermanncode/Curso_Em_Video_Python | 330c207d7bed4e663fe1b9ab433ab57a9828b7f1 | [
"MIT"
]
| null | null | null | Python_Exercicios/Mundo2/Condições em Python (if..elif)/python_038.py | jbauermanncode/Curso_Em_Video_Python | 330c207d7bed4e663fe1b9ab433ab57a9828b7f1 | [
"MIT"
]
| null | null | null | '''
Write a program that reads two integers and compares them, printing one of these messages:
- The first value is larger
- The second value is larger
- There is no larger value; the two are equal
'''
# Read the two integers
n1 = int(input('Informe o primeiro número: '))
n2 = int(input('Informe o segundo número: '))
# Logical operators (store the comparison results)
n1_maior = n1 > n2
n2_maior = n2 > n1
# Conditional structure: if, elif, else.
if n1_maior:
print('O número {} é o maior!'.format(n1))
elif n2_maior:
print('O número {} é o maior!'.format(n2))
else:
print('Os números são iguais!')
| 22.925926 | 101 | 0.663974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.719243 |
57088093d1d0b3cfd26c3d3201f0bca2db2decb3 | 324 | py | Python | ABS/ABC085C.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
]
| null | null | null | ABS/ABC085C.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
]
| null | null | null | ABS/ABC085C.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
]
| null | null | null | def main():
# input
N, Y = map(int, input().split())
# compute
for i in range(N+1):
for j in range(N+1):
if 10000*i+5000*j+1000*(N-i-j)==Y and N-i-j>=0:
print(i, j, N-i-j)
exit()
# output
print(-1, -1, -1)
if __name__ == '__main__':
main()
| 18 | 59 | 0.435185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.104938 |
5708b5548cafd7c9ca9620325b1633201ca209f8 | 1,127 | py | Python | mail_open_xchange/__openerp__.py | OdooCommunityWidgets/IDEAS-FOR-MODULES | 74c588f6b6058119b8953650b6cb325fe5506cfd | [
"MIT"
]
| 1 | 2015-05-27T19:56:29.000Z | 2015-05-27T19:56:29.000Z | mail_open_xchange/__openerp__.py | OdooCommunityWidgets/IDEAS-FOR-MODULES | 74c588f6b6058119b8953650b6cb325fe5506cfd | [
"MIT"
]
| null | null | null | mail_open_xchange/__openerp__.py | OdooCommunityWidgets/IDEAS-FOR-MODULES | 74c588f6b6058119b8953650b6cb325fe5506cfd | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
{
'name': 'Open-Xchange Odoo',
'version': '1.0',
'category': 'Social Network',
'sequence': 2,
'summary': 'Discussions, Mailing Lists, News',
'description': """
Open-Xchange Integration
=========================
This module embeds a standard Open-Xchange inbox inside Odoo, so email can be used within the Odoo framework as an option alongside Odoo's own mail module.
Features will be added gradually to integrate Open-Xchange more deeply with Odoo, easing migration for users who do not want Odoo's default mail module to completely replace their email.
Main Features
-------------
* Open-Xchange webmail interface inside Odoo.
* Multi-inbox handling by Open-Xchange.
* More features to be added later to further integrate Open-Xchange with Odoo.
""",
'author': 'Luke Branch',
'website': 'https://github.com/OdooCommunityWidgets/IDEAS-FOR-MODULES/mail_open_xchange',
'depends': ['base', 'base_setup', 'mail'],
'data': [
'',
],
'installable': False,
'application': True,
}
| 38.862069 | 233 | 0.674357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.8811 |
5708df6ade016849aefe1a0044ec7ee2d375c82f | 10,853 | py | Python | testing/test_pulse_prop.py | ibegleris/w-fopo | e44b83b8ec54d01bb34b89805378a2b0659dfe6f | [
"BSD-3-Clause"
]
| null | null | null | testing/test_pulse_prop.py | ibegleris/w-fopo | e44b83b8ec54d01bb34b89805378a2b0659dfe6f | [
"BSD-3-Clause"
]
| null | null | null | testing/test_pulse_prop.py | ibegleris/w-fopo | e44b83b8ec54d01bb34b89805378a2b0659dfe6f | [
"BSD-3-Clause"
]
| null | null | null | import sys
sys.path.append('src')
from functions import *
import numpy as np
from numpy.testing import assert_allclose
"-----------------------Full soliton--------------------------------------------"
def get_Qs(nm, gama,fv, a_vec, dnerr, index, master_index,lamda, n2):
if nm == 1:
D = loadmat('loading_data/M1_M2_1m_new.mat')
M1_temp, M2 = D['M1'], D['M2']
M2[:, :] -= 1
M1 = np.empty([np.shape(M1_temp)[0]-2,
np.shape(M1_temp)[1]], dtype=np.int64)
M1[:4] = M1_temp[:4] - 1
Q_large = M1_temp[np.newaxis, 4:6, :]
M1[-1] = M1_temp[6, :] - 1
Q_large[:,:,:] = gama / (3*n2*(2*pi/lamda))
else:
M1, M2, dump, Q_large = \
fibre_parameter_loader(fv, a_vec, dnerr, index, master_index,
filename='step_index_2m', filepath='testing/testing_data/step_index/')
print(Q_large.shape)
Q_large[0,0,:] = gama / (3*n2*(2*pi/lamda)) * np.array([1,1,0,0,0,0,1,1])
Q_large[0,1,:] = gama / (3*n2*(2*pi/lamda)) * np.array([1,0,0,1,1,0,0,1])
return Q_large, M1, M2
def pulse_propagations(ram, ss, nm, N_sol=1, cython = True, u = None):
"SOLITON TEST. IF THIS FAILS GOD HELP YOU!"
    n2 = 2.5e-20  # n2 for silica [m^2/W]
# 0.0011666666666666668 # loss [dB/m]
alphadB = np.array([0 for i in range(nm)])
    gama = 1e-3  # nonlinear parameter gamma [1/(W m)]
"-----------------------------General options------------------------------"
maxerr = 1e-13 # maximum tolerable error per step
"----------------------------Simulation parameters-------------------------"
N = 10
z = np.array([0,70]) # total distance [m]
nplot = 10 # number of plots
nt = 2**N # number of grid points
#dzstep = z/nplot # distance per step
dz_less = 1
dz = 1 # starting guess value of the step
lam_p1 = 1550
lamda_c = 1550e-9
lamda = lam_p1*1e-9
beta2 = -1e-3
P0_p1 = 1
betas = np.array([0, 0, beta2])
T0 = (N_sol**2 * np.abs(beta2) / (gama * P0_p1))**0.5
TFWHM = (2*np.log(1+2**0.5)) * T0
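    # Editor's note: T0 above follows the fundamental-soliton relation
    # N_sol = sqrt(gama * P0_p1 * T0**2 / |beta2|), and TFWHM converts the sech
    # pulse width to FWHM via TFWHM = 2*ln(1 + sqrt(2)) * T0 ~= 1.763 * T0.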
int_fwm = sim_parameters(n2, nm, alphadB)
int_fwm.general_options(maxerr, raman_object, ss, ram)
int_fwm.propagation_parameters(N, z, nplot, dz_less, 1)
int_fwm.woble_propagate(0)
fv, where = fv_creator(lam_p1,lam_p1 + 25,0, 100, int_fwm)
#fv, where = fv_creator(lam_p1, , int_fwm, prot_casc=0)
sim_wind = sim_window(fv, lamda, lamda_c, int_fwm, fv_idler_int=1)
loss = Loss(int_fwm, sim_wind, amax=int_fwm.alphadB)
alpha_func = loss.atten_func_full(sim_wind.fv, int_fwm)
int_fwm.alphadB = alpha_func
int_fwm.alpha = int_fwm.alphadB
dnerr = [0]
index = 1
master_index = 0
a_vec = [2.2e-6]
Q_large,M1,M2 = get_Qs(nm, gama, fv, a_vec, dnerr, index, master_index, lamda, n2)
if nm ==1:
M1, M2, Q_large= np.array([1]), np.array([1]), Q_large[:,0,0]
betas = betas[np.newaxis, :]
# sys.exit()
Dop = dispersion_operator(betas, int_fwm, sim_wind)
print(Dop.shape)
integrator = Integrator(int_fwm)
integrand = Integrand(int_fwm.nm,ram, ss, cython = False, timing = False)
dAdzmm = integrand.dAdzmm
RK = integrator.RK45mm
dAdzmm = integrand.dAdzmm
pulse_pos_dict_or = ('after propagation', "pass WDM2",
"pass WDM1 on port2 (remove pump)",
'add more pump', 'out')
#M1, M2, Q = Q_matrixes(1, n2, lamda, gama=gama)
raman = raman_object(int_fwm.ram, int_fwm.how)
raman.raman_load(sim_wind.t, sim_wind.dt, M2, nm)
if raman.on == 'on':
hf = raman.hf
else:
hf = None
u = np.empty(
[ int_fwm.nm, len(sim_wind.t)], dtype='complex128')
U = np.empty([int_fwm.nm,
len(sim_wind.t)], dtype='complex128')
sim_wind.w_tiled = np.tile(sim_wind.w + sim_wind.woffset, (int_fwm.nm, 1))
u[:, :] = ((P0_p1)**0.5 / np.cosh(sim_wind.t/T0)) * \
np.exp(-1j*(sim_wind.woffset)*sim_wind.t)
U[:, :] = fftshift(sim_wind.dt*fft(u[:, :]))
gam_no_aeff = -1j*int_fwm.n2*2*pi/sim_wind.lamda
u, U = pulse_propagation(u, U, int_fwm, M1, M2.astype(np.int64), Q_large[0].astype(np.complex128),
sim_wind, hf, Dop[0], dAdzmm, gam_no_aeff,RK)
U_start = np.abs(U[ :, :])**2
u[:, :] = u[:, :] * \
np.exp(1j*z[-1]/2)*np.exp(-1j*(sim_wind.woffset)*sim_wind.t)
"""
fig1 = plt.figure()
plt.plot(sim_wind.fv,np.abs(U[1,:])**2)
plt.savefig('1.png')
fig2 = plt.figure()
plt.plot(sim_wind.fv,np.abs(U[1,:])**2)
plt.savefig('2.png')
fig3 = plt.figure()
plt.plot(sim_wind.t,np.abs(u[1,:])**2)
plt.xlim(-10*T0, 10*T0)
plt.savefig('3.png')
fig4 = plt.figure()
plt.plot(sim_wind.t,np.abs(u[1,:])**2)
plt.xlim(-10*T0, 10*T0)
plt.savefig('4.png')
fig5 = plt.figure()
plt.plot(fftshift(sim_wind.w),(np.abs(U[1,:])**2 - np.abs(U[1,:])**2 ))
plt.savefig('error.png')
fig6 = plt.figure()
plt.plot(sim_wind.t,np.abs(u[1,:])**2 - np.abs(u[1,:])**2)
plt.xlim(-10*T0, 10*T0)
plt.savefig('error2.png')
plt.show()
"""
return u, U, maxerr
class Test_cython_nm2(object):
def test_ramoff_s0_nm2(self):
u_c, U_c, maxerr = pulse_propagations('off', 0, nm=2, cython = True)
u_p, U_p, maxerr = pulse_propagations('off', 0, nm=2, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
def test_ramon_s0_nm2(self):
u_c, U_c, maxerr = pulse_propagations('on', 0, nm=2, cython = True)
u_p, U_p, maxerr = pulse_propagations('on', 0, nm=2, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
def test_ramoff_s1_nm2(self):
u_c, U_c, maxerr = pulse_propagations('off', 1, nm=2, cython = True)
u_p, U_p, maxerr = pulse_propagations('off', 1, nm=2, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
def test_ramon_s1_nm2(self):
u_c, U_c, maxerr = pulse_propagations('on', 1, nm=2, cython = True)
u_p, U_p, maxerr = pulse_propagations('on', 1, nm=2, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
class Test_cython_nm1(object):
def test_ramoff_s0_nm2(self):
u_c, U_c, maxerr = pulse_propagations('off', 0, nm=1, cython = True)
u_p, U_p, maxerr = pulse_propagations('off', 0, nm=1, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
def test_ramon_s0_nm2(self):
u_c, U_c, maxerr = pulse_propagations('on', 0, nm=1, cython = True)
u_p, U_p, maxerr = pulse_propagations('on', 0, nm=1, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
def test_ramoff_s1_nm2(self):
u_c, U_c, maxerr = pulse_propagations('off', 1, nm=1, cython = True)
u_p, U_p, maxerr = pulse_propagations('off', 1, nm=1, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
def test_ramon_s1_nm2(self):
u_c, U_c, maxerr = pulse_propagations('on', 1, nm=1, cython = True)
u_p, U_p, maxerr = pulse_propagations('on', 1, nm=1, cython = False)
a,b = np.sum(np.abs(u_c)**2), np.sum(np.abs(u_p)**2)
assert np.allclose(a,b)
class Test_pulse_prop(object):
def test_solit_r0_ss0(self):
u, U, maxerr = pulse_propagations('off', 0, nm=1)
assert_allclose(np.abs(u[:, :])**2,
np.abs(u[:, :])**2, atol=9e-4)
def test_solit_r0_ss0_2(self):
u, U, maxerr = pulse_propagations('off', 0, nm=2)
#print(np.linalg.norm(np.abs(u[:, 0])**2 - np.abs(u[:, -1])**2, 2))
assert_allclose(np.abs(u[:, :])**2,
np.abs(u[:, :])**2, atol=9e-3)
def test_energy_r0_ss0(self):
u, U, maxerr = pulse_propagations(
'off', 0, nm=1, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r0_ss1(self):
u, U, maxerr = pulse_propagations(
'off', 1, nm=1, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r1_ss0(self):
u, U, maxerr = pulse_propagations(
'on', 0, nm=1, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r1_ss1(self):
u, U, maxerr = pulse_propagations(
'on', 1, nm=1, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r0_ss0_2(self):
u, U, maxerr = pulse_propagations(
'off', 0, nm=2, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r0_ss1_2(self):
u, U, maxerr = pulse_propagations(
'off', 1, nm=2, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r1_ss0_2(self):
u, U, maxerr = pulse_propagations(
'on', 0, nm=2, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_energy_r1_ss1_2(self):
u, U, maxerr = pulse_propagations(
'on', 1, nm=2, N_sol=np.abs(10*np.random.randn()))
E = []
for i in range(np.shape(u)[1]):
E.append(np.linalg.norm(u[:, i], 2)**2)
        assert np.allclose(E, E[0])
def test_bire_pass():
Da = np.random.uniform(0, 2*pi, 100)
b = birfeg_variation(Da,2)
u = np.random.randn(2, 2**14) + 1j * np.random.randn(2, 2**14)
u *= 10
for i in range(100):
ut = b.bire_pass(u,i)
assert_allclose(np.abs(u)**2, np.abs(ut)**2)
        u = 1 * ut
| 36.056478 | 105 | 0.540404 | 5,177 | 0.477011 | 0 | 0 | 0 | 0 | 0 | 0 | 1,790 | 0.164931 |
570a3a32cbbdc85ab026871552208d720276a1d7 | 1,089 | py | Python | download.py | wujushan/AndroidHeatMap | 1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59 | ["Apache-2.0"] | 1 | 2019-06-13T16:05:36.000Z | 2019-06-13T16:05:36.000Z | download.py | wujushan/AndroidHeatMap | 1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59 | ["Apache-2.0"] | null | null | null | download.py | wujushan/AndroidHeatMap | 1d6ecff8d810ffd63ba84f56c1a44ee5e7770c59 | ["Apache-2.0"] | null | null | null |
import os
import requests
def download(url):
download_path = '/root/AndroidHeatMap/download/'
if not os.path.exists(download_path):
os.mkdir(download_path)
all_content = requests.get(url).text
file_line = all_content.split("\n")
if file_line[0] != "#EXTM3U":
raise BaseException(u"not M3U8link")
else:
unknow = True
for index, line in enumerate(file_line):
if "EXTINF" in line:
unknow = False
pd_url = url.rsplit("/", 1)[0] + "/" + file_line[index + 1]
res = requests.get(pd_url)
c_fule_name = str(file_line[index + 1])
with open(download_path + "/" + c_fule_name, 'ab') as f:
f.write(res.content)
f.flush()
if unknow:
raise BaseException("cannot find link")
else:
print("finish downloading")
if __name__ == '__main__':
url = 'https://jjdong5.com/get_file/4/1fa69b06c6276768e95cc0c04d85feec693488a588/13000/13287/13287_360p.m3u8'
download(url)
| 34.03125 | 113 | 0.575758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.213039 |
570a7fbde091be0d15c77144e4caa11f184860d3 | 4,945 | py | Python | tests/watermarks_test.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | ["Apache-2.0"] | null | null | null | tests/watermarks_test.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | ["Apache-2.0"] | null | null | null | tests/watermarks_test.py | yujialuo/erdos | 7a631b55895f1a473b0f4d38a0d6053851e65b5d | ["Apache-2.0"] | 1 | 2021-03-16T05:48:39.000Z | 2021-03-16T05:48:39.000Z |
from collections import defaultdict
from time import sleep
from absl import app
from absl import flags
import erdos.graph
from erdos.op import Op
from erdos.utils import frequency
from erdos.message import Message
from erdos.data_stream import DataStream
from erdos.timestamp import Timestamp
from erdos.message import WatermarkMessage
INTEGER_FREQUENCY = 10 # The frequency at which to send the integers.
class FirstOperator(Op):
""" Source operator that publishes increasing integers at a fixed frequency.
The operator also inserts a watermark after a fixed number of messages.
"""
def __init__(self, name, batch_size):
""" Initializes the attributes to be used by the source operator."""
super(FirstOperator, self).__init__(name)
self.batch_size = batch_size
self.counter = 1
self.batch_number = 1
@staticmethod
def setup_streams(input_streams):
""" Outputs a single stream where the messages are sent. """
return [DataStream(data_type = int, name = "integer_out")]
@frequency(INTEGER_FREQUENCY)
def publish_numbers(self):
""" Sends an increasing count of numbers
to the downstream operators. """
output_msg = Message(self.counter,
Timestamp(coordinates = [self.batch_number]))
self.get_output_stream("integer_out").send(output_msg)
# Decide if the watermark needs to be sent.
if self.counter % self.batch_size == 0:
# The batch has completed. We need to send a watermark now.
watermark_msg = WatermarkMessage(Timestamp(coordinates =
[self.batch_number]))
self.batch_number += 1
self.get_output_stream("integer_out").send(watermark_msg)
# Update the counters.
self.counter += 1
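        # With batch_size = 10 this publishes 1..10 followed by a watermark for batch 1,
        # then 11..20 and a watermark for batch 2, and so on, paced by INTEGER_FREQUENCY.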
def execute(self):
""" Execute the publish number loop. """
self.publish_numbers()
self.spin()
class SecondOperator(Op):
""" Second operator that listens in on the numbers and reports their
sum when the watermark is received. """
def __init__(self, name):
""" Initializes the attributes to be used."""
super(SecondOperator, self).__init__(name)
self.windows = defaultdict(list)
@staticmethod
def setup_streams(input_streams):
""" Subscribes all the input streams to the save numbers callback. """
input_streams.add_callback(SecondOperator.save_numbers)
input_streams.add_completion_callback(SecondOperator.execute_sum)
return [DataStream(data_type = int, name = "sum_out")]
def save_numbers(self, message):
""" Save all the numbers corresponding to a window. """
batch_number = message.timestamp.coordinates[0]
self.windows[batch_number].append(message.data)
def execute_sum(self, message):
""" Sum all the numbers in this window and send out the aggregate. """
batch_number = message.timestamp.coordinates[0]
window_data = self.windows.pop(batch_number, None)
#print("Received a watermark for the timestamp: {}".format(batch_number))
#print("The sum of the window {} is {}".format(
# window_data, sum(window_data)))
output_msg = Message(sum(window_data),
Timestamp(coordinates = [batch_number]))
self.get_output_stream("sum_out").send(output_msg)
def execute(self):
""" Execute the spin() loop to continue processing messages. """
self.spin()
class ThirdOperator(Op):
""" Third operator that listens in on the sum and verifies correctness."""
def __init__(self, name):
"""Initializes the attributes to be used."""
super(ThirdOperator, self).__init__(name)
@staticmethod
def setup_streams(input_streams):
""" Subscribes all the input streams to the assert callback."""
input_streams.add_callback(ThirdOperator.assert_correctness)
return []
def assert_correctness(self, message):
""" Assert the correctness of the results."""
batch_number = message.timestamp.coordinates[0]
sum_data = sum(range((batch_number - 1) * 10 + 1, batch_number * 10 + 1))
print("Received sum: {} for the batch_number {}, expected {}".format(
message.data, batch_number, sum_data))
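        # e.g. batch 1 covers 1..10 and should sum to 55, batch 2 covers 11..20 and
        # should sum to 155, and so on.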
def main(argv):
# Set up the graph.
graph = erdos.graph.get_current_graph()
# Add the operators.
source_op = graph.add(FirstOperator, name = "gen_op", init_args = {'batch_size' : 10})
sum_op = graph.add(SecondOperator, name = "sum_op")
assert_op = graph.add(ThirdOperator, name = "assert_op")
# Connect the operators.
graph.connect([source_op], [sum_op])
graph.connect([sum_op], [assert_op])
# Execute the graph.
graph.execute('ray')
if __name__ == "__main__":
app.run(main)
| 37.462121 | 90 | 0.658038 | 3,999 | 0.808696 | 0 | 0 | 1,545 | 0.312437 | 0 | 0 | 1,640 | 0.331648 |
570a9547e24dbd1a28701e76c97396c34016c792 | 1,436 | py | Python | test/test_shop/views.py | blakelockley/django-base-shop | 455a2f4465e90cde57719ac29dc090b14f0bd324 | ["MIT"] | 1 | 2020-01-12T04:05:42.000Z | 2020-01-12T04:05:42.000Z | test/test_shop/views.py | blakelockley/django-base-shop | 455a2f4465e90cde57719ac29dc090b14f0bd324 | ["MIT"] | 14 | 2020-03-24T18:11:07.000Z | 2022-03-12T00:15:20.000Z | test/test_shop/views.py | blakelockley/django-base-shop | 455a2f4465e90cde57719ac29dc090b14f0bd324 | ["MIT"] | null | null | null |
from django.http import HttpResponse
from django_base_shop.models import ShippingTag
from .models import ConcreteCart, ConcreteProduct
def index(request):
return HttpResponse(b"Hello world")
def check_cart(request):
cart = request.cart
if not cart.is_persisted:
return HttpResponse(b"None")
return HttpResponse(cart.cart_token.encode("utf-8"))
def check_cart_items(request):
cart = request.cart
if not cart.is_persisted:
return HttpResponse(b"None")
body = f"{cart.cart_token}<br /><br />"
for item in cart.items.all():
body += f"{item.product.name} {item.quantity}<br />"
return HttpResponse(body.encode("utf-8"))
def add_cart_item(request, pk):
cart = request.cart
if ConcreteProduct.objects.count() == 0:
ConcreteProduct.objects.create(
handle="ANV-001",
name="Anvil",
price=100.0,
shipping_tag=ShippingTag.objects.create(
name="Medium", category="Size", order=1
),
)
product = ConcreteProduct.objects.get(pk=pk)
cart.add_item(product)
return HttpResponse(b"Item added! <a href='/check_cart_items'>Check items</a>")
def remove_cart_item(request, pk):
cart = request.cart
product = ConcreteProduct.objects.get(pk=pk)
cart.remove_item(product)
return HttpResponse(b"Item removed! <a href='/check_cart_items'>Check items</a>")
| 24.338983 | 85 | 0.660167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.185237 |
570cfc314b92388cc92855fea7600f5e8b1e443e | 11,600 | py | Python | q3/q3/drivers/ui/pyqt5.py | virtimus/makaronLab | 10b9be7d7d65d3da6219f929ea7070dd5fed3a81 | ["0BSD"] | 2 | 2021-03-16T05:48:36.000Z | 2021-10-11T01:55:48.000Z | q3/q3/drivers/ui/pyqt5.py | virtimus/makaronLab | 10b9be7d7d65d3da6219f929ea7070dd5fed3a81 | ["0BSD"] | null | null | null | q3/q3/drivers/ui/pyqt5.py | virtimus/makaronLab | 10b9be7d7d65d3da6219f929ea7070dd5fed3a81 | ["0BSD"] | 1 | 2021-03-16T05:48:39.000Z | 2021-03-16T05:48:39.000Z |
# PYQT
import sys
#from ...TabPanel import TabPanel
import sip
from q3.ui.engine import qtw,qtc,qtg
from ... import consts, prop, direction
from ...ui import orientation, colors
from ...moduletype import ModuleType
from ...nodeiotype import NodeIoType
from ...q3vector import Q3Vector
from ...EventSignal import EventProps
from ..driverBase import Q3DriverBase
from enum import Enum
from ...valuetype import ValueType
from .IoLinkView import IoLinkView
from .IoNodeView import IoNodeView
from .ModuleViewImpl import ModuleViewImpl
from .GraphViewImpl import GraphViewImpl
#class IoNode:
# pass
class Q3Scene(qtw.QGraphicsScene):
def __init__(self,*args, **kwargs):
super(Q3Scene,self).__init__(*args, **kwargs)
def contextMenuEvent(self, event):
# Check it item exists on event position
item = self.itemAt(event.scenePos(),qtg.QTransform()) #.toPoint(),qtg.QTransform.TxNone)
if item:
# Try run items context if it has one
try:
item.contextMenuEvent(event)
return
except:
pass
menu = qtw.QMenu()
action = menu.addAction('ACTION')
class DetailWindowBaseImpl(qtw.QWidget):
def __init__(self,parent):
self._parent=parent
super(qtw.QWidget, self).__init__()
def resizeEvent(self, event):
self._parent.parent().events().detailWindowResized.emit(EventProps({'event':event}))
#print(f'WinResizeEV{dir(event)}')
def closeEvent(self, event):
evs = self._parent.parent().events()
if evs.callDetailWindowCloseReq.hasHandlers():
evs.callDetailWindowCloseReq.sync()
event.accept()
# .checkSyncHandler()
#windowDidResize
class Q3Driver(Q3DriverBase):
def doModuleView_Init(self):
if self.s().isRoot():#@s:PackageView::PackageView
#sc = qtw.QGraphicsScene(self.pimpl())
sc = Q3Scene(self.pimpl())
#result = qtw.QGraphicsView(sc,self.pimpl())
package = self.s().module().impl()
result = GraphViewImpl(sc,self.pimpl(),self.p(), package) #'''EditorFrame'''
result._self = self.s()
result._scene = sc
'''
wheelEvent = getattr(self.s(), "wheelEvent", None)
if callable(wheelEvent):
result.wheelEvent = wheelEvent
drawBackground = getattr(self.s(), "drawBackground", None)
if callable(drawBackground):
result.drawBackground = drawBackground
'''
else:
if isinstance(self.pimpl(), qtw.QGraphicsView): #//MODULES FIRST LEVEL
result = ModuleViewImpl(None)
result._self = self.s()
self.pimpl()._scene.addItem(result)
el = self.s().module().impl()
result.setElement(el)
else:
result = ModuleViewImpl(self.pimpl()) # next levels
result._self = self.s()
result._self = self.s()
return result;
def doModuleView_AfterInit(self):
tImpl = self.impl()
#tImpl._self = self.s()
#tImpl._element = self.s().module().impl()
tImpl.setElement(self.s().module().impl())
if self.s().isRoot():#@s:PackageView::PackageView
#self.s()._inputsView = self.s().addModuleView('moduleInputs', type=ModuleType.INPUTS)
#self.s()._outputsView = self.s().addModuleView('moduleOutputs', type=ModuleType.OUTPUTS)
#vec2d m_inputsPosition{ -400.0, 0.0 };
self.s()._inputsView.setProp(prop.PositionX,-400.0)
self.s()._inputsView.setProp(prop.PositionY,0.0)
self.s()._outputsView.setProp(prop.PositionX,400.0)
self.s()._outputsView.setProp(prop.PositionY,0.0)
else: #Node::Node
tImpl._nameFont.setFamily("Consolas")
tImpl._nameFont.setPointSize(8)
tImpl.setFlags(qtw.QGraphicsItem.ItemIsMovable | qtw.QGraphicsItem.ItemIsSelectable | qtw.QGraphicsItem.ItemSendsGeometryChanges)
tImpl.collapse()
tImpl.setGraphView(self.pimpl())
pass #nop
self.callAfterInit(tImpl)
#if iscallable(tImpl)
def doApp_Init(self):
result = qtw.QApplication(sys.argv)
app = result
app.setStyle(qtw.QStyleFactory.create("Fusion"));
darkPalette=qtg.QPalette()
c1 = qtg.QColor(55, 55, 55);
c2 = qtg.QColor(25, 25, 25);
c3 = qtg.QColor(45, 130, 220);
darkPalette.setColor(qtg.QPalette.Window, c1);
darkPalette.setColor(qtg.QPalette.WindowText, qtc.Qt.white);
darkPalette.setColor(qtg.QPalette.Base, c2);
darkPalette.setColor(qtg.QPalette.AlternateBase, c1);
darkPalette.setColor(qtg.QPalette.ToolTipBase, qtc.Qt.white);
darkPalette.setColor(qtg.QPalette.ToolTipText, qtc.Qt.white);
darkPalette.setColor(qtg.QPalette.Text, qtc.Qt.white);
darkPalette.setColor(qtg.QPalette.Button, c1);
darkPalette.setColor(qtg.QPalette.ButtonText, qtc.Qt.white);
darkPalette.setColor(qtg.QPalette.BrightText, qtc.Qt.red);
darkPalette.setColor(qtg.QPalette.Link, c3);
darkPalette.setColor(qtg.QPalette.Highlight, c3);
darkPalette.setColor(qtg.QPalette.HighlightedText, qtc.Qt.white);
app.setPalette(darkPalette);
app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2b8bdb; border: 1px solid white; }");
'''
palette = app.palette()
palette.setColor(QPalette.Window, QColor(239, 240, 241))
palette.setColor(QPalette.WindowText, QColor(49, 54, 59))
palette.setColor(QPalette.Base, QColor(252, 252, 252))
palette.setColor(QPalette.AlternateBase, QColor(239, 240, 241))
palette.setColor(QPalette.ToolTipBase, QColor(239, 240, 241))
palette.setColor(QPalette.ToolTipText, QColor(49, 54, 59))
palette.setColor(QPalette.Text, QColor(49, 54, 59))
palette.setColor(QPalette.Button, QColor(239, 240, 241))
palette.setColor(QPalette.ButtonText, QColor(49, 54, 59))
palette.setColor(QPalette.BrightText, QColor(255, 255, 255))
palette.setColor(QPalette.Link, QColor(41, 128, 185))
# palette.setColor(QPalette.Highlight, QColor(126, 71, 130))
# palette.setColor(QPalette.HighlightedText, Qt.white)
palette.setColor(QPalette.Disabled, QPalette.Light, Qt.white)
palette.setColor(QPalette.Disabled, QPalette.Shadow, QColor(234, 234, 234))
app.setPalette(palette)
'''
return result
def doMainWindow_Init(self):
result = qtw.QMainWindow()
if 'title' in self._self._kwargs:
result.setWindowTitle(self._self._kwargs['title'])
#result = qtw.QFrame()
result.resize(1400, 980)
'''
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(result.sizePolicy().hasHeightForWidth())
result.setSizePolicy(sizePolicy)
'''
showEvent = getattr(self._self, "showEvent", None)
if callable(showEvent):
result.showEvent = showEvent
return result
def doMainWindow_Show(self):
result = self.impl().show()
return result
def doMenu_Init(self):
if self._impl == None:
self._self._qtMenu = qtw.QMenu(self._parent.implObject())
self.s()._menu = self._self._qtMenu
pass
else:
self.s()._menu = self._impl
return self._self._menu
def doMenu_AddSeparator(self):
result = self._self.implObject().addSeparator()
return result
def doMenu_addAction(self, label,id,helpStr,onClick):
if (label == None and consts.ID_EXIT == id):
exitAct = qtw.QAction(qtg.QIcon('exit.png'), '&Exit', self._self.implObject())
exitAct.setShortcut('Ctrl+Q')
exitAct.setStatusTip('Exit application')
exitAct.triggered.connect(qtw.qApp.quit)
result = self._self.implObject().addAction(exitAct)
else:
result = self._self.implObject().addAction(label, onClick)
if onClick != None:
result.triggered.connect(onClick)
#!TODO!result.onClick = onClick
return result
def doMenuBar_Init(self):
return self.pimpl().menuBar()
def doMenuBar_AddMenu(self,menuTitle):
return self.impl().addMenu(menuTitle)
'''
else:
result = Menu(self._parent)
self._wxMenuBar.Append(result.implObject(),menuTitle)
return result
'''
def doMdiPanel_Init(self):
result = qtw.QMdiArea(self._parent.impl())
return result
def doTabPanel_Init(self):
result = qtw.QTabWidget(self._parent.impl())
'''
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(result.sizePolicy().hasHeightForWidth())
result.setSizePolicy(sizePolicy)
'''
#result.setMinimumSize(QtCore.QSize(2080, 1630))
result.setTabsClosable(True)
return result
def doTabPanel_AddTab(self, obj, title):
return self.impl().addTab(obj.impl(),title)
def doTabPanel_CurrentIndex(self):
return self.impl().currentIndex()
def doTab_Init(self):
result = qtw.QWidget()
self._parent.impl().addTab(result,"test")
return result
def doLayout_Init(self):
orient = self.s()._kwargs['orient'] if 'orient' in self.s()._kwargs else None
result = qtw.QVBoxLayout() if orient == orientation.VERTICAL else qtw.QHBoxLayout()
return result
def doLayout_AddElement(self, element):
result = self.impl().addWidget(element.impl())
return result
def doLayout_Add(self,label, sizerFlags):
        result = self.impl().addWidget(label.impl())
return result
def doElement_Init(self):
result = qtw.QWidget(self.pimpl())
return result
def doElement_Resize(self,w,h):
result = self.impl().resize(w,h)
return result
def doElement_SizePolicy(self):
result = self.impl().sizePolicy()
return result
def doElement_SetSizePolicy(self, sizePolicy):
result = self.impl().setSizePolicy(sizePolicy)
return result
def doPanel_Init(self):
result = qtw.QFrame(self.pimpl())
return result
def doLabel_Init(self):
result = qtw.QLabel(self.pimpl())
if 'label' in self.s()._kwargs:
result.setText(self.s()._kwargs['label'])
return result
def doLabel_GetFont(self):
        result = self.impl().font()
return result
def doLabel_SetFont(self, font):
result = self.impl().setFont(font)
return result
def doDetailWindow_Init(self):
result = DetailWindowBaseImpl(self.s())
result._self = self.s()
return result
def doDetailWindow_Show(self):
result = self.impl().show()
return result
| 34.017595 | 141 | 0.617931 | 10,978 | 0.946379 | 0 | 0 | 0 | 0 | 0 | 0 | 3,326 | 0.286724 |
570eadcaa613e66d764e81bda74fc4c5ac38c715 | 2,538 | py | Python | 2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py | michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning | 9de44e5ad2e8d197b0a3c1b362b0377339278bd2 | ["MIT"] | 7 | 2021-10-02T03:19:59.000Z | 2022-03-21T21:24:14.000Z | 2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py | michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning | 9de44e5ad2e8d197b0a3c1b362b0377339278bd2 | ["MIT"] | null | null | null | 2. ExaminingBivariateandMultivariateRelationships/2. scatter_plots.py | michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning | 9de44e5ad2e8d197b0a3c1b362b0377339278bd2 | ["MIT"] | 6 | 2021-08-30T02:58:02.000Z | 2022-02-01T07:46:49.000Z |
# import pandas, matplotlib, and seaborn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.width', 53)
pd.set_option('display.max_columns', 5)
pd.set_option('display.max_rows', 200)
pd.options.display.float_format = '{:,.0f}'.format
covidtotals = pd.read_csv("data/covidtotals.csv")
covidtotals.set_index("iso_code", inplace=True)
landtemps = pd.read_csv("data/landtemps2019avgs.csv")
# do a scatterplot of total_cases by total_deaths
ax = sns.regplot(x="total_cases_mill", y="total_deaths_mill", data=covidtotals)
ax.set(xlabel="Cases Per Million", ylabel="Deaths Per Million", title="Total Covid Cases and Deaths by Country")
plt.show()
fig, axes = plt.subplots(1,2, sharey=True)
sns.regplot(x=covidtotals.aged_65_older, y=covidtotals.total_cases_mill, ax=axes[0])
sns.regplot(x=covidtotals.gdp_per_capita, y=covidtotals.total_cases_mill, ax=axes[1])
axes[0].set_xlabel("Aged 65 or Older")
axes[0].set_ylabel("Cases Per Million")
axes[1].set_xlabel("GDP Per Capita")
axes[1].set_ylabel("")
plt.suptitle("Age 65 Plus and GDP with Cases Per Million")
plt.tight_layout()
fig.subplots_adjust(top=0.92)
plt.show()
# show the high elevation points in a different color
low, high = landtemps.loc[landtemps.elevation<=1000], landtemps.loc[landtemps.elevation>1000]
low.shape[0], low.avgtemp.mean()
high.shape[0], high.avgtemp.mean()
plt.scatter(x="latabs", y="avgtemp", c="blue", data=low)
plt.scatter(x="latabs", y="avgtemp", c="red", data=high)
plt.legend(('low elevation', 'high elevation'))
plt.xlabel("Latitude (N or S)")
plt.ylabel("Average Temperature (Celsius)")
plt.title("Latitude and Average Temperature in 2019")
plt.show()
# show scatter plot with different regression lines by elevation group
landtemps['elevation_group'] = np.where(landtemps.elevation<=1000,'low','high')
sns.lmplot(x="latabs", y="avgtemp", hue="elevation_group", palette=dict(low="blue", high="red"), legend_out=False, data=landtemps)
plt.xlabel("Latitude (N or S)")
plt.ylabel("Average Temperature")
plt.legend(('low elevation', 'high elevation'), loc='lower left')
plt.yticks(np.arange(-60, 40, step=20))
plt.title("Latitude and Average Temperature in 2019")
plt.tight_layout()
plt.show()
# show this as a 3D plot
fig = plt.figure()
plt.suptitle("Latitude, Temperature, and Elevation in 2019")
ax = plt.axes(projection='3d')
ax.set_xlabel("Elevation")
ax.set_ylabel("Latitude")
ax.set_zlabel("Avg Temp")
ax.scatter3D(landtemps.elevation, landtemps.latabs, landtemps.avgtemp)
plt.show()
| 39.046154 | 130 | 0.754137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,022 | 0.402679 |
570ee5f940cb42c1bdadbd336c5d3471836f1133 | 7,359 | py | Python | tests/test/base/test_map_set.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | ["MIT"] | 10 | 2020-06-28T11:16:36.000Z | 2021-08-09T21:41:43.000Z | tests/test/base/test_map_set.py | Eve-ning/reamberPy | 6d19c84f2c110b60e633b82b73e0516396466f56 | ["MIT"] | 35 | 2020-06-18T13:05:50.000Z | 2022-02-18T10:13:35.000Z | tests/test/base/test_map_set.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | ["MIT"] | 2 | 2021-05-26T17:05:06.000Z | 2021-06-12T18:42:13.000Z |
import unittest
import numpy as np
from reamber.base import Bpm, Hit, Hold, Map, MapSet
from reamber.base.lists import BpmList
from reamber.base.lists.notes import HitList, HoldList
# noinspection PyTypeChecker,DuplicatedCode
class TestMapSet(unittest.TestCase):
""" Not much to test here since Bpm is basically Note. """
def setUp(self) -> None:
"""
BPM 300: 200ms/beat
BPM 200: 300ms/beat
BPM 300 300 200 200
OFF 0 200 400 600 800 1000 1200 1400 1600 1900 2200 2500
BEAT 0 1 2 3 0 1 2 3 0 1 2 3
HIT 0 1 2 3 0 1 2 3 0 1 2 3
HOLD 2-----2 0--0 1--------1 0--------------------0
3--3 3--------3
"""
self.bpm_offsets = np.asarray([0, 800, 1600, 2500])
self.bpm_bpms = np.asarray([300, 300, 200, 200])
self.bpm_metronomes = np.asarray([4, 4, 3, 5])
self.hit_offsets = np.asarray([0, 200, 300, 400, 500, 600, 900, 1000, 1400, 1600, 2200, 2350])
self.hit_columns = np.asarray([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])
self.hold_offsets = np.asarray([0, 100, 300, 600, 1000, 1500])
self.hold_columns = np.asarray([2, 3, 0, 3, 1, 0])
self.hold_lengths = np.asarray([200, 100, 100, 300, 300, 1000])
self.bpms = [Bpm(offset=o, bpm=b, metronome=m) for o, b, m in
zip(self.bpm_offsets, self.bpm_bpms, self.bpm_metronomes)]
self.hits = [Hit(offset=o, column=c) for o, c in zip(self.hit_offsets, self.hit_columns)]
self.holds = [Hold(offset=o, column=c, length=l) for o, c, l in
zip(self.hold_offsets, self.hold_columns, self.hold_lengths)]
self.map1 = Map()
self.map1.hits = HitList(self.hits)
self.map1.holds = HoldList(self.holds)
self.map1.bpms = BpmList(self.bpms)
self.map2 = self.map1.deepcopy()
self.map_set = MapSet([self.map1, self.map2])
def test_type(self):
self.assertIsInstance(self.map_set.maps, list)
def test_stack(self):
s = self.map_set.stack()
self.assertListEqual(self.hit_offsets.tolist(), self.map_set.maps[0][HitList][0].offset.tolist())
self.assertListEqual(self.hold_offsets.tolist(), self.map_set.maps[0][HoldList][0].offset.tolist())
self.assertListEqual(self.hit_offsets.tolist(), self.map_set.maps[1][HitList][0].offset.tolist())
self.assertListEqual(self.hold_offsets.tolist(), self.map_set.maps[1][HoldList][0].offset.tolist())
s.offset += 1000
self.assertListEqual((self.hit_offsets + 1000).tolist(), self.map_set.maps[0][HitList][0].offset.tolist())
self.assertListEqual((self.hold_offsets + 1000).tolist(), self.map_set.maps[0][HoldList][0].offset.tolist())
self.assertListEqual((self.hit_offsets + 1000).tolist(), self.map_set.maps[1][HitList][0].offset.tolist())
self.assertListEqual((self.hold_offsets + 1000).tolist(), self.map_set.maps[1][HoldList][0].offset.tolist())
def test_stack_loop(self):
for m in self.map_set:
stack = m.stack(['hits'])
stack.loc[stack.offset < 1000, 'column'] += 1
self.assertEqual(self.hit_columns[0] + 1, self.map_set[0].hits.column[0])
self.assertEqual(self.hold_columns[0], self.map_set[0].holds.column[0])
self.assertEqual(self.hit_columns[-1], self.map_set[0].hits.column.tolist()[-1])
self.assertEqual(self.hold_columns[-1], self.map_set[0].holds.column.tolist()[-1])
def test_stack_offset(self):
s = self.map_set.stack()
s.offset *= 2
self.assertListEqual((self.hit_offsets*2).tolist(), self.map_set.maps[0][HitList][0].offset.tolist())
self.assertListEqual((self.hold_offsets*2).tolist(), self.map_set.maps[0][HoldList][0].offset.tolist())
self.assertListEqual(((self.hold_offsets*2) + self.hold_lengths).tolist(),
self.map_set.maps[0][HoldList][0].tail_offset.tolist())
self.assertListEqual((self.hit_offsets*2).tolist(), self.map_set.maps[1][HitList][0].offset.tolist())
self.assertListEqual((self.hold_offsets*2).tolist(), self.map_set.maps[1][HoldList][0].offset.tolist())
self.assertListEqual(((self.hold_offsets*2) + self.hold_lengths).tolist(),
self.map_set.maps[1][HoldList][0].tail_offset.tolist())
def test_stack_column(self):
s = self.map_set.stack()
s.column *= 2
self.assertListEqual((self.hit_columns*2).tolist() ,self.map_set.maps[0][HitList][0].column.tolist())
self.assertListEqual((self.hold_columns*2).tolist(), self.map_set.maps[0][HoldList][0].column.tolist())
self.assertListEqual((self.hit_columns*2).tolist() ,self.map_set.maps[1][HitList][0].column.tolist())
self.assertListEqual((self.hold_columns*2).tolist(), self.map_set.maps[1][HoldList][0].column.tolist())
def test_stack_inline(self):
""" Checks if inline stacking works """
self.map_set.stack().column *= 2
self.assertListEqual((self.hit_columns * 2).tolist(), self.map_set[0][HitList][0].column.tolist())
self.assertListEqual((self.hold_columns * 2).tolist(), self.map_set[0][HoldList][0].column.tolist())
self.assertListEqual((self.hit_columns * 2).tolist(), self.map_set[1][HitList][0].column.tolist())
self.assertListEqual((self.hold_columns * 2).tolist(), self.map_set[1][HoldList][0].column.tolist())
def test_rate(self):
ms = self.map_set.rate(0.5)
self.assertListEqual((self.hit_offsets*2).tolist(), ms[0][HitList][0].offset.tolist())
self.assertListEqual((self.hold_offsets*2).tolist(), ms[0][HoldList][0].offset.tolist())
self.assertListEqual((self.hold_offsets * 2 + self.hold_lengths * 2).tolist(),
ms[0][HoldList][0].tail_offset.tolist())
self.assertListEqual((self.hit_offsets*2).tolist(), ms[1][HitList][0].offset.tolist())
self.assertListEqual((self.hold_offsets*2).tolist(), ms[1][HoldList][0].offset.tolist())
self.assertListEqual((self.hold_offsets * 2 + self.hold_lengths * 2).tolist(),
ms[1][HoldList][0].tail_offset.tolist())
def test_deepcopy(self):
ms = self.map_set.deepcopy()
ms.stack().column *= 2
self.assertListEqual((self.hit_columns*2).tolist(), ms[0][HitList][0].column.tolist())
self.assertListEqual((self.hold_columns*2).tolist(), ms[0][HoldList][0].column.tolist())
self.assertListEqual((self.hit_columns*2).tolist(), ms[1][HitList][0].column.tolist())
self.assertListEqual((self.hold_columns*2).tolist(), ms[1][HoldList][0].column.tolist())
self.assertListEqual(self.hit_columns.tolist(), self.map_set[0][HitList][0].column.tolist())
self.assertListEqual(self.hold_columns.tolist(), self.map_set[0][HoldList][0].column.tolist())
self.assertListEqual(self.hit_columns.tolist(), self.map_set[1][HitList][0].column.tolist())
self.assertListEqual(self.hold_columns.tolist(), self.map_set[1][HoldList][0].column.tolist())
if __name__ == '__main__':
unittest.main()
| 54.110294 | 116 | 0.621552 | 7,079 | 0.961951 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.093219 |
570f7be4fc6a73c331b26ffda6ddfc47a075df88 | 1,252 | py | Python | minifyoperation.py | seece/cbpp | b6771c7933fa07444e660eafda6f06cf60edce01 | ["MIT"] | null | null | null | minifyoperation.py | seece/cbpp | b6771c7933fa07444e660eafda6f06cf60edce01 | ["MIT"] | null | null | null | minifyoperation.py | seece/cbpp | b6771c7933fa07444e660eafda6f06cf60edce01 | ["MIT"] | null | null | null |
import re
from util import *
from operation import Operation, OperationResult
class Replacement:
def __init__(self, regex, substitution):
self.regex = regex
self.substitution = substitution
class MinifyOperation(Operation):
def __init__(self):
self.inMultilineComment = False
pass
def apply(self, line, state):
result = OperationResult(line, False)
if not state.args.minify:
return result
l = stripComments(line)
strings = scanForStrings(l)
commentStart = len(l)
stringRegex = r'(("[^"]+")|(|[^"]*?)([^\s]*?))?'
comments = r'(?P<comment>(|(\'|//)*$))'
def string(s):
if not s:
return ""
return s
def replace(m, group):
if checkIfInsideString(m.start(group), strings):
return string(m.group(0))
return string(m.group(1)) + string(m.group(group))
ops = []
ops.append(Replacement(re.compile(r'' + stringRegex + '\s*(?P<op>[=+\-*/\><,\^]{1,2})\s*'), lambda m: replace(m, "op")))
ops.append(Replacement(re.compile(r'' + stringRegex + r'(?<=\D)(0)(?P<digit>\.\d+)'), lambda m: replace(m, "digit") ))
#l = l.lstrip("\t")
for o in ops:
l = o.regex.sub(o.substitution, l)
l = l.rstrip("\r\n")
result.line = strInsert(result.line, 0, commentStart-1, l)
return result
| 24.076923 | 122 | 0.626997 | 1,169 | 0.933706 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.135783 |
570f8d367a6c727fc6ef795d72a90ef7bea75141 | 2,024 | py | Python | graph.py | mrpatiwi/k-walk-py | a800f64079024716b26c0ebb9c3a2c5b6a935b78 | ["MIT"] | null | null | null | graph.py | mrpatiwi/k-walk-py | a800f64079024716b26c0ebb9c3a2c5b6a935b78 | ["MIT"] | null | null | null | graph.py | mrpatiwi/k-walk-py | a800f64079024716b26c0ebb9c3a2c5b6a935b78 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from index_matrix import Matrix
__author__ = 'Patricio Lopez Juri'
class Graph:
def __init__(self, V, E, K):
self.V = V
self.E = E
self.K = K
self.A = Matrix.square(items=V)
for a, b in self.E:
self.A[a, b] = 1
@property
def order(self):
return len(self.V)
@property
def n(self):
return self.order
@property
def k(self):
return len(self.K)
def d(self, i):
return self.A.row(i).sum()
def D(self):
D = Matrix.square(items=self.V)
for item in self.V:
D[item, item] = self.d(item)
return D
def p(self, i, j):
return self.A[i, j] / self.d(i)
def p_star(self, start, i, j):
absorbents = [k for k in self.K if k != start]
if i in absorbents and i == j:
return 1
elif i in absorbents and i != j:
return 0
else:
return self.p(i, j)
def P(self):
return self.D().inverse() * self.A
def P_star(self, start):
def function(i, j):
return self.p_star(start, i, j)
P_star = self.A.map(function=function)
absorbents = [k for k in self.K if k != start]
for i, absorbent in enumerate(absorbents):
P_star.swap_columns(P_star.horizontal_items[-(1 + i)], absorbent)
return P_star
def Q_star(self, start):
size = self.n - self.k + 1
return self.P_star(start)[0:size, 0:size]
def R_star(self, start):
size = self.n - self.k + 1
return self.P_star(start)[0:size, size:(size + self.k - 1)]
def Zero_star(self, start):
pass
def I_star(self, start):
size = self.n - self.k + 1
range = size + self.k - 1
return self.P_star(start)[size:range, size:range]
def N_star(self, start):
Q_star = self.Q_star(start)
identity = Q_star.clone_identity()
return (identity - Q_star).inverse()
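# Minimal usage sketch (added for illustration, not part of the original file); it
# assumes the Matrix class from index_matrix supports the operations used above, and
# the vertex and edge values are hypothetical.
#
#   V = ['a', 'b', 'c']
#   E = [('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b')]
#   K = ['a', 'c']            # walk endpoints treated as absorbing states
#   g = Graph(V, E, K)
#   N = g.N_star('a')         # fundamental matrix of the chain started at 'a'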
| 23.264368 | 77 | 0.535079 | 1,928 | 0.952569 | 0 | 0 | 162 | 0.08004 | 0 | 0 | 44 | 0.021739 |
570fe23611397bcc46c1ab733771a0e34fdc4ba4 | 1,302 | py | Python | ep004_helper.py | jpch89/effectivepython | 97ba297bf987f346219bf8de5198c0817f5146e0 | ["MIT"] | null | null | null | ep004_helper.py | jpch89/effectivepython | 97ba297bf987f346219bf8de5198c0817f5146e0 | ["MIT"] | null | null | null | ep004_helper.py | jpch89/effectivepython | 97ba297bf987f346219bf8de5198c0817f5146e0 | ["MIT"] | null | null | null |
from urllib.parse import parse_qs
# Parse the query string
my_values = parse_qs('red=5&blue=0&green=',
keep_blank_values=True)
# print(repr(my_values))  # the way the book prints it
print(my_values)  # parse_qs returns a dict, so printing it directly works fine
# >>>
# {'red': ['5'], 'blue': ['0'], 'green': ['']}
# Parameters in a query string can carry multiple values or blank values,
# and some parameters may not appear at all.
# The dict get method reads values without raising an error for missing keys.
print('Red: ', my_values.get('red'))
print('Green: ', my_values.get('green'))
print('Opacity: ', my_values.get('opacity'))
print('-' * 50)
# Goal: when a parameter is missing from the query string,
# or when its value is blank,
# return 0 instead.
# Idea: empty values and zero both evaluate to False
red = my_values.get('red', [''])[0] or 0
green = my_values.get('green', [''])[0] or 0
opacity = my_values.get('opacity', [''])[0] or 0
print('Red: %r' % red)
print('Green: %r' % green)
print('Opacity: %r' % opacity)
print('-' * 50)
# Goal: the value is ultimately needed as an integer.
# Idea: convert the type.
red = int(my_values.get('red', [''])[0] or 0)
# This long expression is messy to read!
# Improvement 1: use the conditional expression added in Python 2.5
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
# Improvement 2: use a multi-line if/else statement
green = my_values.get('green', [''])
if green[0]:
green = int(green[0])
else:
green = 0
# Improvement 3: logic used this often should be wrapped in a helper function
def get_first_value(values, key, default=0):
found = values.get(key, [''])
if found[0]:
found = int(found[0])
else:
found = default
return found
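# Example use of the helper (added for illustration, not in the original script),
# reusing the my_values dict parsed above:
# get_first_value(my_values, 'red')      # -> 5
# get_first_value(my_values, 'green')    # -> 0
# get_first_value(my_values, 'opacity')  # -> 0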
| 23.25 | 48 | 0.609831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.55 |
570febe7fa7b5748adbff547439e5061a41d7ecb | 2,643 | py | Python | Piece_class_stuff.py | krystianpietryka/Chess | e65afbe3ac51441327a5057b3677334ceb54d916 | ["MIT"] | null | null | null | Piece_class_stuff.py | krystianpietryka/Chess | e65afbe3ac51441327a5057b3677334ceb54d916 | ["MIT"] | null | null | null | Piece_class_stuff.py | krystianpietryka/Chess | e65afbe3ac51441327a5057b3677334ceb54d916 | ["MIT"] | null | null | null |
from enum import Enum
import pygame
class Colour(Enum):
WHITE = 0
BLACK = 1
class Piece:
def __init__(self, column, row, model, colour):
self.row = row
self.column = column
self.model = model
self.colour = colour
# Load sprites into pygame
class Sprites:
board_image = pygame.image.load(r'Sprites/Board.png')
WB = pygame.image.load(r'Sprites/WB.png')
WH = pygame.image.load(r'Sprites/WH.png')
WP = pygame.image.load(r'Sprites/WP.png')
WR = pygame.image.load(r'Sprites/WR.png')
WQ = pygame.image.load(r'Sprites/WQ.png')
WK = pygame.image.load(r'Sprites/WK.png')
BB = pygame.image.load(r'Sprites/BB.png')
BH = pygame.image.load(r'Sprites/BH.png')
BP = pygame.image.load(r'Sprites/BP.png')
BR = pygame.image.load(r'Sprites/BR.png')
BQ = pygame.image.load(r'Sprites/BQ.png')
BK = pygame.image.load(r'Sprites/BK.png')
Red_cell = pygame.image.load(r'Sprites/Red_cell.png')
# Piece class Objects
class Piece_Objects:
Pawn1 = Piece(0, 6, Sprites.WP, Colour.WHITE)
Pawn2 = Piece(1, 6, Sprites.WP, Colour.WHITE)
Pawn3 = Piece(2, 6, Sprites.WP, Colour.WHITE)
Pawn4 = Piece(3, 6, Sprites.WP, Colour.WHITE)
Pawn5 = Piece(4, 6, Sprites.WP, Colour.WHITE)
Pawn6 = Piece(5, 6, Sprites.WP, Colour.WHITE)
Pawn7 = Piece(6, 6, Sprites.WP, Colour.WHITE)
Pawn8 = Piece(7, 6, Sprites.WP, Colour.WHITE)
    Pawn9 = Piece(0, 1, Sprites.BP, Colour.BLACK)
Pawn10 = Piece(1, 1, Sprites.BP, Colour.BLACK)
Pawn11 = Piece(2, 1, Sprites.BP, Colour.BLACK)
Pawn12 = Piece(3, 1, Sprites.BP, Colour.BLACK)
Pawn13 = Piece(4, 1, Sprites.BP, Colour.BLACK)
Pawn14 = Piece(5, 1, Sprites.BP, Colour.BLACK)
Pawn15 = Piece(6, 1, Sprites.BP, Colour.BLACK)
Pawn16 = Piece(7, 1, Sprites.BP, Colour.BLACK)
Knight1 = Piece(1, 7, Sprites.WH, Colour.WHITE)
Knight2 = Piece(6, 7, Sprites.WH, Colour.WHITE)
Knight3 = Piece(1, 0, Sprites.BH, Colour.BLACK)
Knight4 = Piece(6, 0, Sprites.BH, Colour.BLACK)
Rook1 = Piece(0, 7, Sprites.WR, Colour.WHITE)
Rook2 = Piece(7, 7, Sprites.WR, Colour.WHITE)
Rook3 = Piece(0, 0, Sprites.BR, Colour.BLACK)
Rook4 = Piece(7, 0, Sprites.BR, Colour.BLACK)
Bishop1 = Piece(2, 7, Sprites.WB, Colour.WHITE)
Bishop2 = Piece(5, 7, Sprites.WB, Colour.WHITE)
Bishop3 = Piece(2, 0, Sprites.BB, Colour.BLACK)
Bishop4 = Piece(5, 0, Sprites.BB, Colour.BLACK)
King1 = Piece(4, 7, Sprites.WK, Colour.WHITE)
King2 = Piece(4, 0, Sprites.BK, Colour.BLACK)
Queen1 = Piece(3, 7, Sprites.WQ, Colour.WHITE)
Queen2 = Piece(3, 0, Sprites.BQ, Colour.BLACK)
| 37.757143 | 57 | 0.650397 | 2,546 | 0.963299 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.111237 |
5710d9d404ac2b132ecaaa64415a70da05239921 | 656 | py | Python | pubsub.py | basecue/micropython-pubsub | fcb6189d648515f1a7106ed5f54e332ba069793d | ["Apache-2.0"] | null | null | null | pubsub.py | basecue/micropython-pubsub | fcb6189d648515f1a7106ed5f54e332ba069793d | ["Apache-2.0"] | null | null | null | pubsub.py | basecue/micropython-pubsub | fcb6189d648515f1a7106ed5f54e332ba069793d | ["Apache-2.0"] | null | null | null |
from micropython import schedule
_subscribers = {}
def publisher(topic):
def _publish(func):
def _wrapper(*args, **kwargs):
value = func(*args, **kwargs)
publish(topic, value)
return value
return _wrapper
return _publish
def publish(topic, value) -> None:
if topic not in _subscribers:
return
for subscriber_func in _subscribers[topic]:
schedule(subscriber_func, value)
def subscriber(topic):
def _wrapper(func):
try:
_subscribers[topic].append(func)
except KeyError:
_subscribers[topic] = [func]
return _wrapper
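# Usage sketch (added for illustration, not part of the original module); assumes a
# MicroPython environment where micropython.schedule is available. The topic and
# handler names below are hypothetical.
#
#   @subscriber('temperature')
#   def on_temperature(value):
#       print('temperature:', value)
#
#   @publisher('temperature')
#   def read_sensor():
#       return 21.5
#
#   read_sensor()   # returns 21.5 and schedules on_temperature(21.5)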
| 19.294118 | 47 | 0.609756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5712c5f2bba3745161134c95e4c1fe8d35033684 | 5,808 | py | Python | sc_cost_meter/utils.py | zaro0508/lambda-sc-cost-meter | 2e10fa102af983f61a352ae633651fc3eaf64b19 | ["Apache-2.0"] | null | null | null | sc_cost_meter/utils.py | zaro0508/lambda-sc-cost-meter | 2e10fa102af983f61a352ae633651fc3eaf64b19 | ["Apache-2.0"] | null | null | null | sc_cost_meter/utils.py | zaro0508/lambda-sc-cost-meter | 2e10fa102af983f61a352ae633651fc3eaf64b19 | ["Apache-2.0"] | null | null | null |
import boto3
import logging
import os
from datetime import datetime
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def get_ec2_client():
return boto3.client('ec2')
def get_ssm_client():
return boto3.client('ssm')
def get_ce_client():
return boto3.client('ce')
def get_meteringmarketplace_client():
return boto3.client('meteringmarketplace')
def get_dynamo_client():
return boto3.client('dynamodb')
def get_env_var_value(env_var):
'''Get the value of an environment variable
:param env_var: the environment variable
:returns: the environment variable's value, None if env var is not found
'''
value = os.getenv(env_var)
if not value:
log.warning(f'cannot get environment variable: {env_var}')
return value
def get_marketplace_synapse_ids():
'''Get Synapse IDs from the Marketplace Dynamo DB, these are the Marketplace customers.
Assumes that there is a Dynamo DB with a table containing a mapping of Synapse
IDs to SC subscriber data
:return a list of synapse IDs, otherwise return empty list if no customers are in DB
'''
synapse_ids = []
ddb_marketplace_table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
ddb_marketplace_synapse_user_id_attribute = "SynapseUserId"
if ddb_marketplace_table_name:
client = get_dynamo_client()
response = client.scan(
TableName=ddb_marketplace_table_name,
ProjectionExpression=ddb_marketplace_synapse_user_id_attribute,
)
if "Items" in response.keys():
for item in response["Items"]:
synapse_ids.append(item[ddb_marketplace_synapse_user_id_attribute]["S"])
return synapse_ids
def get_marketplace_customer_id(synapse_id):
'''Get the Service Catalog customer ID from the Marketplace Dynamo DB.
Assumes that there is a Dynamo DB with a table containing a mapping of Synapse
IDs to SC subscriber data
:param synapse_id: synapse user id
:return the Marketplace customer ID, otherwise return None if cannot find an
associated customer ID
'''
customer_id = None
ddb_marketplace_table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
if ddb_marketplace_table_name:
ddb_customer_id_attribute = 'MarketplaceCustomerId'
client = get_dynamo_client()
response = client.get_item(
Key={
'SynapseUserId': {
'S': synapse_id,
}
},
TableName=ddb_marketplace_table_name,
ConsistentRead=True,
AttributesToGet=[
ddb_customer_id_attribute
]
)
if "Item" in response.keys():
customer_id = response["Item"][ddb_customer_id_attribute]["S"]
else:
log.info(f'cannot find registration for synapse user: {synapse_id}')
return customer_id
def get_marketplace_product_code(synapse_id):
'''Get the registered Service Catalog customer product code.
Assumes that there is a Dynamo DB with a table containing a mapping of Synapse
IDs to SC subscriber data
:param synapse_id: synapse user id
:return the Marketplace product code, None if cannot find customer ID
'''
product_code = None
ddb_marketplace_table_name = get_env_var_value('MARKETPLACE_ID_DYNAMO_TABLE_NAME')
if ddb_marketplace_table_name:
ddb_product_code_attribute = 'ProductCode'
client = get_dynamo_client()
response = client.get_item(
Key={
'SynapseUserId': {
'S': synapse_id,
}
},
TableName=ddb_marketplace_table_name,
ConsistentRead=True,
AttributesToGet=[
ddb_product_code_attribute
]
)
if "Item" in response.keys():
product_code = response["Item"][ddb_product_code_attribute]["S"]
else:
log.info(f'cannot find registration for synapse user: {synapse_id}')
return product_code
def get_customer_cost(customer_id, time_period, granularity):
'''
Get the total cost of all resources tagged with the customer_id for a given
time_period. The time_period and time granularity must match.
:param customer_id: the Marketplace customer ID
:param time_period: the cost time period
:param granularity: the granularity of time HOURLY|DAILY|MONTHLY
:return: the total cost of all resources and the currency unit
'''
client = get_ce_client()
response = client.get_cost_and_usage(
TimePeriod=time_period,
Granularity=granularity,
Filter={
"Tags": {
"Key": "marketplace:customerId",
"Values": [
customer_id
]
}
},
Metrics=["UnblendedCost"]
)
results_by_time = response['ResultsByTime']
cost = results_by_time[0]["Total"]["UnblendedCost"]["Amount"]
unit = results_by_time[0]["Total"]["UnblendedCost"]["Unit"]
return float(cost), unit
def report_cost(cost, customer_id, product_code):
'''
Report the incurred cost of the customer's resources to the AWS Marketplace
:param cost: the cost (as a float value)
:param customer_id: the Marketplace customer ID
:param product_code: the Marketplace product code
'''
    cost_accrued_rate = 0.001  # TODO: use marketplace get_dimension API to get this info
quantity = int(cost / cost_accrued_rate)
mrktpl_client = get_meteringmarketplace_client()
response = mrktpl_client.batch_meter_usage(
UsageRecords=[
{
'Timestamp': datetime.utcnow(),
'CustomerIdentifier': customer_id,
'Dimension': 'costs_accrued',
'Quantity': quantity
}
],
ProductCode=product_code
)
log.debug(f'batch_meter_usage response: {response}')
results = response["Results"][0]
status = results["Status"]
if status == 'Success':
log.info(f'usage record: {results}')
else:
# TODO: need to add a retry mechanism for failed reports
unprocessed_records = response["UnprocessedRecords"][0]
log.error(f'unprocessed record: {unprocessed_records}')
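# Illustrative end-to-end flow for the helpers above (added for illustration, not
# part of the original module); the Synapse ID and time period are hypothetical.
#
#   synapse_id = '1234567'
#   customer_id = get_marketplace_customer_id(synapse_id)
#   product_code = get_marketplace_product_code(synapse_id)
#   if customer_id and product_code:
#       time_period = {'Start': '2021-06-01', 'End': '2021-06-02'}
#       cost, unit = get_customer_cost(customer_id, time_period, 'DAILY')
#       report_cost(cost, customer_id, product_code)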
| 31.394595 | 89 | 0.719697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,595 | 0.446798 |
5714de071955ec101c9d0bd2f8b9cad2f55c7b5c | 8,000 | py | Python | source/metadata.py | sanmik/brain-network-viz | 9c881e49c14c94e3f7ef4b7776d98c930716ee91 | ["MIT"] | 5 | 2017-09-01T14:05:03.000Z | 2019-07-13T07:52:49.000Z | source/metadata.py | sanmik/brain-network-viz | 9c881e49c14c94e3f7ef4b7776d98c930716ee91 | ["MIT"] | null | null | null | source/metadata.py | sanmik/brain-network-viz | 9c881e49c14c94e3f7ef4b7776d98c930716ee91 | ["MIT"] | 1 | 2017-09-01T14:05:03.000Z | 2017-09-01T14:05:03.000Z |
# Library Imports
from itertools import islice
import csv
# Local Module Imports
import config
class Metadata(object):
"""
Base class for maintaining metadata (properties and their attributes) about
Node and Edge objects. This base class handles parsing and storing the CSV
data, and providing accessor methods. The NodeMetadata and EdgeMetadata add
some specific methods.
The top rows of an input CSV file define metadata and should look like the
following example.
+-------------+-----------+-----------+-----------+
| Primary Key | Property1 | Property2 | Property3 | ...
+-------------------------------------------------+ <-+
| Attribute1 | | | | |
+-------------------------------------------------+ |
| Attribute2 | | | | | Metadata
+-------------------------------------------------+ |
| Attribute3 | | | | |
+-------------------------------------------------+ <-+
| Item1 | | | | |
+-------------------------------------------------+ |
| Item2 | | | | | Data
+-------------------------------------------------+ |
| Item3 | | | | |
+-------------+-----------+-----------+-----------+ <-+
Class usage example:
m = NodeMetadata('sample_nodes.csv', 3, 'Id')
m.get('X', 'MIN_VAL')
"""
def __init__(self, in_file, num_rows, prime_key):
"""
Construct a Metadata object
Args:
in_file: A file handle for an input csv file
num_rows: The number of rows of the csv file defining metadata
prime_key: The name of the column of primary keys. EG: Attribute names or
Item (Node, Edge) IDs.
"""
# 'data' will contain the property name row + all metadata rows as lists
self.data = []
# Lookup table mapping property names to column indices
self.prop_indices = {}
# Lookup table mapping attribute names to row indices
self.attr_indices = {}
# Detect and use correct delimiter. Commas and Tabs are supported.
dialect = csv.Sniffer().sniff(in_file.read(1024), delimiters=",\t")
in_file.seek(0)
reader = csv.reader(in_file, dialect)
# Populate data structs
while reader.line_num < num_rows + 1:
row = next(reader)
if reader.line_num == 1:
for i, name in enumerate(row):
self.prop_indices[name] = i
self.attr_indices[row[0]] = reader.line_num - 1
self.data.append(row)
def get(self, prop, attr):
"""
Gets the value of a specific property attribute.
Treats the CSV matrix shown up top as a 2D array, using prop to lookup
the column, and attr to lookup the row.
EG: To get the minimum value of a node's x coordinate.
m = Metadata('sample_nodes.csv', 3, 'Name')
m.get('X', 'MIN_VAL')
Args:
prop: The metadata property
attr: The attribute of that given property
Return:
The string value of the specified metadata property attribute
"""
# Get indices into 2D data array
j = self.getPropIdx(prop)
i = self.getAttrIdx(attr)
# Get value
return self.data[i][j]
def getPropIdx(self, prop):
"""
Gets the index of a metadata property (Column index).
Args:
prop: The name of the metadata property
Return:
Integer column index
"""
return self.prop_indices[prop]
def getAttrIdx(self, attr):
"""
Gets the index of a metadata attribute (Row index).
Args:
attr: The name of the metadata attribute
Return:
Integer row index
"""
return self.attr_indices[attr]
class NodeMetadata(Metadata):
"""
Subclass to implement Node specific Metadata functionality
"""
def __init__(self, in_file, num_rows, prime_key):
super(NodeMetadata, self).__init__(in_file, num_rows, prime_key)
"""
A list of dicts for looking up Property names by layer:
self.layers[0] =>
{
'C': (Property Name, CSV column index, min val, max val),
'D': (Property Name, CSV column index, min val, max val),
...
}.
And to get the property name used for color in layer 2 you would access as:
self.layers[0]['C'][0]
In the value tuples, Property Name and CSV column index will be None if
no such property is specified in the input file.
"""
self.layers = [{k: None for k in config.NODE_USE_AS_KEYS}]
# Populate self.layers
row_i = self.attr_indices['USE_AS']
for col_i in range(config.NODE_LAYER_COLS_BEGIN, len(self.data[0])):
prop_use_as = self.data[row_i][col_i]
assert prop_use_as in config.NODE_USE_AS_KEYS
# Find or create the destination layer object and property
dest_layer = None
for layer in self.layers:
if not layer[prop_use_as]:
dest_layer = layer
break
if not dest_layer:
dest_layer = {k: None for k in config.NODE_USE_AS_KEYS}
self.layers.append(dest_layer)
min_val = self.data[self.getAttrIdx('MIN_VAL')][col_i]
max_val = self.data[self.getAttrIdx('MAX_VAL')][col_i]
prop_name = self.data[self.getAttrIdx(prime_key)][col_i]
dest_layer[prop_use_as] = (prop_name, col_i, min_val, max_val)
""" Fill in any gaps in self.layers. If a layer didn't have property
metadata explicitly set - it takes on default metadata values """
for layer_i, layer in enumerate(self.layers):
for use_as_key, v in layer.items():
if not v:
layer[use_as_key] = config.NODE_DEFAULT_META[use_as_key]
def getPropertyName(self, use_as, layer_i):
"""
Get the Property name associated with the given USE_AS string for the given
layer.
Args:
use_as: A USE_AS value. EG: C, D, etc.
layer_i: The layer index
Return:
The string name of the associated property. None if that property wasn't
set in the input file.
"""
return self.layers[layer_i][use_as][0]
def getPropertyIdx(self, use_as, layer_i):
"""
Get the CSV column of the Property associated with the given USE_AS value
for every node's layer i.
Return
Numeric index of the CSV colum. None that property was not set in the
input file.
"""
return self.layers[layer_i][use_as][1]
def getPropertyMinVal(self, use_as, layer_i):
"""
Get the minimum value of the Property associated with the given USE_AS
value for every node's layer i.
Return:
String minimum value for the property.
"""
return self.layers[layer_i][use_as][2]
def getPropertyMaxVal(self, use_as, layer_i):
"""
Get the maximum value of the Property associated with the given USE_AS
value for every node's layer i.
Return
String maximum value for the property.
"""
return self.layers[layer_i][use_as][3]
def numLabeledLayers(self):
"""
    Return the number of node layers that have a label property explicitly set.
"""
return len(filter(lambda l: l['L'][0] != None, self.layers))
# TODO: Write Unit Test
class EdgeMetadata(Metadata):
"""
Subclass to implement Node specific Metadata functionality
TODO: Consider constructing a lookup table in the same way NodeMetadata does.
"""
def getPropertyName(self, use_as):
"""
Get the Property name associated with the given USE_AS string.
Args:
use_as: A USE_AS value. EG: C, D, etc.
Return:
The string name of the associated property
"""
row_i = self.getAttrIdx('USE_AS')
use_as_row = self.data[row_i]
col_i = 0
for val in use_as_row:
if val == use_as:
return self.data[0][col_i]
col_i += 1
| 33.057851 | 79 | 0.58775 | 7,897 | 0.987125 | 0 | 0 | 0 | 0 | 0 | 0 | 5,212 | 0.6515 |
571574f8dd7e9bd961d512815c9fd6535e05f1d8 | 20,165 | py | Python | src/src_python/antares_xpansion/driver.py | pelefebvre/antares-xpansion | c62ed1a982e970325dec6007eb57a9c6288ef0c7 | ["Apache-2.0"] | null | null | null | src/src_python/antares_xpansion/driver.py | pelefebvre/antares-xpansion | c62ed1a982e970325dec6007eb57a9c6288ef0c7 | ["Apache-2.0"] | null | null | null | src/src_python/antares_xpansion/driver.py | pelefebvre/antares-xpansion | c62ed1a982e970325dec6007eb57a9c6288ef0c7 | ["Apache-2.0"] | 1 | 2021-05-27T13:06:26.000Z | 2021-05-27T13:06:26.000Z |
"""
Class to control the execution of the optimization session
"""
import shutil
import configparser
import glob
import os
import subprocess
import sys
from pathlib import Path
from antares_xpansion.input_checker import check_candidates_file
from antares_xpansion.input_checker import check_settings_file
from antares_xpansion.xpansion_utils import read_and_write_mps
class XpansionDriver():
"""
Class to control the execution of the optimization session
"""
def __init__(self, config):
"""
Initialise driver with a given antaresXpansion configuration,
the system platform and parses the arguments
:param config: configuration to use for the optimization
:type config: XpansionConfig object
"""
self.platform = sys.platform
self.config = config
self.candidates_list = []
self.check_candidates()
self.check_settings()
print(self.candidates_list)
def exe_path(self, exe):
"""
        prefixes the input exe with the install directory containing the binaries
:param exe: executable name
:return: path to specified executable
"""
return os.path.normpath(os.path.join(self.config.installDir, exe))
def solver_cmd(self, solver):
"""
returns a list consisting of the path to the required solver and its launching options
"""
assert solver in [self.config.MERGE_MPS,
self.config.BENDERS_MPI,
self.config.BENDERS_SEQUENTIAL]
if solver == self.config.MERGE_MPS:
return self.exe_path(solver) +" "+ self.config.OPTIONS_TXT
elif solver == self.config.BENDERS_MPI:
return self.config.MPI_LAUNCHER +" "+\
self.config.MPI_N +" "+ str(self.config.n_mpi)+\
" "+ self.exe_path(solver) +" "+ self.config.OPTIONS_TXT
#solver == self.config.BENDERS_SEQUENTIAL:
return self.exe_path(solver) +" "+ self.config.OPTIONS_TXT
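    # For example (illustrative, not taken from the original sources), with
    # BENDERS_MPI and n_mpi = 4 the returned string has the shape
    #   "<MPI_LAUNCHER> <MPI_N> 4 <installDir>/<benders binary> <OPTIONS_TXT>",
    # e.g. something like "mpiexec -n 4 .../benders_mpi options.txt" depending on
    # the XpansionConfig values.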
def antares(self):
"""
returns antares binaries location
"""
return os.path.normpath(os.path.join(self.config.installDir, self.config.ANTARES))
def general_data(self):
"""
returns path to general data ini file
"""
return os.path.normpath(os.path.join(self.data_dir(),
self.config.SETTINGS, self.config.GENERAL_DATA_INI))
def settings(self):
"""
returns path to setting ini file
"""
return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
self.config.EXPANSION, self.config.SETTINGS_INI))
def candidates(self):
"""
returns path to candidates ini file
"""
return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
self.config.EXPANSION, self.config.CANDIDATES_INI))
def capacity_file(self, filename):
"""
returns path to input capacity file
"""
return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
self.config.EXPANSION, self.config.CAPADIR, filename))
def weights_file(self, filename):
"""
returns the path to a yearly-weights file
:param filename: name of the yearly-weights file
:return: path to input yearly-weights file
"""
return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
self.config.EXPANSION, filename))
def antares_output(self):
"""
returns path to antares output data directory
"""
return os.path.normpath(os.path.join(self.data_dir(), self.config.OUTPUT))
def data_dir(self):
"""
returns path to the data directory
"""
return self.config.dataDir
def is_accurate(self):
"""
indicates if method to use is accurate by reading the uc_type in the settings file
"""
with open(self.settings(), 'r') as file_l:
options = dict(
{line.strip().split('=')[0].strip(): line.strip().split('=')[1].strip()
for line in file_l.readlines()})
uc_type = options.get(self.config.UC_TYPE,
self.config.settings_default[self.config.UC_TYPE])
assert uc_type in [self.config.EXPANSION_ACCURATE, self.config.EXPANSION_FAST]
return uc_type == self.config.EXPANSION_ACCURATE
assert False
def is_relaxed(self):
"""
indicates if method to use is relaxed by reading the relaxation_type
from the settings file
"""
with open(self.settings(), 'r') as file_l:
options = dict(
{line.strip().split('=')[0].strip(): line.strip().split('=')[1].strip()
for line in file_l.readlines()})
relaxation_type = options.get('master',
self.config.settings_default["master"])
assert relaxation_type in ['integer', 'relaxed', 'full_integer']
return relaxation_type == 'relaxed'
assert False
def optimality_gap(self):
"""
prints and returns the optimality gap read from the settings file
:return: gap value or 0 if the gap is set to -Inf
"""
with open(self.settings(), 'r') as file_l:
options = dict(
{line.strip().split('=')[0].strip(): line.strip().split('=')[1].strip()
for line in file_l.readlines()})
optimality_gap_str = options.get('optimality_gap',
self.config.settings_default["optimality_gap"])
assert not '%' in optimality_gap_str
print('optimality_gap_str :', optimality_gap_str)
return float(optimality_gap_str) if optimality_gap_str != '-Inf' else 0
assert False
def max_iterations(self):
"""
prints and returns the maximum iterations read from the settings file
        :return: max iterations value or -1 if the parameter is set to +Inf
"""
with open(self.settings(), 'r') as file_l:
options = dict(
{line.strip().split('=')[0].strip(): line.strip().split('=')[1].strip()
for line in file_l.readlines()})
max_iterations_str = options.get('max_iteration',
self.config.settings_default["max_iteration"])
assert not '%' in max_iterations_str
print('max_iterations_str :', max_iterations_str)
return float(max_iterations_str) if ( (max_iterations_str != '+Inf') and (max_iterations_str != '+infini') ) else -1
assert False
def additional_constraints(self):
"""
returns path to additional constraints file
"""
with open(self.settings(), 'r') as file_l:
options = dict(
{line.strip().split('=')[0].strip(): line.strip().split('=')[1].strip()
for line in file_l.readlines()})
additional_constraints_filename = options.get("additional-constraints",
self.config.settings_default["additional-constraints"])
if additional_constraints_filename == "" :
return ""
return os.path.normpath(os.path.join(self.data_dir(), self.config.USER,
self.config.EXPANSION, additional_constraints_filename))
def nb_years(self):
"""
        returns the nbyears parameter value read from the general data file
"""
ini_file = configparser.ConfigParser()
ini_file.read(self.general_data())
return float(ini_file['general']['nbyears'])
def launch(self):
"""
launch antares xpansion steps
"""
self.clear_old_log()
if self.config.step == "full":
lp_path = self.generate_mps_files()
self.launch_optimization(lp_path)
elif self.config.step == "antares":
self.pre_antares()
self.launch_antares()
elif self.config.step == "getnames":
if self.config.simulationName:
self.get_names(self.config.simulationName)
else:
print("Missing argument simulationName")
sys.exit(1)
elif self.config.step == "lp":
if self.config.simulationName:
self.lp_step(self.config.simulationName)
output_path = os.path.normpath(os.path.join(self.antares_output(), self.config.simulationName))
self.set_options(output_path)
else:
print("Missing argument simulationName")
sys.exit(1)
elif self.config.step == "optim":
if self.config.simulationName:
lp_path = os.path.normpath(os.path.join(self.antares_output(),
self.config.simulationName, 'lp'))
self.launch_optimization(lp_path)
else:
print("Missing argument simulationName")
sys.exit(1)
else:
print("Launching failed")
sys.exit(1)
def clear_old_log(self):
"""
clears old log files for antares and the lp_namer
"""
if (self.config.step in ["full", "antares"]) and (os.path.isfile(self.antares() + '.log')):
os.remove(self.antares() + '.log')
if (self.config.step in ["full", "lp"])\
and (os.path.isfile(self.exe_path(self.config.LP_NAMER) + '.log')):
os.remove(self.exe_path(self.config.LP_NAMER) + '.log')
def check_candidates(self):
"""
checks that candidates file has correct format
"""
#check file existence
if not os.path.isfile(self.candidates()):
print('Missing file : %s was not retrieved.' % self.candidates())
sys.exit(1)
check_candidates_file(self)
def check_settings(self):
"""
checks that settings file has correct format
"""
#check file existence
if not os.path.isfile(self.settings()):
print('Missing file : %s was not retrieved.' % self.settings())
sys.exit(1)
check_settings_file(self)
def pre_antares(self):
"""
modifies the general data file to configure antares execution
"""
ini_file = configparser.ConfigParser()
ini_file.read(self.general_data())
ini_file[self.config.OPTIMIZATION][self.config.EXPORT_MPS] = "true"
ini_file[self.config.OPTIMIZATION][self.config.EXPORT_STRUCTURE] = "true"
ini_file[self.config.OPTIMIZATION][self.config.USE_XPRS] = "false"
ini_file.remove_option(self.config.OPTIMIZATION, self.config.USE_XPRS)
ini_file.remove_option(self.config.OPTIMIZATION, self.config.INBASIS)
ini_file.remove_option(self.config.OPTIMIZATION, self.config.OUTBASIS)
if self.is_accurate():
ini_file['general']['mode'] = 'expansion'
ini_file['other preferences']['unit-commitment-mode'] = 'accurate'
ini_file[self.config.OPTIMIZATION]['include-tc-minstablepower'] = 'true'
ini_file[self.config.OPTIMIZATION]['include-tc-min-ud-time'] = 'true'
ini_file[self.config.OPTIMIZATION]['include-dayahead'] = 'true'
else:
ini_file['general']['mode'] = 'Economy'
ini_file['other preferences']['unit-commitment-mode'] = 'fast'
ini_file[self.config.OPTIMIZATION]['include-tc-minstablepower'] = 'false'
ini_file[self.config.OPTIMIZATION]['include-tc-min-ud-time'] = 'false'
ini_file[self.config.OPTIMIZATION]['include-dayahead'] = 'false'
with open(self.general_data(), 'w') as out_file:
ini_file.write(out_file)
def launch_antares(self):
"""
launch antares
:return: name of the new simulation's directory
"""
# if not os.path.isdir(driver.antares_output()):
# os.mkdir(driver.antares_output(), )
old_output = os.listdir(self.antares_output())
print([self.antares(), self.data_dir()])
with open(self.antares() + '.log', 'w') as output_file:
returned_l = subprocess.call(self.antares() +" "+ self.data_dir(), shell=True,
stdout=output_file,
stderr=output_file)
if returned_l != 0:
print("WARNING: exited antares with status %d" % returned_l)
new_output = os.listdir(self.antares_output())
print(old_output)
print(new_output)
assert len(old_output) + 1 == len(new_output)
diff = list(set(new_output) - set(old_output))
return diff[0]
def post_antares(self, antares_output_name):
"""
creates necessary files for simulation using the antares simulation output files,
the existing configuration files, get_names and the lpnamer executable
:param antares_output_name: name of the antares simulation output directory
:return: path to the lp output directory
"""
output_path = os.path.normpath(os.path.join(self.antares_output(), antares_output_name))
self.get_names(antares_output_name)
lp_path = self.lp_step(antares_output_name)
self.set_options(output_path)
return lp_path
def get_names(self, antares_output_name):
"""
produces a .txt file describing the weekly problems:
each line of the file contains :
- mps file name
- variables file name
- constraints file name
:param antares_output_name: name of the antares simulation output directory
produces a file named with xpansionConfig.MPS_TXT
"""
output_path = os.path.normpath(os.path.join(self.antares_output(), antares_output_name))
mps_txt = read_and_write_mps(output_path)
# print(mps_txt)
with open(os.path.normpath(os.path.join(output_path, self.config.MPS_TXT)), 'w') as file_l:
for line in mps_txt.items():
file_l.write(line[1][0] + ' ' + line[1][1] + ' ' + line[1][2] + '\n')
glob_path= Path(output_path)
area_files = [str(pp) for pp in glob_path.glob("area*.txt")]
interco_files = [str(pp) for pp in glob_path.glob("interco*.txt")]
assert len(area_files) == 1
assert len(interco_files) == 1
shutil.copy(area_files[0], os.path.normpath(os.path.join(output_path, 'area.txt')))
shutil.copy(interco_files[0], os.path.normpath(os.path.join(output_path, 'interco.txt')))
def lp_step(self, antares_output_name):
"""
copies area and interco files and launches the lp_namer
        :param antares_output_name: name of the antares simulation output directory
produces a file named with xpansionConfig.MPS_TXT
"""
output_path = os.path.normpath(os.path.join(self.antares_output(), antares_output_name))
lp_path = os.path.normpath(os.path.join(output_path, 'lp'))
if os.path.isdir(lp_path):
shutil.rmtree(lp_path)
os.makedirs(lp_path)
is_relaxed = 'relaxed' if self.is_relaxed() else 'integer'
with open(self.exe_path(self.config.LP_NAMER) + '.log', 'w') as output_file:
lp_cmd = self.exe_path(self.config.LP_NAMER) +" "+ output_path +" "+ is_relaxed +" "+ self.additional_constraints()
returned_l = subprocess.call(lp_cmd,
shell=True,
stdout=output_file,
stderr=output_file)
if returned_l != 0:
print("ERROR: exited lpnamer with status %d" % returned_l)
sys.exit(1)
return lp_path
def launch_optimization(self, lp_path):
"""
launch the optimization of the antaresXpansion problem using the specified solver
:param lp_path: path to the lp directory containing input files
(c.f. generate_mps_files)
        the solver executable is selected from self.config.method and is one of
        [XpansionConfig.MERGE_MPS, XpansionConfig.BENDERS_MPI, XpansionConfig.BENDERS_SEQUENTIAL]
"""
old_cwd = os.getcwd()
os.chdir(lp_path)
print('Current directory is now : ', os.getcwd())
solver = None
if self.config.method == "mpibenders":
solver = self.config.BENDERS_MPI
elif self.config.method == "mergeMPS":
solver = self.config.MERGE_MPS
mergemps_lp_log = "log_merged.lp"
if os.path.isfile(mergemps_lp_log):
os.remove(mergemps_lp_log)
mergemps_mps_log = "log_merged.mps"
if os.path.isfile(mergemps_mps_log):
                os.remove(mergemps_mps_log)
elif self.config.method == "sequential":
solver = self.config.BENDERS_SEQUENTIAL
elif self.config.method == "both":
            print("method both is not handled yet")
sys.exit(1)
else:
print("Illegal optim method")
sys.exit(1)
#delete execution logs
logfile_list = glob.glob('./' +solver + 'Log*')
for file_path in logfile_list:
try:
os.remove(file_path)
except OSError:
print("Error while deleting file : ", file_path)
if os.path.isfile(solver + '.log'):
os.remove(solver + '.log')
print('Launching {}, logs will be saved to {}.log'.format(solver,
os.path.normpath(os.path.join(
os.getcwd(), solver))))
with open(solver + '.log', 'w') as output_file:
returned_l = subprocess.call(self.solver_cmd(solver), shell=True,
stdout=output_file,
stderr=output_file)
if returned_l != 0:
print("ERROR: exited solver with status %d" % returned_l)
sys.exit(1)
os.chdir(old_cwd)
def set_options(self, output_path):
"""
generates a default option file for the solver
"""
# computing the weight of slaves
options_values = self.config.options_default
options_values["SLAVE_WEIGHT_VALUE"] = str(self.nb_years())
print('Number of years is {}, setting SLAVE_WEIGHT_VALUE to {} '.
format(self.nb_years(), options_values["SLAVE_WEIGHT_VALUE"]))
options_values["GAP"] = self.optimality_gap()
options_values["MAX_ITERATIONS"] = self.max_iterations()
# generate options file for the solver
options_path = os.path.normpath(os.path.join(output_path, 'lp', self.config.OPTIONS_TXT))
with open(options_path, 'w') as options_file:
options_file.writelines(["%30s%30s\n" % (kvp[0], kvp[1])
for kvp in options_values.items()])
def generate_mps_files(self):
"""
launches antares to produce mps files
"""
print("starting mps generation")
# setting antares options
print("-- pre antares")
self.pre_antares()
# launching antares
print("-- launching antares")
antares_output_name = self.launch_antares()
        # writing things
print("-- post antares")
lp_path = self.post_antares(antares_output_name)
return lp_path
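# Illustrative usage sketch; not part of the original module. The attribute names on
# `config` (step, installDir, dataDir, method, ...) are assumptions taken from how
# XpansionDriver reads them above, and the import path of XpansionConfig may differ.
# from antares_xpansion.xpansionConfig import XpansionConfig
# config = XpansionConfig()        # parses command-line arguments
# driver = XpansionDriver(config)  # validates candidates.ini and settings.ini on construction
# driver.launch()                  # runs the step selected by config.step ("full", "antares", "getnames", "lp", "optim")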
| 41.069246 | 129 | 0.579965 | 19,788 | 0.981304 | 0 | 0 | 0 | 0 | 0 | 0 | 6,066 | 0.300818 |
571589111619e9fd5ae98d9e183ad96ef4ec5ca8 | 1,186 | py | Python | CRASHLACMA/slistener.py | carlynorama/CRASHLACMA | d59890a5a0702940a0d678b600230f0f53384710 | [
"CC0-1.0"
]
| null | null | null | CRASHLACMA/slistener.py | carlynorama/CRASHLACMA | d59890a5a0702940a0d678b600230f0f53384710 | [
"CC0-1.0"
]
| null | null | null | CRASHLACMA/slistener.py | carlynorama/CRASHLACMA | d59890a5a0702940a0d678b600230f0f53384710 | [
"CC0-1.0"
]
| null | null | null | from tweepy import StreamListener
import json, time, sys
class SListener(StreamListener):
def __init__(self, api = None, fprefix = 'streamer'):
self.api = api or API()
self.filecounter = 1
self.fprefix = fprefix
def on_data(self, data):
if 'in_reply_to_status' in data:
self.on_status(data)
elif 'delete' in data:
delete = json.loads(data)['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'limit' in data:
if self.on_limit(json.loads(data)['limit']['track']) is False:
return False
elif 'warning' in data:
warning = json.loads(data)['warnings']
            print(warning['message'])
            return False
def on_status(self, status):
filename = self.fprefix + '.' + time.strftime('%Y%m%d-%H%M') + '_' + str(self.filecounter) + '.json'
self.output = open(filename, 'wb')
        print('Writing to file %s' % filename)
self.output.write(status + "\n")
self.filecounter += 1
self.output.close()
return
| 29.65 | 108 | 0.555649 | 1,126 | 0.94941 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.143339 |
5715f8e7099f46fd56b68c4c665702f7dc7e68e3 | 379 | py | Python | Curso-em-video-Python/PycharmProjects/pythonExercicios/Ex052.py | sartinicj/curso-em-video-python | 8cb4ca05a88351c44aa4a7befc59c9596a50f268 | [
"MIT"
]
| null | null | null | Curso-em-video-Python/PycharmProjects/pythonExercicios/Ex052.py | sartinicj/curso-em-video-python | 8cb4ca05a88351c44aa4a7befc59c9596a50f268 | [
"MIT"
]
| null | null | null | Curso-em-video-Python/PycharmProjects/pythonExercicios/Ex052.py | sartinicj/curso-em-video-python | 8cb4ca05a88351c44aa4a7befc59c9596a50f268 | [
"MIT"
]
| null | null | null | '''
n = int(input('Digite um NÚMERO: '))
tot = 0
for c in range(1, n+1):
if n%c == 0:
print('\033[33m', end=' ')
tot+=1
else:
print('\033[31m', end=' ')
print('{}'.format(c), end=' ')
print('\n\033[mO numero {} foi divisivel {} vezes'.format(n, tot))
if tot == 2:
print('E POR ISSO É PRIMO')
else:
print('E POR ISSO NÃO É PRIMO')
'''
| 21.055556 | 66 | 0.506596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.997389 |
57167335582e22b6c5a86bb8546c9e43ffff5640 | 158 | py | Python | core/views.py | TonyLuque/marketing_website | 2ebbbf7788c439afc8192926fa3dc3e231b1e69e | [
"MIT"
]
| null | null | null | core/views.py | TonyLuque/marketing_website | 2ebbbf7788c439afc8192926fa3dc3e231b1e69e | [
"MIT"
]
| null | null | null | core/views.py | TonyLuque/marketing_website | 2ebbbf7788c439afc8192926fa3dc3e231b1e69e | [
"MIT"
]
| null | null | null | from django.shortcuts import render
# Create your views here.
def home(request):
return render(request,'core/index.html')
def detail(request):
pass
| 17.555556 | 44 | 0.734177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.265823 |
57172ba3fed738751a65af00ff67b7037fd96b96 | 206 | py | Python | qclib/qclab.py | hagne/qclib | c90e06cb22708d610126710715a5d66bd4dc0898 | [
"MIT"
]
| null | null | null | qclib/qclab.py | hagne/qclib | c90e06cb22708d610126710715a5d66bd4dc0898 | [
"MIT"
]
| null | null | null | qclib/qclab.py | hagne/qclib | c90e06cb22708d610126710715a5d66bd4dc0898 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import qclib.tag_times
def qc_by_tagging_times(path2data, path2database):
out = qclib.tag_times.Controller(path2data=path2data, path2database=path2database)
return out
| 25.75 | 86 | 0.747573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.11165 |
57179805f172aec6d932cd77e1ce3e7e6d275877 | 317 | py | Python | t_server.py | iloghyr/easy_python | b750f6817d54562b23630e2419bace19da0abf8b | [
"Apache-2.0"
]
| 1 | 2018-03-01T02:42:52.000Z | 2018-03-01T02:42:52.000Z | t_server.py | iloghyr/easy_python | b750f6817d54562b23630e2419bace19da0abf8b | [
"Apache-2.0"
]
| null | null | null | t_server.py | iloghyr/easy_python | b750f6817d54562b23630e2419bace19da0abf8b | [
"Apache-2.0"
]
| null | null | null | import socket
address = ('127.0.0.1', 31500)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # s = socket.socket()
s.bind(address)
s.listen(5)
ss, addr = s.accept()
print 'got connected from',addr
ss.send('hihi')
ra = ss.recv(512)
print ra
ss.close()
s.close() | 19.8125 | 78 | 0.583596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.192429 |
5717a38a4849d391d82607edadcfd400d7080783 | 1,906 | py | Python | py_solutions_71-80/Euler_77.py | tijko/Project-Euler | d953a2bf6932c2c4e1235409fedf760add65a0ba | [
"MIT"
]
| null | null | null | py_solutions_71-80/Euler_77.py | tijko/Project-Euler | d953a2bf6932c2c4e1235409fedf760add65a0ba | [
"MIT"
]
| 1 | 2022-03-15T02:49:09.000Z | 2022-03-15T02:49:09.000Z | py_solutions_71-80/Euler_77.py | tijko/Project-Euler | d953a2bf6932c2c4e1235409fedf760add65a0ba | [
"MIT"
]
| null | null | null | # what is the first number to have 5000 different ways to sum with prime numbers?
import math
import timeit
start = timeit.default_timer()
def is_prime(x):
if x == 2:
return True
if x % 2 == 0 or x == 1:
return False
for i in range(3,int(math.sqrt(x))+1,2):
if x % i == 0:
return False
return True
def prime_sum(lim):
lim += 1
o_set = [i for i in xrange(lim - 1) if is_prime(i)][::-1]
s_set = [0] * len(o_set)
ways, pos, flag = 0, -2, 0
while True:
for n in xrange(0, lim, o_set[-1]):
if sum(s_set[:-1] + [n]) == (lim - 1):
ways += 1
s_set = s_set[:-1] + [n]
flag = 1
break
if sum(s_set[:-1] + [n]) > (lim - 1):
s_set = s_set[:-1] + [n]
flag = 1
break
if pos < -(len(o_set)):
return ways
if flag == 1 and pos < -2:
for i, v in enumerate(s_set[pos:][::-1], 1):
if v != 0:
s_set[-i] = 0
s_set[-i - 1] += o_set[-i - 1]
flag = 0
if s_set[pos] >= (lim - 1):
pos -= 1
break
elif s_set[pos] <= (lim - 1):
if s_set[pos] == (lim - 1):
pos -= 1
for i, v in enumerate(s_set[pos:-1][::-1], 2):
if v < (lim - 1):
s_set[-i] += o_set[-i]
break
if v >= (lim - 1):
s_set[-i] = 0
elif s_set[pos] >= (lim - 1) and pos == -2:
pos -= 1
def euler_77():
start, ways = 10, 0
while ways < 5000:
start += 1
ways = prime_sum(start)
return start
print "Answer: %s" % euler_77()
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| 28.029412 | 82 | 0.407135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.054565 |
57199578121fb89b3db3e976c4737dd3dcc14bf5 | 2,258 | py | Python | lambdas/get_users.py | charvi-a/320-S20-Track1 | ac97504fc1fdedb1c311773b015570eeea8a8663 | [
"BSD-3-Clause"
]
| 9 | 2019-12-30T16:32:22.000Z | 2020-03-03T20:14:47.000Z | lambdas/get_users.py | charvi-a/320-S20-Track1 | ac97504fc1fdedb1c311773b015570eeea8a8663 | [
"BSD-3-Clause"
]
| 283 | 2020-02-03T15:16:03.000Z | 2020-05-05T03:18:59.000Z | lambdas/get_users.py | charvi-a/320-S20-Track1 | ac97504fc1fdedb1c311773b015570eeea8a8663 | [
"BSD-3-Clause"
]
| 3 | 2020-04-16T15:23:29.000Z | 2020-05-12T00:38:41.000Z | from package.query_db import query
from package.lambda_exception import LambdaException
def handler(event, context):
is_admin = event['is_admin']
is_supporter = event['is_supporter']
is_student = event['is_student']
if is_admin == "" and is_supporter == "" and is_student == "":
get_users_sql = "SELECT id, first_name, last_name, email FROM users;"
params = []
else:
if is_admin != "":
is_admin = event['is_admin'].lower()
if is_admin == "true":
is_admin = True
else:
is_admin = False
else:
is_admin = False
if is_supporter != "":
is_supporter = event['is_supporter'].lower()
if is_supporter == "true":
is_supporter = True
else:
is_supporter = False
else:
is_supporter = False
if is_student != "":
is_student = event['is_student'].lower()
if is_student == "true":
is_student = True
else:
is_student = False
else:
is_student = True
is_admin_param = {'name' : 'is_admin', 'value' : {'booleanValue' : is_admin}}
is_supporter_param = {'name' : 'is_supporter', 'value' : {'booleanValue' : is_supporter}}
is_student_param = {'name' : 'is_student', 'value' : {'booleanValue' : is_student}}
get_users_sql = "SELECT id, first_name, last_name, email FROM users WHERE is_admin = :is_admin AND is_supporter = :is_supporter AND is_student = :is_student;"
params = [is_admin_param, is_supporter_param, is_student_param]
try:
users = query(get_users_sql, params)['records']
except Exception as e:
raise LambdaException("500: Failed to get users, " + str(e))
response = {
'users' : []
}
for u_id, f_name, l_name, email in users:
current_users = response['users']
next_user = {'user_id' : u_id["longValue"], 'first_name' : f_name["stringValue"], 'last_name' : l_name["stringValue"], 'email' : email["stringValue"]}
current_users.append(next_user)
response['users'] = current_users
return response
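# Illustrative invocation sketch; not part of the original handler, and the example values
# are assumptions. Each flag is a string ("true"/"false", or "" to skip filtering), matching
# the parsing above.
# example_event = {'is_admin': '', 'is_supporter': 'true', 'is_student': 'false'}
# handler(example_event, None)  # -> {'users': [{'user_id': ..., 'first_name': ..., 'last_name': ..., 'email': ...}]}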
| 34.738462 | 171 | 0.574402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.24845 |
571ab14954af261729cb1d3fc0d5e206657e96fa | 705 | py | Python | leetCode/swap_nodes_in_pairs.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
]
| null | null | null | leetCode/swap_nodes_in_pairs.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
]
| 1 | 2019-11-04T06:44:04.000Z | 2019-11-04T06:46:55.000Z | leetCode/swap_nodes_in_pairs.py | yskang/AlgorithmPractice | 31b76e38b4c2f1e3e29fb029587662a745437912 | [
"MIT"
]
| null | null | null | # Title: Swap Nodes in Pairs
# Link: https://leetcode.com/problems/swap-nodes-in-pairs
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Problem:
def swap_pairs(self, head: ListNode) -> ListNode:
pre, pre.next = self, head
while pre.next and pre.next.next:
a = pre.next
b = a.next
pre.next, b.next, a.next = b, a, b.next
pre = a
return self.next
def solution():
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
problem = Problem()
return problem.swap_pairs(head)
def main():
print(solution())
if __name__ == '__main__':
main() | 22.741935 | 61 | 0.58156 | 395 | 0.560284 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.134752 |
571ac253ee844d994243e9c2e1443c9c4aa20002 | 16,967 | py | Python | detect_actions.py | CTewan/ACAM_Demo | b76cf4ce1289b8c311dbad1588f299ff67f7eaf3 | [
"MIT"
]
| null | null | null | detect_actions.py | CTewan/ACAM_Demo | b76cf4ce1289b8c311dbad1588f299ff67f7eaf3 | [
"MIT"
]
| null | null | null | detect_actions.py | CTewan/ACAM_Demo | b76cf4ce1289b8c311dbad1588f299ff67f7eaf3 | [
"MIT"
]
| null | null | null | import numpy as np
import cv2
import imageio
import tensorflow as tf
import json
import csv
import os
import sys
sys.path.append("object_detection")
sys.path.append("object_detection/deep_sort")
sys.path.append("action_detection")
import argparse
import object_detection.object_detector as obj
import action_detection.action_detector as act
import time
DISPLAY = False
SHOW_CAMS = False
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video_path', type=str, required=False, default="")
parser.add_argument('-d', '--display', type=str, required=False, default="True")
args = parser.parse_args()
display = (args.display == "True" or args.display == "true")
#actor_to_display = 6 # for cams
video_path = args.video_path
basename = os.path.basename(video_path).split('.')[0]
out_vid_path = "./output_videos/%s_output.mp4" % (basename if not SHOW_CAMS else basename+'_cams_actor_%.2d' % actor_to_display)
clf_out_path = "./clf_output/{}_output.csv".format(basename if not SHOW_CAMS else basename+'_cams_actor_{}'.format(actor_to_display))
#out_vid_path = './output_videos/testing.mp4'
# video_path = "./tests/chase1Person1View3Point0.mp4"
# out_vid_path = 'output.mp4'
main_folder = './'
# NAS
obj_detection_model = 'ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'
obj_detection_graph = os.path.join("object_detection", "weights", obj_detection_model, "frozen_inference_graph.pb")
print("Loading object detection model at %s" % obj_detection_graph)
obj_detector = obj.Object_Detector(obj_detection_graph)
tracker = obj.Tracker()
print("Reading video file %s" % video_path)
reader = imageio.get_reader(video_path, 'ffmpeg')
action_freq = 8
# fps_divider = 1
print('Running actions every %i frame' % action_freq)
fps = reader.get_meta_data()['fps'] #// fps_divider
print("FPS: {}".format(fps))
W, H = reader.get_meta_data()['size']
T = tracker.timesteps
#if not display:
writer = imageio.get_writer(out_vid_path, fps=fps)
csv_file = open(clf_out_path, 'w', newline='')
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Time', 'Person', 'Action', 'Probability'])
print("Writing output to %s" % out_vid_path)
# act_detector = act.Action_Detector('i3d_tail')
# ckpt_name = 'model_ckpt_RGB_i3d_pooled_tail-4'
act_detector = act.Action_Detector('soft_attn')
#ckpt_name = 'model_ckpt_RGB_soft_attn-16'
#ckpt_name = 'model_ckpt_soft_attn_ava-23'
ckpt_name = 'model_ckpt_soft_attn_pooled_cosine_drop_ava-130'
#input_frames, temporal_rois, temporal_roi_batch_indices, cropped_frames = act_detector.crop_tubes_in_tf([T,H,W,3])
memory_size = act_detector.timesteps - action_freq
updated_frames, temporal_rois, temporal_roi_batch_indices, cropped_frames = act_detector.crop_tubes_in_tf_with_memory([T,H,W,3], memory_size)
rois, roi_batch_indices, pred_probs = act_detector.define_inference_with_placeholders_noinput(cropped_frames)
ckpt_path = os.path.join(main_folder, 'action_detection', 'weights', ckpt_name)
act_detector.restore_model(ckpt_path)
prob_dict = {}
frame_cnt = 0
# Tewan
min_teacher_features = 3
teacher_identified = 0
#missed_frame_cnt = 0
#max_age = 120
#frame_skips = 60
#next_frame = 0
teacher_ids = []
matched_id = None
# Tewan
for cur_img in reader:
frame_cnt += 1
#if frame_cnt < next_frame:
# continue
# Detect objects and make predictions every 8 frames (0.3 seconds)
#if frame_cnt % action_freq == 0:
# Object Detection
expanded_img = np.expand_dims(cur_img, axis=0)
detection_list = obj_detector.detect_objects_in_np(expanded_img)
detection_info = [info[0] for info in detection_list]
# Updates active actors in tracker
tracker.update_tracker(detection_info, cur_img)
no_actors = len(tracker.active_actors)
"""
if no_actors == 0:
missed_frame_cnt += 1
if missed_frame_cnt >= max_age:
tracker.update_tracker(detection_info, cur_img)
no_actors = len(tracker.active_actors)
teacher_identified = False
tracker.set_invalid_track()
missed_frame_cnt = 0
print("Reset active actors. Current number: {}".format(no_actors))
"""
if frame_cnt % action_freq == 0 and frame_cnt > 16:
if no_actors == 0:
print("No actor found.")
continue
video_time = round(frame_cnt / fps, 1)
valid_actor_ids = [actor["actor_id"] for actor in tracker.active_actors]
print("frame count: {}, video time: {}s".format(frame_cnt, video_time))
probs = []
cur_input_sequence = np.expand_dims(np.stack(tracker.frame_history[-action_freq:], axis=0), axis=0)
rois_np, temporal_rois_np = tracker.generate_all_rois()
if teacher_identified < min_teacher_features:
prompt_img = visualize_detection_results(img_np=tracker.frame_history[-16],
active_actors=tracker.active_actors,
prob_dict=None)
cv2.imshow('prompt_img', prompt_img[:,:,::-1])
cv2.waitKey(500)
teacher_present = False
teacher_id = _prompt_user_input()
if not _check_teacher_in_frame(teacher_id=teacher_id):
print("Teacher not in this frame. Continuing.")
cv2.destroyWindow("prompt_img")
pass
else:
if _check_valid_teacher_id(teacher_id=teacher_id, valid_actor_ids=valid_actor_ids):
teacher_id = int(teacher_id)
teacher_identified += 1
teacher_present = True
else:
while not teacher_present:
print("Invalid ID.")
teacher_id = _prompt_user_input()
if not _check_teacher_in_frame(teacher_id=teacher_id):
print("Teacher not in this frame. Continuing.")
cv2.destroyWindow("prompt_img")
break
else:
if _check_valid_teacher_id(teacher_id=teacher_id, valid_actor_ids=valid_actor_ids):
teacher_id = int(teacher_id)
teacher_identified += 1
teacher_present = True
else:
pass
# Move on to next frame if teacher not in current frame
if not teacher_present:
continue
cv2.destroyWindow("prompt_img")
if teacher_id not in teacher_ids:
teacher_ids.append(teacher_id)
tracker.update_teacher_candidate_ids(teacher_candidate_id=teacher_id)
else:
tracker.set_valid_track()
# Identify idx of teacher for ROI selection
roi_idx = None
found_id = False
for idx, actor_info in enumerate(tracker.active_actors):
actor_id = actor_info["actor_id"]
for i in range(len(teacher_ids)-1, -1, -1):
if actor_id == teacher_ids[i]:
roi_idx = idx
matched_id = actor_info["actor_id"]
found_id = True
break
if found_id:
break
# Identify ROI and temporal ROI using ROI idx
if roi_idx is not None:
rois_np = rois_np[roi_idx]
temporal_rois_np = temporal_rois_np[roi_idx]
rois_np = np.expand_dims(rois_np, axis=0)
temporal_rois_np = np.expand_dims(temporal_rois_np, axis=0)
no_actors = 1
# If teacher not found (i.e. roi_idx is None) in current frame, move on to next frame
else:
continue
#max_actors = 5
#if no_actors > max_actors:
# no_actors = max_actors
# rois_np = rois_np[:max_actors]
# temporal_rois_np = temporal_rois_np[:max_actors]
# Might have issue of not using attention map because only predict action for 1 actor (memory issue)
feed_dict = {updated_frames:cur_input_sequence, # only update last #action_freq frames
temporal_rois: temporal_rois_np,
temporal_roi_batch_indices: np.zeros(no_actors),
rois:rois_np,
roi_batch_indices:np.arange(no_actors)}
run_dict = {'pred_probs': pred_probs}
if SHOW_CAMS:
run_dict['cropped_frames'] = cropped_frames
run_dict['final_i3d_feats'] = act_detector.act_graph.get_collection('final_i3d_feats')[0]
run_dict['cls_weights'] = act_detector.act_graph.get_collection('variables')[-2] # this is the kernel
out_dict = act_detector.session.run(run_dict, feed_dict=feed_dict)
probs = out_dict['pred_probs']
# associate probs with actor ids
print_top_k = 5
for bb in range(no_actors):
#act_probs = probs[bb]
#order = np.argsort(act_probs)[::-1]
#cur_actor_id = tracker.active_actors[bb]['actor_id']
act_probs = probs[bb]
order = np.argsort(act_probs)[::-1]
cur_actor_id = tracker.active_actors[roi_idx]["actor_id"]
#print(cur_actor_id == actor_id)
#print("Person %i" % cur_actor_id)
#print("act_probs: {}".format(act_probs))
#print("order: {}".format(order))
#print("tracker.active_actors[bb]: {}".format(tracker.active_actors[bb]))
cur_results = []
for pp in range(print_top_k):
#print('\t %s: %.3f' % (act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]))
cur_results.append((act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]))
csv_writer.writerow([video_time, cur_actor_id, act.ACTION_STRINGS[order[pp]], act_probs[order[pp]]])
prob_dict[cur_actor_id] = cur_results
if frame_cnt > 16:
out_img = visualize_detection_results(tracker.frame_history[-16],
tracker.active_actors,
prob_dict=prob_dict,
teacher_id=matched_id)
if SHOW_CAMS:
if tracker.active_actors:
actor_indices = [ii for ii in range(no_actors) if tracker.active_actors[ii]['actor_id'] == actor_to_display]
if actor_indices:
out_img = visualize_cams(out_img, cur_input_sequence, out_dict, actor_indices[0])
else:
continue
else:
continue
if display:
cv2.imshow('results', out_img[:,:,::-1])
cv2.waitKey(10)
writer.append_data(out_img)
#if not display:
writer.close()
csv_file.close()
def _prompt_user_input():
teacher_id = input("Enter the id of the teacher (type None if teacher is not present in this frame): ")
return teacher_id
def _check_teacher_in_frame(teacher_id):
if teacher_id == "None" or teacher_id == "none":
return False
return True
def _check_valid_teacher_id(teacher_id, valid_actor_ids):
try:
teacher_id = int(teacher_id)
if teacher_id in valid_actor_ids:
return True
else:
return False
except:
return False
np.random.seed(10)
COLORS = np.random.randint(0, 255, [1000, 3])
def visualize_detection_results(img_np, active_actors, prob_dict=None, teacher_id=None):
score_th = 0.30
action_th = 0.20
# copy the original image first
disp_img = np.copy(img_np)
H, W, C = img_np.shape
#for ii in range(len(active_actors)):
for ii in range(len(active_actors)):
cur_actor = active_actors[ii]
actor_id = cur_actor['actor_id']
if teacher_id is not None:
if actor_id != teacher_id:
continue
if prob_dict:
cur_act_results = prob_dict[actor_id] if actor_id in prob_dict else []
try:
if len(cur_actor["all_boxes"]) > 0:
cur_box, cur_score, cur_class = cur_actor['all_boxes'][-16], cur_actor['all_scores'][0], 1
else:
cur_box, cur_score, cur_class = cur_actor['all_boxes'][0], cur_actor['all_scores'][0], 1
except IndexError:
continue
if cur_score < score_th:
continue
top, left, bottom, right = cur_box
left = int(W * left)
right = int(W * right)
top = int(H * top)
bottom = int(H * bottom)
conf = cur_score
label = obj.OBJECT_STRINGS[cur_class]['name']
message = '%s_%i: %% %.2f' % (label, actor_id,conf)
if prob_dict:
action_message_list = ["%s:%.3f" % (actres[0][0:7], actres[1]) for actres in cur_act_results if actres[1]>action_th]
color = COLORS[actor_id]
color = (int(color[0]), int(color[1]), int(color[2]))
cv2.rectangle(disp_img, (left,top), (right,bottom), color, 3)
font_size = max(0.5,(right - left)/50.0/float(len(message)))
cv2.rectangle(disp_img, (left, top-int(font_size*40)), (right,top), color, -1)
cv2.putText(disp_img, message, (left, top-12), 0, font_size, (255-color[0], 255-color[1], 255-color[2]), 1)
if prob_dict:
#action message writing
cv2.rectangle(disp_img, (left, top), (right,top+10*len(action_message_list)), color, -1)
for aa, action_message in enumerate(action_message_list):
offset = aa*10
cv2.putText(disp_img, action_message, (left, top+5+offset), 0, 0.5, (255-color[0], 255-color[1], 255-color[2]), 1)
return disp_img
def visualize_cams(image, input_frames, out_dict, actor_idx):
#classes = ["walk", "bend", "carry"]
#classes = ["sit", "ride"]
classes = ["talk to", "watch (a", "listen to"]
action_classes = [cc for cc in range(60) if any([cname in act.ACTION_STRINGS[cc] for cname in classes])]
feature_activations = out_dict['final_i3d_feats']
cls_weights = out_dict['cls_weights']
input_frames = out_dict['cropped_frames'].astype(np.uint8)
probs = out_dict["pred_probs"]
class_maps = np.matmul(feature_activations, cls_weights)
min_val = np.min(class_maps[:,:, :, :, :])
max_val = np.max(class_maps[:,:, :, :, :]) - min_val
normalized_cmaps = np.uint8((class_maps-min_val)/max_val * 255.)
t_feats = feature_activations.shape[1]
t_input = input_frames.shape[1]
index_diff = (t_input) // (t_feats+1)
img_new_height = 400
img_new_width = int(image.shape[1] / float(image.shape[0]) * img_new_height)
img_to_show = cv2.resize(image.copy(), (img_new_width,img_new_height))[:,:,::-1]
#img_to_concat = np.zeros((400, 800, 3), np.uint8)
img_to_concat = np.zeros((400, 400, 3), np.uint8)
for cc in range(len(action_classes)):
cur_cls_idx = action_classes[cc]
act_str = act.ACTION_STRINGS[action_classes[cc]]
message = "%s:%%%.2d" % (act_str[:20], 100*probs[actor_idx, cur_cls_idx])
for tt in range(t_feats):
cur_cam = normalized_cmaps[actor_idx, tt,:,:, cur_cls_idx]
cur_frame = input_frames[actor_idx, (tt+1) * index_diff, :,:,::-1]
resized_cam = cv2.resize(cur_cam, (100,100))
colored_cam = cv2.applyColorMap(resized_cam, cv2.COLORMAP_JET)
overlay = cv2.resize(cur_frame.copy(), (100,100))
overlay = cv2.addWeighted(overlay, 0.5, colored_cam, 0.5, 0)
img_to_concat[cc*125:cc*125+100, tt*100:(tt+1)*100, :] = overlay
cv2.putText(img_to_concat, message, (20, 13+100+125*cc), 0, 0.5, (255,255,255), 1)
final_image = np.concatenate([img_to_show, img_to_concat], axis=1)
return final_image[:,:,::-1]
if __name__ == '__main__':
main()
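# Illustrative command line; the video path is a placeholder, not part of the original script:
#   python detect_actions.py -v ./videos/sample.mp4 -d True
# -v/--video_path selects the input video and -d/--display toggles the OpenCV preview window,
# matching the argparse arguments defined at the top of main().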
| 38.386878 | 145 | 0.590263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,712 | 0.218778 |
571af6febfc1dc4cb09b37f0fb44cc848ccf1059 | 5,556 | py | Python | tests/test_parametric_shapes/test_SweepMixedShape.py | RemDelaporteMathurin/paramak | 10552f1b89820dd0f7a08e4a126834877e3106b4 | [
"MIT"
]
| null | null | null | tests/test_parametric_shapes/test_SweepMixedShape.py | RemDelaporteMathurin/paramak | 10552f1b89820dd0f7a08e4a126834877e3106b4 | [
"MIT"
]
| null | null | null | tests/test_parametric_shapes/test_SweepMixedShape.py | RemDelaporteMathurin/paramak | 10552f1b89820dd0f7a08e4a126834877e3106b4 | [
"MIT"
]
| null | null | null | import os
import unittest
from pathlib import Path
import pytest
from paramak import SweepMixedShape
class test_object_properties(unittest.TestCase):
def test_solid_construction(self):
"""checks that a SweepMixedShape solid can be created"""
test_shape = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[
(50, 0),
(20, 50),
(50, 100)
]
)
test_shape.create_solid()
assert test_shape.solid is not None
    def test_solid_construction_workplane_YZ(self):
"""checks that a SweepMixedShape solid can be created with workplane
YZ"""
test_shape = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[
(50, 0),
(20, 50),
(50, 100)
],
workplane='YZ',
path_workplane="YX"
)
assert test_shape.solid is not None
def test_solid_construction_workplane_XZ(self):
"""checks that a SweepMixedShape solid can be created with workplane
XZ"""
test_shape = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[
(50, 0),
(20, 50),
(50, 100)
],
workplane='XZ',
path_workplane="XY"
)
assert test_shape.solid is not None
def test_relative_shape_volume(self):
"""creates two SweepMixedShapes and checks that their relative volumes
are correct"""
test_shape_1 = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[
(50, 0),
(30, 50),
(50, 100)
]
)
test_shape_1.create_solid()
test_shape_2 = SweepMixedShape(
points=[
(-20, -20, "straight"),
(-20, 20, "spline"),
(0, 40, "spline"),
(20, 20, "circle"),
(0, 0, "circle"),
(20, -20, "straight")
],
path_points=[
(50, 0),
(30, 50),
(50, 100)
]
)
test_shape_2.create_solid()
assert test_shape_1.volume == pytest.approx(
test_shape_2.volume * 0.25, rel=0.01)
def test_iterable_azimuthal_placement(self):
"""checks that swept solids can be placed at multiple azimuth placement angles"""
test_shape = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[
(50, 0),
(30, 50),
(60, 100),
(50, 150)
]
)
test_shape.create_solid()
test_volume = test_shape.volume
test_shape.azimuth_placement_angle = [0, 90, 180, 270]
assert test_shape.volume == pytest.approx(test_volume * 4, rel=0.01)
def test_workplane_path_workplane_error_raises(self):
"""checks that errors are raised when disallowed workplane and path_workplane
combinations are used"""
def workplane_and_path_workplane_equal():
test_shape = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[(50, 0), (30, 50), (60, 100), (50, 150)],
workplane="XZ",
path_workplane="XZ"
)
def invalid_relative_workplane_and_path_workplane():
test_shape = SweepMixedShape(
points=[
(-10, -10, "straight"),
(-10, 10, "spline"),
(0, 20, "spline"),
(10, 10, "circle"),
(0, 0, "circle"),
(10, -10, "straight")
],
path_points=[(50, 0), (30, 50), (60, 100), (50, 150)],
workplane="XZ",
path_workplane="YZ"
)
self.assertRaises(ValueError, workplane_and_path_workplane_equal)
self.assertRaises(
ValueError,
invalid_relative_workplane_and_path_workplane)
if __name__ == "__main__":
unittest.main()
| 29.089005 | 89 | 0.430166 | 5,401 | 0.972102 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.173146 |
571d88262578b13c16efd4393da7b28acd2cd972 | 458 | py | Python | upload.py | newkisoft/newki-sql-backup | 82690b25cfa57dc770210e5a3398954949271c0e | [
"MIT"
]
| null | null | null | upload.py | newkisoft/newki-sql-backup | 82690b25cfa57dc770210e5a3398954949271c0e | [
"MIT"
]
| null | null | null | upload.py | newkisoft/newki-sql-backup | 82690b25cfa57dc770210e5a3398954949271c0e | [
"MIT"
]
| null | null | null | from boto3.s3.transfer import S3Transfer
import boto3
import glob
import os
def upload_to_aws(local_file, bucket, s3_file):
client = boto3.client('s3', aws_access_key_id='###',aws_secret_access_key='###')
transfer = S3Transfer(client)
    transfer.upload_file(local_file, bucket, s3_file)
files = glob.glob("*.bak")
for file in files:
print(file)
uploaded = upload_to_aws(file, 'newki-backup', file)
os.remove(file)
| 28.625 | 84 | 0.69869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.076419 |
571dbed119712d82f6343f841d5c39a1d78ee427 | 996 | py | Python | run_rnn.py | iqbaalmuhmd/CNNnumpyTest | eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b | [
"MIT"
]
| 332 | 2017-06-13T10:40:05.000Z | 2022-03-11T15:10:02.000Z | run_rnn.py | iqbaalmuhmd/CNNnumpyTest | eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b | [
"MIT"
]
| 9 | 2017-06-16T02:36:06.000Z | 2021-05-09T06:01:34.000Z | run_rnn.py | iqbaalmuhmd/CNNnumpyTest | eaecf5bc53a7b5c932a82d38cc6ca2a40430af4b | [
"MIT"
]
| 105 | 2017-06-15T06:40:44.000Z | 2022-03-09T06:38:59.000Z | import numpy as np
from deepnet.nnet import RNN
from deepnet.solver import sgd_rnn
def text_to_inputs(path):
"""
Converts the given text into X and y vectors
X : contains the index of all the characters in the text vocab
y : y[i] contains the index of next character for X[i] in the text vocab
"""
with open(path) as f:
txt = f.read()
X, y = [], []
char_to_idx = {char: i for i, char in enumerate(set(txt))}
idx_to_char = {i: char for i, char in enumerate(set(txt))}
X = np.array([char_to_idx[i] for i in txt])
y = [char_to_idx[i] for i in txt[1:]]
y.append(char_to_idx['.'])
y = np.array(y)
vocab_size = len(char_to_idx)
return X, y, vocab_size, char_to_idx, idx_to_char
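# Worked example (illustrative; not part of the original script). Assuming the file holds the
# text "aba" and set(txt) happens to be enumerated as {'a': 0, 'b': 1}:
#   X = [0, 1, 0]                  # vocab index of each character
#   y = [1, 0, char_to_idx['.']]   # vocab index of the next character, with '.' appended at the end
# Note the text must actually contain '.' somewhere, otherwise char_to_idx['.'] above raises KeyError.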
if __name__ == "__main__":
X, y, vocab_size, char_to_idx, idx_to_char = text_to_inputs('data/Rnn.txt')
rnn = RNN(vocab_size,vocab_size,char_to_idx,idx_to_char)
rnn = sgd_rnn(rnn,X,y,10,10,0.1)
| 27.666667 | 79 | 0.625502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.231928 |
571ddbe314e19b402b88195037ee31e371ecdddf | 5,421 | py | Python | lcclassifier/experiments/attnstats.py | oscarpimentel/astro-lightcurves-classifier | f697b43e22bd8c92c1b9df514be8565c736dd7cc | [
"MIT"
]
| 1 | 2021-12-31T18:00:08.000Z | 2021-12-31T18:00:08.000Z | lcclassifier/experiments/attnstats.py | oscarpimentel/astro-lightcurves-classifier | f697b43e22bd8c92c1b9df514be8565c736dd7cc | [
"MIT"
]
| null | null | null | lcclassifier/experiments/attnstats.py | oscarpimentel/astro-lightcurves-classifier | f697b43e22bd8c92c1b9df514be8565c736dd7cc | [
"MIT"
]
| null | null | null | from __future__ import print_function
from __future__ import division
from . import _C
import torch
from fuzzytorch.utils import TDictHolder, tensor_to_numpy, minibatch_dict_collate
import numpy as np
from fuzzytools.progress_bars import ProgressBar, ProgressBarMulti
import fuzzytools.files as files
import fuzzytools.datascience.metrics as fcm
from fuzzytools.matplotlib.utils import save_fig
import matplotlib.pyplot as plt
import fuzzytorch.models.seq_utils as seq_utils
from scipy.optimize import curve_fit
from lchandler import _C as _Clchandler
from lchandler.plots.lc import plot_lightcurve
from .utils import check_attn_scores
EPS = _C.EPS
###################################################################################################################################################
def local_slope_f(time, m, n):
return time*m+n
def get_local_slope(days, obs, j, dj,
p0=[0,0],
):
assert not dj%2==0
assert dj>=3
ji = max(0, j-dj//2)
jf = min(j+dj//2+1, len(obs))
sub_days = days[ji:jf] # sequence steps
sub_obs = obs[ji:jf] # sequence steps
popt, pcov = curve_fit(local_slope_f, sub_days, sub_obs, p0=p0)
local_slope_m, local_slope_n = popt
return local_slope_m, local_slope_n, sub_days, sub_obs
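# Illustrative call; not part of the original module. With j=5 and dj=3 the window is
# days[4:7]/obs[4:7], and curve_fit returns the slope/intercept of the straight line that
# best fits those points:
#   m, n, window_days, window_obs = get_local_slope(days, obs, j=5, dj=3)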
###################################################################################################################################################
def save_attnstats(train_handler, data_loader, save_rootdir,
eps:float=EPS,
dj=3,
min_len=3,
**kwargs):
train_handler.load_model() # important, refresh to best model
train_handler.model.eval() # important, model eval mode
dataset = data_loader.dataset # get dataset
is_parallel = 'Parallel' in train_handler.model.get_name()
if not is_parallel:
return
attn_scores_collection = {b:[] for kb,b in enumerate(dataset.band_names)}
with torch.no_grad():
tdicts = []
for ki,in_tdict in enumerate(data_loader):
train_handler.model.autoencoder['encoder'].add_extra_return = True
_tdict = train_handler.model(TDictHolder(in_tdict).to(train_handler.device))
train_handler.model.autoencoder['encoder'].add_extra_return = False
tdicts += [_tdict]
tdict = minibatch_dict_collate(tdicts)
for kb,b in enumerate(dataset.band_names):
p_onehot = tdict[f'input/onehot.{b}'][...,0] # (n,t)
#p_rtime = tdict[f'input/rtime.{b}'][...,0] # (n,t)
#p_dtime = tdict[f'input/dtime.{b}'][...,0] # (n,t)
#p_x = tdict[f'input/x.{b}'] # (n,t,i)
#p_rerror = tdict[f'target/rerror.{b}'] # (n,t,1)
#p_rx = tdict[f'target/recx.{b}'] # (n,t,1)
# print(tdict.keys())
uses_attn = any([f'attn_scores' in k for k in tdict.keys()])
if not uses_attn:
return
### attn scores
attn_scores = tdict[f'model/attn_scores/encz.{b}'] # (n,h,qt)
assert check_attn_scores(attn_scores)
attn_scores_mean = attn_scores.mean(dim=1)[...,None] # (n,h,qt)>(n,qt)>(n,qt,1) # mean attention score among the heads: not a distribution
attn_scores_min_max = seq_utils.seq_min_max_norm(attn_scores_mean, p_onehot) # (n,qt,1)
### stats
lcobj_names = dataset.get_lcobj_names()
bar = ProgressBar(len(lcobj_names))
for k,lcobj_name in enumerate(lcobj_names):
lcobj = dataset.lcset[lcobj_name]
lcobjb = lcobj.get_b(b) # complete lc
p_onehot_k = tensor_to_numpy(p_onehot[k]) # (n,t)>(t)
b_len = p_onehot_k.sum()
assert b_len<=len(lcobjb), f'{b_len}<={len(lcobjb)}'
if not b_len>=min_len:
continue
attn_scores_k = tensor_to_numpy(attn_scores_mean[k,:b_len,0]) # (n,qt,1)>(t)
attn_scores_min_max_k = tensor_to_numpy(attn_scores_min_max[k,:b_len,0]) # (n,qt,1)>(t)
days = lcobjb.days[:b_len] # (t)
obs = lcobjb.obs[:b_len] # (t)
obse = lcobjb.obse[:b_len] # (t)
snr = lcobjb.get_snr(max_len=b_len)
max_obs = np.max(obs)
peak_day = days[np.argmax(obs)]
duration = days[-1]-days[0]
bar(f'b={b}; lcobj_name={lcobj_name}; b_len={b_len}; snr={snr}; max_obs={max_obs}')
lc_features = []
for j in range(0, b_len):
j_features = {
f'j':j,
f'attn_scores_k.j':attn_scores_k[j],
f'attn_scores_min_max_k.j':attn_scores_min_max_k[j],
f'days.j':days[j],
f'obs.j':obs[j],
f'obse.j':obse[j],
}
local_slope_m, local_slope_n, sub_days, sub_obs = get_local_slope(days, obs, j, dj) # get local slope
j_features.update({
f'local_slope_m.j~dj={dj}':local_slope_m,
f'local_slope_n.j~dj={dj}':local_slope_n,
f'peak_distance.j~dj={dj}~mode=local':days[j]-peak_day,
f'peak_distance.j~dj={dj}~mode=mean':np.mean(sub_days)-peak_day,
f'peak_distance.j~dj={dj}~mode=median':np.median(sub_days)-peak_day,
})
lc_features += [j_features]
attn_scores_collection[b] += [{
f'c':dataset.class_names[lcobj.y],
f'b_len':b_len,
f'peak_day':peak_day,
f'duration':duration,
f'snr':snr,
f'max_obs':max_obs,
f'lc_features':lc_features,
}]
bar.done()
results = {
'model_name':train_handler.model.get_name(),
'survey':dataset.survey,
'band_names':dataset.band_names,
'class_names':dataset.class_names,
'max_day':dataset.max_day,
'attn_scores_collection':attn_scores_collection,
}
### save file
save_filedir = f'{save_rootdir}/{dataset.lcset_name}/id={train_handler.id}.d'
files.save_pickle(save_filedir, results) # save file
dataset.reset_max_day() # very important!!
dataset.calcule_precomputed() # very important!!
return | 35.431373 | 147 | 0.65855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,568 | 0.289246 |
571ea096124b732422144c10209f4cc5cb3c06c7 | 1,473 | py | Python | get_item_by_key.py | flyco2016/my_python_module_project | 6e1ac7f074f7b57403d7b7c6adadab17a26fc27d | [
"Apache-2.0"
]
| null | null | null | get_item_by_key.py | flyco2016/my_python_module_project | 6e1ac7f074f7b57403d7b7c6adadab17a26fc27d | [
"Apache-2.0"
]
| 1 | 2019-01-04T06:37:06.000Z | 2019-01-04T06:37:06.000Z | get_item_by_key.py | flyco2016/my_python_module_project | 6e1ac7f074f7b57403d7b7c6adadab17a26fc27d | [
"Apache-2.0"
]
 | null | null | null | # Fetch a value by key from a nested dict/list structure
def getItemByKey(obj, key, result=None):
if isinstance(obj, dict):
for k in obj:
if key == k:
if isinstance(result, list):
if isinstance(obj[k], list):
result.extend(obj[k])
else:
result.append(obj[k])
elif result is None:
result = obj[k]
else:
result = [result]
result.append(obj[k])
else:
if isinstance(obj[k], dict) or isinstance(obj[k], list):
result = getItemByKey(obj[k], key, result)
elif isinstance(obj, list):
for i in obj:
if isinstance(i, dict) or isinstance(i, list):
result = getItemByKey(i, key, result)
return result[0] if isinstance(result, list) and len(result) == 1 else result
def getItemByKeyInMyMethod(dict_obj, key, default=None):
import types
    for k, v in dict_obj.items():
if k == key:
return v
else:
if type(v) is dict:
ret = getItemByKeyInMyMethod(v, key, default)
if ret is not default:
return ret
return default
if __name__ == "__main__":
test_dic = {'a': 1, 'b': 2, 'c': {'a': 1, 'b': {'b': 4}}}
r1 = getItemByKey(test_dic, 'b')
r2 = getItemByKeyInMyMethod(test_dic, 'b')
print(r1, r2, sep='\n') | 33.477273 | 81 | 0.491514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.046885 |
571ee3c5442f3448677cce9af4cb7b0165e2aa98 | 148 | py | Python | tests/test_tail_chunks.py | moskytw/tacit | 58286a71140be150438d10acf93028ef5f78f6d1 | [
"MIT"
]
| null | null | null | tests/test_tail_chunks.py | moskytw/tacit | 58286a71140be150438d10acf93028ef5f78f6d1 | [
"MIT"
]
| null | null | null | tests/test_tail_chunks.py | moskytw/tacit | 58286a71140be150438d10acf93028ef5f78f6d1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tacit import tac_slices
for chunk in tac_slices('data/ordered.list', 2):
print repr(chunk)
| 18.5 | 48 | 0.675676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.425676 |
571f77622a48c2fb03cc44698429e534d7932593 | 7,166 | py | Python | calories.py | davidsvaughn/har-pytorch | 334733a1e870637c9077d16fc15e0b1954a6dfc5 | [
"MIT"
]
| 5 | 2020-09-17T12:17:13.000Z | 2022-02-28T08:07:49.000Z | calories.py | davidsvaughn/har-pytorch | 334733a1e870637c9077d16fc15e0b1954a6dfc5 | [
"MIT"
]
| null | null | null | calories.py | davidsvaughn/har-pytorch | 334733a1e870637c9077d16fc15e0b1954a6dfc5 | [
"MIT"
]
| null | null | null | import numpy as np
import pandas as pd
import json
from datetime import datetime
import psycopg2
import functools
import requests
##############################################################
## https://www.exrx.net/Calculators/WalkRunMETs
## https://www.cdc.gov/growthcharts/clinical_charts.htm
## https://help.fitbit.com/articles/en_US/Help_article/1141
##############################################################
URL = 'https://f73lzrw31i.execute-api.us-west-2.amazonaws.com/default/demo_data_server'
HEADER = {'x-api-key': 'XXXXXX'}
class adict(dict):
def __init__(self, *av, **kav):
dict.__init__(self, *av, **kav)
self.__dict__ = self
def tofloat(x):
try:
return float(x.strip())
except:
return None
@functools.lru_cache(maxsize=250)
def request_demo_data(pid):
payload = {'pid': pid}
r = requests.post(URL, headers=HEADER, data=json.dumps(payload))
return adict((k.strip("' "), tofloat(v)) for k,v in (item.split(':') for item in r.text[2:-2].split(',')))
#############################################################################################
#############################################################################################
revibe = adict()
revibe.DBNAME = 'revibe'
revibe.HOST = 'prd.c5fw7irdcxik.us-west-2.rds.amazonaws.com'
#revibe.PORT = '5432'
revibe.USER = 'dave'
revibe.PASS = 'tnoiSLoHjEBZE6JKsFgY'
revibe.SSLMODE = 'require'
CONN = None
def get_conn_string(creds):
conn_str = 'host='+ creds.HOST \
+' dbname='+ creds.DBNAME +' user=' + creds.USER \
+' password='+ creds.PASS \
+ (' sslmode='+ creds.SSLMODE if 'SSLMODE' in creds else '') \
+ (' port='+ creds.PORT if 'PORT' in creds else '')
return conn_str
def get_conn(creds):
conn_str = get_conn_string(creds)
return psycopg2.connect(conn_str)
def run_sql(sql, verbose=False):
global CONN
if CONN is None:
CONN = get_conn(revibe)
if verbose: print(sql)
with CONN:
data = pd.read_sql(sql, CONN)
if verbose: print(data.shape)
return (data)
def get_pid_data(pid):
table = 'private.person_demographic_view'
sql_command = "SELECT * FROM {} WHERE (person_id={});".format(table, pid)
df = run_sql(sql_command)
if df.size==0:
raise ValueError('SQL returned no records:\n\t{}'.format(sql_command))
data = adict()
bday = df.birthday.values[0]
sex = df.sex_id.values[0]
grade = df.grade.values[0]
ht = df.height.values[0]
wt = df.weight.values[0]
wrist = df.wrist_id.values[0]
data.pid = pid
data.age = None
if bday is not None:
data.bday = str(bday)
bday = pd.Timestamp(str(bday)).to_pydatetime()
data.age = np.round((datetime.now()-bday).total_seconds() / (60*60*2*365), 2) ## in months
data.sex = None if sex==0 or sex>2 else sex
data.grade = None if grade==0 else grade
data.ht = None if ht==0 else ht
data.wt = None if wt==0 else wt
data.wrist = None if wrist==0 else wrist
return data
## revised Harris-Benedict BMR equations...
def bmr_hb(dd, sex=None):
try:
sex = dd.sex if sex is None else sex
if sex==1:
return 6.078*dd.wt + 12.192*dd.ht - 0.473*dd.age + 88.4
if sex==2:
return 4.196*dd.wt + 7.874*dd.ht - 0.36*dd.age + 447.6
return None
    except Exception:
return None
## basal metabolic rate (kCals per day)
def BMR(dd):
try:
if dd.sex is None:
bmr = (bmr_hb(dd,1) + bmr_hb(dd,2))/2
else:
bmr = bmr_hb(dd)
return int(round(bmr))
    except Exception:
return None
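## Worked example (illustrative numbers, not taken from the data): for a boy with
## age=120 months, ht=54 and wt=70 (the inch/pound units used elsewhere in this file),
## the revised Harris-Benedict line above gives
##   6.078*70 + 12.192*54 - 0.473*120 + 88.4  ~=  1115 kCal/day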
## find index j into y that minimizes abs(x-y[j])
def xminy(x, y):
return abs(x-y).argmin(axis=-1)
class GrowthChart(object):
## columns: age ht_boy ht_girl wt_boy wt_girl
def __init__(self, fn='growth.tsv'):#, path=None):
# if path is None: path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# fn = os.path.join(path, fn)
df = pd.read_csv(fn, sep='\t')
self.G = df.values
self.S = np.array([[0.415, 0.413], [0.675, 0.57]])
def fill_data(self, d):
if d.age is None:
if d.ht is None or d.wt is None:
raise ValueError('Either birthday, or both height and weight, must be non-null')
else:
row = xminy(d.age, self.G[:,0])
cols = np.array([d.sex] if d.sex is not None else [1, 2])
if d.ht is None:
d.ht = self.G[row, cols].mean()
if d.wt is None:
d.wt = self.G[row, cols+2].mean()
d.ws = np.round(d.ht * self.S[0, cols-1].mean(), 2) ## walk stride
d.rs = np.round(d.ht * self.S[1, cols-1].mean(), 2) ## run stride
#d.bmr = BMR(d) ## basal metabolic rate (kCals per day)
GC = None
@functools.lru_cache(maxsize=250)
def get_demo_data(pid):
data = get_pid_data(pid)
global GC
if GC is None:
GC = GrowthChart()
GC.fill_data(data)
return data
def fixnum(x, dtype=float):
if x is None: return None
x = dtype(x)
if x==0: return None
return x
def validate_demo_data(data):
data.ht = fixnum(data.ht)
data.wt = fixnum(data.wt)
data.sex = fixnum(data.sex, int)
if data.sex is not None and data.sex>2:
data.sex = None
data.age = None
if data.bday is None:
if data.ht is None or data.wt is None:
raise ValueError('Either birthday, or both height and weight, must be non-null')
else:
bday = pd.Timestamp(str(data.bday)).to_pydatetime()
data.age = np.round((datetime.now()-bday).total_seconds() / (60*60*2*365), 2) ## in months
# data.bday = data.bday.strftime('%Y-%m-%d')
@functools.lru_cache(maxsize=250)
def make_demo_data(bday=None, ht=None, wt=None, sex=None):
data = adict()
data.bday = bday or None
data.ht = ht or None
data.wt = wt or None
data.sex = sex or None
validate_demo_data(data)
#########
global GC
if GC is None:
GC = GrowthChart()
GC.fill_data(data)
return data
## s : speed in mph... sec by second vector of speeds....
## w : weight in lbs
## mode : 2=='walk', 3=='run'
## returns : calories summed across all seconds
def calsum(s, w, mode=2):
su, wu = 26.8, 2.2
s = s*su
w = w/wu
if mode==3:## run mode
vo = 0.2*s
else: ## walk mode == 2
fwvo = 21.11 - 0.3593*s + 0.003*s*s - 3.5
wvo = 0.1*s
d = 30
a = np.clip((s-(100-d))/(2*d), 0, 1)
vo = wvo*(1.-a) + fwvo*a
#############################
return np.sum(vo*w) / 12000.0
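## Illustrative call, not part of the original script: 60 seconds of walking at 3 mph
## for a 100 lb child, passing the per-second speed vector described above:
##   calsum(np.full(60, 3.0), 100, mode=2)   # roughly 1.8 kCal with these inputs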
###################################
if __name__ == "__main__":
pid = 135 ## 135,"1974-05-28",1,0,74,196,1
pid = 169 ## 169,"1980-12-01",1,12,72,170,2
pid = 18947 ## 18947,"2010-08-28",0,0,0,0,0
pid = 10885 ##
# dd = request_demo_data(pid)
# print(dd)
# dd = get_demo_data(pid)
# print(dd)
#############
dd = make_demo_data(bday='2010-08-28', ht='54.035', wt='69.69', sex='3')
# dd = make_demo_data(ht='70', wt='120', sex='2')
print(dd)
| 30.887931 | 110 | 0.558889 | 1,205 | 0.168155 | 0 | 0 | 813 | 0.113452 | 0 | 0 | 2,055 | 0.286771 |
5720538c6a907cf78b7908572f98d96063191e14 | 1,651 | py | Python | pagelets/conf.py | rysdyk/django-pagelets | 31669771b7ecf8ade3dae64465d3fa984d88d0f9 | [
"BSD-3-Clause"
]
| null | null | null | pagelets/conf.py | rysdyk/django-pagelets | 31669771b7ecf8ade3dae64465d3fa984d88d0f9 | [
"BSD-3-Clause"
]
| null | null | null | pagelets/conf.py | rysdyk/django-pagelets | 31669771b7ecf8ade3dae64465d3fa984d88d0f9 | [
"BSD-3-Clause"
]
| null | null | null | from django.conf import settings
CONTENT_AREAS = getattr(settings, 'PAGELET_CONTENT_AREAS', (
('main', 'Main'),
))
CONTENT_AREA_DEFAULT = getattr(settings, 'PAGELET_CONTENT_AREA_DEFAULT', 'main')
CONTENT_TYPES = getattr(settings, 'PAGELET_CONTENT_TYPES', (
('html', 'HTML',
(),
{},),
('markdown', 'Markdown',
(),
{},),
('wymeditor', 'WYMeditor',
('wymeditor/jquery.wymeditor.js',),
{},),
('textile', 'Textile',
(),
{},),
)) + getattr(settings, 'PAGELET_CONTENT_TYPES_EXTRA', ())
CONTENT_TYPE_CHOICES = tuple((c[0], c[1]) for c in CONTENT_TYPES)
CONTENT_TYPE_DEFAULT = getattr(settings, 'PAGELET_CONTENT_TYPE_DEFAULT', 'html')
try:
ATTACHMENT_PATH = settings.PAGELET_ATTACHMENT_PATH
except AttributeError:
ATTACHMENT_PATH = getattr(settings, 'PAGE_ATTACHMENT_PATH', 'attachments/pages/')
# settings.PAGELET_TEMPLATE_TAGS is a list of template tag names that
# will load before each pagelet is rendered, allowing custom template
# tags to be included without including {% load <template_tag> %}
tags = set(['pagelet_tags'])
if hasattr(settings, 'PAGELET_TEMPLATE_TAGS'):
for tag in settings.PAGELET_TEMPLATE_TAGS:
tags.add(tag)
AUTO_LOAD_TEMPLATE_TAGS = '{%% load %s %%}' % ' '.join(tags)
BASE_TEMPLATES = getattr(settings, 'PAGELET_BASE_TEMPLATES', [])
BASE_TEMPLATE_DEFAULT = getattr(settings, 'PAGELET_BASE_TEMPLATE_DEFAULT', None)
INLINE_PAGELET_EXTRA = getattr(settings, 'PAGELET_INLINE_PAGELET_EXTRA', 0)
INLINE_SHARED_EXTRA = getattr(settings, 'PAGELET_INLINE_SHARED_EXTRA', 0)
INLINE_ATTACHMENT_EXTRA = getattr(settings, 'PAGELET_INLINE_ATTACHMENT_EXTRA', 0)
| 36.688889 | 85 | 0.723198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 711 | 0.430648 |
5721e5bf810d647e593fd1d82e6a86cb2fa7e570 | 14,744 | py | Python | alphad3m/alphad3m/metalearning/grammar_builder.py | VIDA-NYU/alphad3m | db40193a448300d87442c451f9da17fa5cb845fd | [
"Apache-2.0"
]
| null | null | null | alphad3m/alphad3m/metalearning/grammar_builder.py | VIDA-NYU/alphad3m | db40193a448300d87442c451f9da17fa5cb845fd | [
"Apache-2.0"
]
| null | null | null | alphad3m/alphad3m/metalearning/grammar_builder.py | VIDA-NYU/alphad3m | db40193a448300d87442c451f9da17fa5cb845fd | [
"Apache-2.0"
]
| null | null | null | import logging
import numpy as np
from scipy import stats
from collections import OrderedDict
from alphad3m.metalearning.resource_builder import load_metalearningdb
from alphad3m.metalearning.dataset_similarity import get_similar_datasets
from alphad3m.primitive_loader import load_primitives_by_name, load_primitives_by_id
logger = logging.getLogger(__name__)
def load_related_pipelines(dataset_path, target_column, task_keywords):
available_primitives = load_primitives_by_id()
all_pipelines = load_metalearningdb()
similar_datasets = get_similar_datasets('dataprofiles', dataset_path, target_column, task_keywords)
task_pipelines = []
for similar_dataset in similar_datasets.keys():
if similar_dataset not in all_pipelines['pipeline_performances']:
continue
for pipeline_id, pipeline_performances in all_pipelines['pipeline_performances'][similar_dataset].items():
primitive_ids = all_pipelines['pipeline_structure'][pipeline_id]
if is_available_primitive(primitive_ids, available_primitives):
for index in range(len(pipeline_performances['score'])):
primitives = [available_primitives[p] for p in primitive_ids] # Use the current names of primitives
score = pipeline_performances['score'][index]
metric = pipeline_performances['metric'][index]
task_pipelines.append({'pipeline': primitives, 'score': score, 'metric': metric, 'dataset': similar_dataset,
'pipeline_repr': '_'.join(primitives)})
logger.info('Found %d related pipelines', len(task_pipelines))
return task_pipelines
def create_metalearningdb_grammar(task_name, dataset_path, target_column, task_keywords):
pipelines = load_related_pipelines(dataset_path, target_column, task_keywords)
patterns, primitives = extract_patterns(pipelines)
merged_patterns, empty_elements = merge_patterns(patterns)
grammar = format_grammar(task_name, merged_patterns, empty_elements)
return grammar, primitives
def format_grammar(task_name, patterns, empty_elements):
if len(patterns) == 0:
logger.info('Empty patterns, no grammar have been generated')
return None
grammar = 'S -> %s\n' % task_name
grammar += task_name + ' -> ' + ' | '.join([' '.join(p) for p in patterns])
for element in sorted(set([e for sublist in patterns for e in sublist])): # Sort to have a deterministic grammar
production_rule = element + " -> 'primitive_terminal'"
if element in empty_elements:
production_rule += " | 'E'"
grammar += '\n' + production_rule
logger.info('Grammar obtained:\n%s', grammar)
return grammar
def extract_patterns(pipelines, max_nro_patterns=15, min_frequency=3, adtm_threshold=0.5, mean_score_threshold=0.5, ratio_datasets=0.2):
available_primitives = load_primitives_by_name()
pipelines = calculate_adtm(pipelines)
patterns = {}
for pipeline_data in pipelines:
if pipeline_data['adtm'] > adtm_threshold:
# Skip pipelines with average distance to the minimum higher than the threshold
continue
primitive_types = [available_primitives[p]['type'] for p in pipeline_data['pipeline']]
pattern_id = ' '.join(primitive_types)
if pattern_id not in patterns:
patterns[pattern_id] = {'structure': primitive_types, 'primitives': set(), 'datasets': set(), 'pipelines': [], 'scores': [], 'adtms': [], 'frequency': 0}
patterns[pattern_id]['primitives'].update(pipeline_data['pipeline'])
patterns[pattern_id]['datasets'].add(pipeline_data['dataset'])
patterns[pattern_id]['pipelines'].append(pipeline_data['pipeline'])
patterns[pattern_id]['scores'].append(pipeline_data['score'])
patterns[pattern_id]['adtms'].append(pipeline_data['adtm'])
patterns[pattern_id]['frequency'] += 1
logger.info('Found %d different patterns, after creating the portfolio', len(patterns))
# TODO: Group these removing conditions into a single loop
# Remove patterns with fewer elements than the minimum frequency
patterns = {k: v for k, v in patterns.items() if v['frequency'] >= min_frequency}
logger.info('Found %d different patterns, after removing uncommon patterns', len(patterns))
    # Remove patterns with undesirable primitives (AlphaD3M does not support some of these primitives)
blacklist_primitives = {'d3m.primitives.data_transformation.dataframe_to_ndarray.Common',
'd3m.primitives.data_transformation.list_to_dataframe.DistilListEncoder',
'd3m.primitives.data_transformation.ndarray_to_dataframe.Common',
'd3m.primitives.data_transformation.horizontal_concat.DSBOX',
'd3m.primitives.data_transformation.horizontal_concat.DataFrameCommon',
'd3m.primitives.data_transformation.multi_horizontal_concat.Common',
'd3m.primitives.data_transformation.conditioner.Conditioner',
'd3m.primitives.data_transformation.remove_semantic_types.Common',
'd3m.primitives.data_transformation.replace_semantic_types.Common',
'd3m.primitives.data_transformation.remove_columns.Common',
'd3m.primitives.operator.dataset_map.DataFrameCommon',
'd3m.primitives.data_transformation.i_vector_extractor.IVectorExtractor'}
patterns = {k: v for k, v in patterns.items() if not blacklist_primitives & v['primitives']}
logger.info('Found %d different patterns, after blacklisting primitives', len(patterns))
unique_datasets = set()
for pattern_id in patterns:
scores = patterns[pattern_id]['scores']
adtms = patterns[pattern_id]['adtms']
patterns[pattern_id]['mean_score'] = np.mean(scores)
patterns[pattern_id]['mean_adtm'] = np.mean(adtms)
unique_datasets.update(patterns[pattern_id]['datasets'])
# Remove patterns with low performances
patterns = {k: v for k, v in patterns.items() if v['mean_score'] >= mean_score_threshold}
logger.info('Found %d different patterns, after removing low-performance patterns', len(patterns))
# Remove patterns with low variability
patterns = {k: v for k, v in patterns.items() if len(v['datasets']) >= len(unique_datasets) * ratio_datasets}
logger.info('Found %d different patterns, after removing low-variability patterns', len(patterns))
if len(patterns) > max_nro_patterns:
logger.info('Found many patterns, selecting top %d (max_nro_patterns)' % max_nro_patterns)
sorted_patterns = sorted(patterns.items(), key=lambda x: x[1]['mean_score'], reverse=True)
patterns = {k: v for k, v in sorted_patterns[:max_nro_patterns]}
primitive_hierarchy = {}
all_pipelines = []
all_performances = []
all_primitives = []
local_probabilities = {}
for pattern_id, pattern in patterns.items():
for primitive in pattern['primitives']:
primitive_type = available_primitives[primitive]['type']
if primitive_type not in primitive_hierarchy:
primitive_hierarchy[primitive_type] = set()
primitive_hierarchy[primitive_type].add(primitive)
        performances = [1 - x for x in pattern['adtms']]  # Use adtms as performances because they are scaled
all_pipelines += pattern['pipelines']
all_primitives += pattern['primitives']
all_performances += performances
correlations = calculate_correlations(pattern['primitives'], pattern['pipelines'], performances)
local_probabilities[pattern_id] = {}
for primitive, correlation in correlations.items():
primitive_type = available_primitives[primitive]['type']
if primitive_type not in local_probabilities[pattern_id]:
local_probabilities[pattern_id][primitive_type] = {}
local_probabilities[pattern_id][primitive_type][primitive] = correlation
correlations = calculate_correlations(set(all_primitives), all_pipelines, all_performances)
global_probabilities = {}
for primitive, correlation in correlations.items():
primitive_type = available_primitives[primitive]['type']
if primitive_type not in global_probabilities:
global_probabilities[primitive_type] = {}
global_probabilities[primitive_type][primitive] = correlation
# Make deterministic the order of the patterns and hierarchy
patterns = sorted(patterns.values(), key=lambda x: x['mean_score'], reverse=True)
primitive_hierarchy = OrderedDict({k: sorted(v) for k, v in sorted(primitive_hierarchy.items(), key=lambda x: x[0])})
logger.info('Patterns:\n%s', patterns_repr(patterns))
logger.info('Hierarchy:\n%s', '\n'.join(['%s:\n%s' % (k, ', '.join(v)) for k, v in primitive_hierarchy.items()]))
patterns = [p['structure'] for p in patterns]
primitive_probabilities = {'global': global_probabilities, 'local': local_probabilities, 'types': available_primitives}
primitive_info = {'hierarchy': primitive_hierarchy, 'probabilities': primitive_probabilities}
return patterns, primitive_info
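# Note on the values returned above: `patterns` is a list of primitive-type sequences (the bodies of
# the grammar productions), while `primitive_info` bundles the type -> primitives hierarchy together
# with the correlation-based probabilities ('global' across all kept patterns, 'local' per pattern).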
def calculate_correlations(primitives, pipelines, scores, normalize=True):
correlations = {}
for primitive in primitives:
occurrences = [1 if primitive in pipeline else 0 for pipeline in pipelines]
correlation_coefficient, p_value = stats.pointbiserialr(occurrences, scores)
if np.isnan(correlation_coefficient): # Assign a positive correlation (1) to NaN values
correlation_coefficient = 1
if normalize: # Normalize the Pearson values, from [-1, 1] to [0, 1] range
            correlation_coefficient = (correlation_coefficient - (-1)) / 2  # (x_i - min(x)) / (max(x) - min(x))
correlations[primitive] = round(correlation_coefficient, 4)
return correlations
def calculate_adtm(pipelines):
dataset_performaces = {}
pipeline_performances = {}
for pipeline_data in pipelines:
# Even the same dataset can be run under different metrics. So, use the metric to create the id of the dataset
id_dataset = pipeline_data['dataset'] + '_' + pipeline_data['metric']
if id_dataset not in dataset_performaces:
dataset_performaces[id_dataset] = {'min': float('inf'), 'max': float('-inf')}
performance = pipeline_data['score']
if performance > dataset_performaces[id_dataset]['max']:
dataset_performaces[id_dataset]['max'] = performance
if performance < dataset_performaces[id_dataset]['min']:
dataset_performaces[id_dataset]['min'] = performance
id_pipeline = pipeline_data['pipeline_repr']
if id_pipeline not in pipeline_performances:
pipeline_performances[id_pipeline] = {}
if id_dataset not in pipeline_performances[id_pipeline]:
pipeline_performances[id_pipeline][id_dataset] = pipeline_data['score']
else:
# A pipeline can have different performances for a given dataset, choose the best one
if pipeline_data['score'] > pipeline_performances[id_pipeline][id_dataset]:
pipeline_performances[id_pipeline][id_dataset] = pipeline_data['score']
for pipeline_data in pipelines:
id_pipeline = pipeline_data['pipeline_repr']
id_dataset_pipeline = pipeline_data['dataset'] + '_' + pipeline_data['metric']
dtm = 0
for id_dataset in pipeline_performances[id_pipeline]: # Iterate over the datasets where the pipeline was used
minimum = dataset_performaces[id_dataset]['min']
maximum = dataset_performaces[id_dataset]['max']
if id_dataset_pipeline == id_dataset:
score = pipeline_data['score']
else:
score = pipeline_performances[id_pipeline][id_dataset]
if minimum != maximum:
dtm += (maximum - score) / (maximum - minimum)
adtm = dtm / len(pipeline_performances[id_pipeline])
pipeline_data['adtm'] = adtm
return pipelines
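# Summary of the computation above:
#   adtm(p) = mean over the datasets d where pipeline p was run of (max_d - score_{p,d}) / (max_d - min_d)
# with max_d/min_d the best/worst scores observed on dataset d, so 0 means always-best and 1 always-worst.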
def merge_patterns(grammar_patterns):
patterns = sorted(grammar_patterns, key=lambda x: len(x), reverse=True)
empty_elements = set()
skip_patterns = []
for pattern in patterns:
for element in pattern:
modified_pattern = [e for e in pattern if e != element]
for current_pattern in patterns:
if modified_pattern == current_pattern:
empty_elements.add(element)
skip_patterns.append(modified_pattern)
for skip_pattern in skip_patterns:
if skip_pattern in patterns:
patterns.remove(skip_pattern)
return patterns, empty_elements
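# Illustrative example (hypothetical primitive types): given the patterns
# ['IMPUTATION', 'ENCODER', 'CLASSIFICATION'] and ['IMPUTATION', 'CLASSIFICATION'], the shorter one is
# dropped and 'ENCODER' is reported as an empty element, i.e. it becomes optional ('E') in the grammar.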
def is_available_primitive(pipeline_primitives, available_primitives, verbose=False):
for primitive in pipeline_primitives:
if primitive not in available_primitives:
if verbose:
                logger.warning('Primitive %s is no longer available' % primitive)
return False
return True
def patterns_repr(patterns):
patterns_string = []
for pattern in patterns:
pretty_string = ''
pretty_string += 'structure: [%s]' % ', '.join([i for i in pattern['structure']])
pretty_string += ', frequency: %d' % pattern['frequency']
if 'mean_score' in pattern:
pretty_string += ', mean_score: %.3f' % pattern['mean_score']
if 'mean_adtm' in pattern:
pretty_string += ', mean_adtm: %.3f' % pattern['mean_adtm']
patterns_string.append(pretty_string)
return '\n'.join(patterns_string)
def test_dataset(dataset_id, task_name='TASK'):
from os.path import join
import json
dataset_folder_path = join('/Users/rlopez/D3M/datasets/seed_datasets_current/', dataset_id)
dataset_path = join(dataset_folder_path, 'TRAIN/dataset_TRAIN/tables/learningData.csv')
problem_path = join(dataset_folder_path, 'TRAIN/problem_TRAIN/problemDoc.json')
with open(problem_path) as fin:
problem_doc = json.load(fin)
task_keywords = problem_doc['about']['taskKeywords']
target_column = problem_doc['inputs']['data'][0]['targets'][0]['colName']
logger.info('Evaluating dataset %s with task keywords=%s' % (dataset_id, str(task_keywords)))
create_metalearningdb_grammar(task_name, dataset_path, target_column, task_keywords)
if __name__ == '__main__':
test_dataset('185_baseball_MIN_METADATA')
| 48.983389 | 165 | 0.686788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,566 | 0.241795 |
5723328e5cd271a82c8d25b908bc2b420246795d | 4,512 | py | Python | deap_learning.py | fzjcdt/Genetic-CNN | 6bd53f3f429434557b7fbf1122020259d910f618 | [
"Apache-2.0"
]
| 2 | 2019-10-08T08:27:41.000Z | 2021-12-02T07:37:27.000Z | deap_learning.py | fzjcdt/Genetic-CNN | 6bd53f3f429434557b7fbf1122020259d910f618 | [
"Apache-2.0"
]
| null | null | null | deap_learning.py | fzjcdt/Genetic-CNN | 6bd53f3f429434557b7fbf1122020259d910f618 | [
"Apache-2.0"
]
| null | null | null | from deap import base, creator, tools
import random
"""
每个individual是一个list,包含10个元素,需要演化到元素和最小
"""
# ****************************Types********************************
# def create(name, base, **kargs):
# Creates a new class named *name* inheriting from *base*
# A negative weight element corresponds to the minimization of
# the associated objective and positive weight to the maximization.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
# an Individual class that is derived from a list with a fitness attribute set
# to the just created fitness
"""
create("Foo", list, bar=dict, spam=1)
This above line is exactly the same as defining in the :mod:`creator`
module something like the following. ::
class Foo(list):
spam = 1
def __init__(self):
self.bar = dict()
"""
creator.create("Individual", list, fitness=creator.FitnessMin)
# ****************************Initialization********************************
IND_SIZE = 10
toolbox = base.Toolbox()
# def register(self, alias, function, *args, **kargs):
# Register a *function* in the toolbox under the name *alias*.
# *args are automatically passed to *function* as its arguments whenever the alias is called
"""
>>> def func(a, b, c=3):
... print(a, b, c)
...
>>> tools = Toolbox()
>>> tools.register("myFunc", func, 2, c=4)
>>> tools.myFunc(3)
2 3 4
"""
toolbox.register("attribute", random.random)
# def initRepeat(container, func, n):
# Call the function *container* with a generator function corresponding
# to the calling *n* times the function *func*.
"""
>>> initRepeat(list, random.random, 2) # doctest: +ELLIPSIS,
... # doctest: +NORMALIZE_WHITESPACE
[0.6394..., 0.0250...]
"""
# Initialize each Individual with IND_SIZE values drawn from random.random()
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attribute, n=IND_SIZE)
# Fill the population with individuals; n is supplied when the population is created
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# ****************************Operators********************************
def evaluate(individual):
    # The fitness is the sum of the IND_SIZE values in the individual
    # Important: return a 1-tuple, because weights is the 1-tuple (-1.0,)
    # Equivalently: return (sum(individual), )
return sum(individual),
# def cxTwoPoint(ind1, ind2):
# Executes a two-point crossover on the input :term:`sequence` individuals.
toolbox.register("mate", tools.cxTwoPoint)
# gaussian mutation with mu and sigma
# The *indpb* argument is the probability of each attribute to be mutated.
# Each element is nudged up or down by a small Gaussian value
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
# def selTournament(individuals, k, tournsize, fit_attr="fitness"):
# Select the best individual among *tournsize* randomly chosen
# individuals, *k* times. The list returned contains
# references to the input *individuals*.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluate)
def main():
pop = toolbox.population(n=50)
CXPB, MUTPB, NGEN = 0.5, 0.2, 40
# map(func, *iterables) --> map object
# Make an iterator that computes the function using arguments from
# each of the iterables.
    # map calls the function on every element of the iterable and yields each return value
fitnesses = map(toolbox.evaluate, pop)
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
# ind.fitness = fit
for g in range(NGEN):
        # Tournament selection: pick 3 at random and keep the best, repeated until len(pop) are chosen
offspring = toolbox.select(pop, len(pop))
        # The selected offspring still reference the same objects in pop, so clone them;
        # otherwise an individual selected two or more times would be modified in place
offspring = list(map(toolbox.clone, offspring))
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
        # Fitness values were invalidated above; re-evaluate only those individuals instead of the whole population
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
pop[:] = offspring
return pop
for ind in main():
print(evaluate(ind))
| 33.176471 | 89 | 0.629876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,179 | 0.639381 |
57262781980201cf7735ba35e8965dd0cb76ade8 | 1,674 | py | Python | pacman/utils/replay_buffer.py | i-rme/openai-pacman | 4a80ed023ed2bdf031990147acbbeea904b9fc8e | [
"MIT"
]
| 2 | 2020-01-26T23:06:57.000Z | 2021-04-12T08:36:55.000Z | pacman/utils/replay_buffer.py | i-rme/openai-pacman | 4a80ed023ed2bdf031990147acbbeea904b9fc8e | [
"MIT"
]
| null | null | null | pacman/utils/replay_buffer.py | i-rme/openai-pacman | 4a80ed023ed2bdf031990147acbbeea904b9fc8e | [
"MIT"
]
| null | null | null | from collections import deque
import random
import numpy as np
class ReplayBuffer:
'''
construct a buffer object that stores the past
moves and samples a set of subsamples
'''
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.count = 0
self.buffer = deque()
def add(self, s, a, r, d, s2):
'''
add an experience to the buffer
s: current state,
a: action, r: reward,
d: done, s2: next state
'''
experience = (s, a, r, d, s2)
if self.count < self.buffer_size:
self.buffer.append(experience)
self.count += 1
else:
self.buffer.popleft()
self.buffer.append(experience)
def size(self):
return self.count
def clear(self):
self.buffer.clear()
self.count = 0
def sample(self, batch_size):
'''
sample a total of elements equal to batch_size from buffer
if buffer contains enough elements;
otherwise, return all elements
list1 = [1, 2, 3, 4, 5, 6]
random.sample(list1, 3)
--
OUTPUT: [3, 1, 2]
'''
batch = []
if self.count < batch_size:
batch = random.sample(self.buffer, self.count)
else:
batch = random.sample(self.buffer, batch_size)
        # unzip the sampled experiences into one array per field:
        # [array([s_1, ..., s_N]), array([a_1, ..., a_N]), ..., array([s2_1, ..., s2_N])]
s_batch, a_batch, r_batch, d_batch, s2_batch = list(map(np.array, list(zip(*batch))))
return s_batch, a_batch, r_batch, d_batch, s2_batch | 27.442623 | 93 | 0.548387 | 1,610 | 0.961768 | 0 | 0 | 0 | 0 | 0 | 0 | 641 | 0.382915 |
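
if __name__ == '__main__':
    # Minimal usage sketch; the 4-dimensional states and the constants below are arbitrary
    # illustration values, not part of the original module.
    buffer = ReplayBuffer(buffer_size=100)
    for step in range(10):
        state = np.zeros(4)
        next_state = np.ones(4)
        buffer.add(state, a=1, r=0.5, d=False, s2=next_state)
    s_batch, a_batch, r_batch, d_batch, s2_batch = buffer.sample(batch_size=4)
    print(s_batch.shape, a_batch.shape)  # (4, 4) and (4,)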
5726ab8f943f02dfa0eee1936447786383a1ce72 | 9,126 | py | Python | tests/entities/test_creature.py | Flame753/ARPG | f931d3437a83995b43bdddc68cb5ba89922dc259 | [
"MIT"
]
| null | null | null | tests/entities/test_creature.py | Flame753/ARPG | f931d3437a83995b43bdddc68cb5ba89922dc259 | [
"MIT"
]
| null | null | null | tests/entities/test_creature.py | Flame753/ARPG | f931d3437a83995b43bdddc68cb5ba89922dc259 | [
"MIT"
]
| null | null | null | # Standard library imports
from pprint import pprint
import unittest
# Local application imports
from context import entities
from entities import creatures
from entities import items
from entities import currency
from entities import slots
class TestCreature(unittest.TestCase):
def setUp(self):
self.dagger = items.Dagger()
self.copper_coin = currency.CopperCoin()
self.bread = items.Bread()
def equipment_slot_helper(self, creature_obj, answer):
list_of_slots = [slots.Head, slots.Body, slots.Legs, slots.Boots, slots.OneHanded, slots.TwoHanded]
for slot in list_of_slots:
creature_obj.equippable_slots.slots.get(slot)._ensure_inventory()
self.assertDictEqual(creature_obj.equippable_slots.slots.get(slot).inventory, answer)
def test_class_initializer(self):
creature_A = creatures.Creature()
creature_B = creatures.Creature()
self.assertFalse(creature_A is creature_B)
self.assertFalse(creature_A.equippable_slots is creature_B.equippable_slots)
self.assertEqual(creature_A.inventory, creature_B.inventory)
self.assertFalse(creature_A.inventory is creature_B.inventory)
self.assertEqual(creature_A.coin_pouch, creature_B.coin_pouch)
self.assertFalse(creature_A.coin_pouch is creature_B.coin_pouch)
def test_add_item(self):
creature = creatures.Creature()
creature.add_item(self.dagger)
self.assertDictEqual(creature.inventory.inventory,
{self.dagger: {'amount': 1}})
creature.add_item(self.copper_coin, 2)
self.assertDictEqual(creature.coin_pouch.inventory,
{self.copper_coin: {'amount': 2}})
creature.add_item(self.bread, 6)
self.assertDictEqual(creature.inventory.inventory,
{self.bread: {'amount': 6}, self.dagger: {'amount': 1}})
creature.add_item(self.dagger, 3)
self.assertDictEqual(creature.inventory.inventory,
{self.bread: {'amount': 6}, self.dagger: {'amount': 4}})
def test_remove_item(self):
creature = creatures.Creature()
        # Testing when removing an item from an empty dict
result = creature.remove_item(self.dagger)
self.assertFalse(result)
creature.add_item(self.dagger)
creature.remove_item(self.dagger)
self.assertDictEqual(creature.inventory.inventory, {})
creature.add_item(self.copper_coin, 8)
creature.remove_item(self.copper_coin, 3)
self.assertDictEqual(creature.coin_pouch.inventory,
{self.copper_coin: {'amount': 5}})
def test_equip(self):
creature = creatures.Creature()
        # Equipping a dagger that the creature does not have
result = creature.equip(self.dagger)
self.assertFalse(result)
        # Verifying that no inventory was added
answer = {}
self.equipment_slot_helper(creature, answer)
result = creature.inventory.inventory
self.assertDictEqual(result, answer)
self.assertFalse(hasattr(creature.coin_pouch, 'inventory'))
        # Equipping a non-equippable item
creature.add_item(self.copper_coin)
result = creature.equip(self.copper_coin)
self.assertFalse(result)
        # Verifying that no inventory was added
answer = {self.copper_coin: {'amount': 1}}
result = creature.coin_pouch.inventory
self.assertDictEqual(result, answer)
answer = {}
self.equipment_slot_helper(creature, answer)
result = creature.inventory.inventory
self.assertDictEqual(result, answer)
# Equipping a dagger
creature.add_item(self.dagger)
result = creature.equip(self.dagger)
self.assertTrue(result)
answer = {self.dagger: {'amount': 1}}
result = creature.inventory.inventory
self.assertDictEqual(result, answer)
answer = {self.dagger: {'amount': 1}}
result = creature.equippable_slots.slots.get(slots.OneHanded).inventory
self.assertDictEqual(result, answer)
        # Equipping a non-equippable item
creature.add_item(self.bread)
result = creature.equip(self.bread)
self.assertFalse(result)
def test_unequip(self):
creature = creatures.Creature()
        # Unequipping an item that doesn't exist
result = creature.unequip(self.dagger)
self.assertFalse(result)
        # Verifying that no inventory was added
answer = {}
self.equipment_slot_helper(creature, answer)
self.assertFalse(hasattr(creature.inventory, 'inventory'))
creature.add_item(self.copper_coin)
result = creature.unequip(self.copper_coin)
self.assertFalse(result)
        # Verifying that no inventory was added
answer = {}
self.equipment_slot_helper(creature, answer)
self.assertFalse(hasattr(creature.inventory, 'inventory'))
answer = {self.copper_coin: {'amount': 1}}
result = creature.coin_pouch.inventory
self.assertDictEqual(result, answer)
# Preparing for next test case
creature.remove_item(self.copper_coin)
        # Actually testing the removal of an item
creature.add_item(self.dagger)
creature.equip(self.dagger)
result = creature.unequip(self.dagger)
self.assertTrue(result)
answer = {}
self.equipment_slot_helper(creature, answer)
result = creature.coin_pouch.inventory
self.assertDictEqual(result, answer)
answer = {self.dagger: {'amount': 1}}
result = creature.inventory.inventory
self.assertDictEqual(result, answer)
def test_calculate_item_worth(self):
creature = creatures.Creature()
copper_amount = 10
bread_amount = 5
dagger_amount = 5
creature.add_item(self.copper_coin, copper_amount)
creature.add_item(self.bread, bread_amount)
creature.add_item(self.dagger, dagger_amount)
creature.equip(self.dagger)
result = creature.calculate_item_worth(self.copper_coin)
self.assertEqual(result, copper_amount*self.copper_coin.worth)
result = creature.calculate_item_worth(self.dagger)
self.assertEqual(result, dagger_amount*self.dagger.worth)
result = creature.calculate_item_worth(self.bread)
self.assertEqual(result, bread_amount*self.bread.worth)
def test_calculate_total_worth(self):
creature = creatures.Creature()
copper_amount = 10
bread_amount = 5
dagger_amount = 5
creature.add_item(self.copper_coin, copper_amount)
creature.add_item(self.bread, bread_amount)
creature.add_item(self.dagger, dagger_amount)
creature.equip(self.dagger)
result = creature.calculate_total_worth()
answer = (self.copper_coin.worth * copper_amount) + \
(self.dagger.worth * dagger_amount) + \
(self.bread.worth * bread_amount)
self.assertEqual(result, answer)
def test_type_error(self):
creature = creatures.Creature()
test_num = 2
test_string = 'Test'
test_list = [7]
test_dict = {"s":2}
test_tuple = (2, "2")
test_case = [test_num, test_string, test_list, test_dict, test_tuple, [], {}, ()]
for case in test_case:
func = creature.add_item
self.assertRaises(TypeError, func, case)
self.assertRaises(TypeError, func, (self.dagger, case))
func = creature.remove_item
self.assertRaises(TypeError, func, case)
self.assertRaises(TypeError, func, (self.dagger, case))
func = creature.equip
self.assertRaises(TypeError, func, case)
func = creature.unequip
self.assertRaises(TypeError, func, case)
func = creature.calculate_item_worth
self.assertRaises(TypeError, func, case)
func = creature.calculate_total_worth
self.assertRaises(TypeError, func, case)
def test_value_error(self):
creature = creatures.Creature()
test_case = -32
func = creature.add_item
self.assertRaises(TypeError, func, (self.dagger, test_case))
func = creature.remove_item
self.assertRaises(TypeError, func, (self.dagger, test_case))
def test_EquippedItemRemovealError(self):
creature = creatures.Creature()
        # Removing an item that is still equipped should raise EquippedItemRemovealError
creature.add_item(self.dagger)
creature.equip(self.dagger)
self.assertRaises(creatures.EquippedItemRemovealError, creature.remove_item, self.dagger)
def suite():
suite = unittest.TestSuite()
    suite.addTest(TestCreature('test_add_item'))
return suite
if __name__ == '__main__':
unittest.main()
# runner = unittest.TextTestRunner()
# runner.run(suite()) | 36.504 | 107 | 0.655928 | 8,651 | 0.947951 | 0 | 0 | 0 | 0 | 0 | 0 | 833 | 0.091278 |
5727029dde1ffe2dc5b477e3ae9e9d629cfe867a | 729 | py | Python | tile/io.py | ViliamV/tile | a2b105143341f250690b8034076ba9214e9ed787 | [
"MIT"
]
| null | null | null | tile/io.py | ViliamV/tile | a2b105143341f250690b8034076ba9214e9ed787 | [
"MIT"
]
| null | null | null | tile/io.py | ViliamV/tile | a2b105143341f250690b8034076ba9214e9ed787 | [
"MIT"
]
| null | null | null | import collections
import re
from typing import Iterator, TextIO
from .constants import TILE_END, TILE_START, TILE_WARNING
def from_file(file: TextIO) -> Iterator[str]:
with file as f:
yield from f
def to_file(file: TextIO, lines: Iterator[str]):
with file as f:
f.write("\n".join(lines))
def to_nowhere(lines: Iterator[str]):
collections.deque(lines, 0)
def to_config(config_file: TextIO, lines: Iterator[str]):
with config_file as f:
text = f.read()
f.seek(0)
f.truncate()
text = re.sub(fr"^{TILE_START}\n.*{TILE_END}\n?", r"", text, flags=re.MULTILINE | re.DOTALL)
f.write("\n".join((text.strip(), TILE_START, TILE_WARNING, *lines, TILE_END)))
| 24.3 | 100 | 0.650206 | 0 | 0 | 86 | 0.11797 | 0 | 0 | 0 | 0 | 45 | 0.061728 |
572995aff10ad23755f80a0359fa3ca259ee111e | 199 | py | Python | testfiles/benchmarks/send_multiple.py | marcolamartina/PASTEL | 8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925 | [
"MIT"
]
| null | null | null | testfiles/benchmarks/send_multiple.py | marcolamartina/PASTEL | 8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925 | [
"MIT"
]
| null | null | null | testfiles/benchmarks/send_multiple.py | marcolamartina/PASTEL | 8e1e0fd086a26b7c50f15fe87ffe5dbd007cf925 | [
"MIT"
]
| 1 | 2020-07-08T11:23:22.000Z | 2020-07-08T11:23:22.000Z | import binascii
from pwn import *
def send(r,num):
r.sendline(str(num))
port = 1234
server = '127.0.0.1'
sleep(1)
for i in range(10000):
r = remote(server, port)
send(r,i)
r.close()
| 15.307692 | 28 | 0.623116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.055276 |
572a6ba98328eb8f0c8ea9e03989e22fca55780e | 271 | py | Python | backend/users/serializers.py | rmisiarek/django_vue_base | 440459fdd73209e47567fb3572c056a05dc1c45a | [
"MIT"
]
| 2 | 2019-04-28T20:26:13.000Z | 2020-05-04T03:18:23.000Z | backend/users/serializers.py | rmisiarek/django_vue_base | 440459fdd73209e47567fb3572c056a05dc1c45a | [
"MIT"
]
| 22 | 2019-12-04T22:34:42.000Z | 2022-02-12T07:12:29.000Z | backend/users/serializers.py | shrekdev/Django_Vue_Base | 4f200358724bce137f9c5e723036b280e4fd81e2 | [
"MIT"
]
| null | null | null | from djoser.serializers import UserCreateSerializer
from .models import CustomUser
class CustomUserRegistrationSerializer(UserCreateSerializer):
class Meta(UserCreateSerializer.Meta):
model = CustomUser
fields = ('first_name', 'email', 'password')
| 27.1 | 61 | 0.763838 | 185 | 0.682657 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.107011 |
572d902af0ded1f19ce7ceba83a724a3db7dd67b | 30 | py | Python | flaskslack/__init__.py | Jamiewu2/flask-slack-template | 8a2168aaab03ed080a3eab186c20a488cdf2055e | [
"MIT"
]
| null | null | null | flaskslack/__init__.py | Jamiewu2/flask-slack-template | 8a2168aaab03ed080a3eab186c20a488cdf2055e | [
"MIT"
]
| null | null | null | flaskslack/__init__.py | Jamiewu2/flask-slack-template | 8a2168aaab03ed080a3eab186c20a488cdf2055e | [
"MIT"
]
| null | null | null | name = "flask_slack_template"
| 15 | 29 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.733333 |
572e21eca46eed4b2a282d7364cf1c249e13c730 | 6,704 | py | Python | cpu.py | fernandozanutto/PyNES | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | [
"Apache-2.0"
]
| null | null | null | cpu.py | fernandozanutto/PyNES | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | [
"Apache-2.0"
]
| null | null | null | cpu.py | fernandozanutto/PyNES | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | [
"Apache-2.0"
]
| null | null | null | from typing import Dict
from time import time_ns
from bus import Bus
from instructions.generic_instructions import Instruction
from rom import ROM
from status import Status
import instructions.instructions as i_file
import instructions.jump_instructions as j_file
import instructions.load_instructions as l_file
import instructions.store_instructions as s_file
import instructions.stack_instructions as t_file
import instructions.arithmetic_instructions as a_file
import instructions.logical_instructions as log_file
import instructions.nop_instructions as n_file
import instructions.unofficial_instructions as u_file
class CPU:
def __init__(self, bus: Bus, debug: bool = False):
self.rom = None
self.bus = bus
self.debug = debug
        self.cycle = 7  # TODO: verify; starts at 7 to match nestest.log
# status registers: store a single byte
self.status_reg: Status = None
# counter registers: store a single byte
self.pc_reg: int = None # program counter
self.sp_reg: int = None # stack pointer
# data registers: store a single byte
self.x_reg: int = None # x register
self.y_reg: int = None # y register
self.a_reg: int = None # a register
        # main-loop flag: True while the CPU keeps executing instructions
self.running: bool = True
# create the instructions that the cpu can interpret
instructions_list = self.find_instructions(Instruction)
self.instructions: Dict[bytes, Instruction] = {}
for instruction in instructions_list:
if instruction.identifier_byte in self.instructions.keys():
raise Exception('Duplicate instruction identifier bytes ' + instruction.identifier_byte.hex())
self.instructions[instruction.identifier_byte] = instruction
def start_up(self, callback):
"""
set the initial values of cpu registers
status reg: 000100 (irqs disabled)
x, y, a regs: 0
stack pointer: $FD
$4017: 0 (frame irq disabled)
$4015: 0 (sound channels disabled)
$4000-$400F: 0 (sound registers)
"""
self.callback = callback
self.pc_reg = 0
self.status_reg = Status() # know as 'P' on NesDev Wiki
self.sp_reg = 0xFD
self.x_reg = 0
self.y_reg = 0
self.a_reg = 0
# TODO implement memory sets
self.bus.write_memory(0x4015, 0, num_bytes=2)
self.bus.write_memory(0x4017, 0, num_bytes=2)
def push_to_stack(self, value, size):
for i in range(size):
self.bus.write_memory(0x0100 + self.sp_reg, (value >> (8 * (size - i - 1))) & 255, num_bytes=1)
self.sp_reg -= 1
def pull_from_stack(self, size):
value = 0
for i in range(size):
self.sp_reg += 1
value += self.bus.read_memory(0x0100 + self.sp_reg) << (8 * i)
return value
def find_instructions(self, cls) -> list[Instruction]:
"""
find all available instructions
"""
subclasses = [subc for subc in cls.__subclasses__() if subc.identifier_byte is not None]
return subclasses + [g for s in cls.__subclasses__() for g in self.find_instructions(s)]
def run_rom(self, rom: ROM):
# load rom
self.rom = rom
self.pc_reg = 0xC000 # first rom address
if rom.is_snake_rom:
self.pc_reg = 0x0600
self.rom.memory_start_location = 0
for i in range(len(rom.get_memory())):
self.bus.write_memory(0x0600 + i, int.from_bytes(rom.get(i), 'little'))
# run program
self.running = True
i = 0
last_time = time_ns()
while self.running:
i += 1
if self.bus.get_nmi_status():
                self.push_to_stack(self.pc_reg, 2)
status_reg_copy = self.status_reg.copy()
status_reg_copy.bits[Status.StatusTypes.break1] = 0
status_reg_copy.bits[Status.StatusTypes.break2] = 1
self.push_to_stack(status_reg_copy.to_int(), 1)
self.status_reg.bits[Status.StatusTypes.interrupt] = 1
self.bus.tick(2)
                self.pc_reg = self.bus.read_memory(0xFFFA)
continue
# get the current byte at pc
identifier_byte = self.bus.read_memory(self.pc_reg)
if type(identifier_byte) == int:
identifier_byte = bytes([identifier_byte])
# turn the byte into an Instruction
instruction = self.instructions.get(identifier_byte)
if instruction is None:
raise Exception('PC: {} Instruction not found: {}'.format(hex(self.pc_reg), identifier_byte))
# get the data bytes
data_bytes = self.bus.read_memory_bytes(self.pc_reg + 1, instruction.data_length)
if self.debug:
self.debug_print(self.pc_reg, identifier_byte, data_bytes, instruction)
self.pc_reg += instruction.get_instruction_length()
value = instruction.execute(self, data_bytes)
instr_cycles = instruction.get_cycles()
self.cycle += instr_cycles
self.status_reg.update(instruction, value)
cur_time = time_ns()
if self.debug and cur_time - last_time > 0:
print('time for running instruction', cur_time - last_time, identifier_byte)
last_time = cur_time
self.bus.tick(instr_cycles)
self.callback()
cur_time = time_ns()
if self.debug and cur_time - last_time > 0:
print('time for running ui', cur_time - last_time)
last_time = cur_time
def debug_print(self, pc_reg: int, identifier_byte, data_bytes, instruction):
# print out diagnostic information
# example: C000 4C F5 C5 JMP $C5F5 A:00 X:00 Y:00 P:24 SP:FD PPU: 0, 0 CYC:
registers_state = [
hex(self.a_reg)[2:].upper(),
hex(self.x_reg)[2:].upper(),
hex(self.y_reg)[2:].upper(),
hex(self.status_reg.to_int())[2:].upper(),
hex(self.sp_reg)[2:].upper()
]
inst_bytes = (identifier_byte + data_bytes).hex().upper()
rng = range(0, len(inst_bytes), 2)
inst_hexes = [inst_bytes[i:i + 2] for i in rng]
print("{:0>4} {:<8} {:<31} A:{:0>2} X:{:0>2} Y:{:0>2} P:{:0>2} SP:{} CYC:{}".format(
hex(pc_reg)[2:].upper(),
' '.join(inst_hexes),
instruction.__name__[0:3].upper(),
*registers_state,
self.cycle
))
| 33.688442 | 110 | 0.604117 | 6,082 | 0.90722 | 0 | 0 | 0 | 0 | 0 | 0 | 1,173 | 0.17497 |
572f76ae3e95b7728b1969e8e8e8f04344ee7dae | 227 | py | Python | solution.py | t-pastore/gamesolver | 39b0ba45350f0b558c53c40e7cc659577059d369 | [
"Apache-2.0"
]
| 1 | 2021-01-27T03:14:07.000Z | 2021-01-27T03:14:07.000Z | solution.py | t-pastore/gamesolver | 39b0ba45350f0b558c53c40e7cc659577059d369 | [
"Apache-2.0"
]
| null | null | null | solution.py | t-pastore/gamesolver | 39b0ba45350f0b558c53c40e7cc659577059d369 | [
"Apache-2.0"
]
| null | null | null | class Solution():
def __init__(self):
self.pureNE = []
self.mixedNE = []
def appendPureNE(self, tup):
self.pureNE.append(tup)
def appendMixedNE(self, tup):
self.mixedNE.append(tup) | 20.636364 | 33 | 0.585903 | 227 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
57301fb1a81fd60d2a17effeed4478182c84a5a9 | 2,622 | py | Python | timeouts/aa.py | kapsitis/ddgatve-stat | 684fac54b9d0b8e7891f58bf1fb32605a2d87a3c | [
"Apache-2.0"
]
| null | null | null | timeouts/aa.py | kapsitis/ddgatve-stat | 684fac54b9d0b8e7891f58bf1fb32605a2d87a3c | [
"Apache-2.0"
]
| null | null | null | timeouts/aa.py | kapsitis/ddgatve-stat | 684fac54b9d0b8e7891f58bf1fb32605a2d87a3c | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import datetime
import os
import subprocess
import re
import urllib2
import math
####################################################################
## TODO: Replace this function by another one, which simply reads all lines from a file
####################################################################
def readLines(fPath):
if not os.path.isfile(fPath):
open(fPath, 'a').close()
with open(fPath) as ff:
lines = ff.read().splitlines()
return lines
def currentUser():
#pattern = re.compile(r"^(anna|eduards|kalvis|laima|marta|nelda).*$",re.MULTILINE)
pattern = re.compile(r"^(anna|eduards|kalvis|laima|marta|nelda)+\s.*$",re.MULTILINE)
m = pattern.match(subprocess.check_output("who"))
if m:
return m.group(1)
else:
return "nobody"
def getStatus():
url = 'http://85.254.250.28/downloads1/timeouts/index.html'
try:
response=urllib2.urlopen(url,timeout=1)
webFile = response.read()
pattern2 = re.compile(r"^green",re.MULTILINE)
m2 = pattern2.match(webFile)
if m2:
return "green"
else:
return "yellow"
except urllib2.URLError as err: pass
return "offline"
def getYellowCount(theLog):
yellowCount = 0
grayCount = 0
ll = readLines(theLog)
for theLine in ll:
if (theLine.find("yellow") >= 0) or (theLine.find("red") >= 0):
yellowCount = yellowCount + 1
if (theLine.find("green") >= 0):
yellowCount = 0
if (theLine.find("offline") >= 0):
grayCount = grayCount + 1
yellowCount = yellowCount + (grayCount//12)
return(yellowCount)
#####################################################################
## Find the right logfile; logfiles are different for different days
#####################################################################
ddat = datetime.datetime.now()
theLog = '/home/kalvis/.timeouts/access-{yyyy}-{mm}-{dd}.log'.format( \
yyyy = ddat.year, mm=ddat.month, dd=ddat.day)
yellowCount = getYellowCount(theLog)
status = getStatus()
if yellowCount >= 5:
status = "red"
if yellowCount > 1:
os.system("/home/kalvis/.timeouts/msg.py")
logline = '{user}:{time}:{status}({yellowCount})\n'.format(user=currentUser(), \
    time=ddat,status=status,yellowCount=yellowCount)
if not os.path.isfile(theLog):
open(theLog, 'a').close()
with open(theLog, "a") as myfile:
myfile.write(logline)
if yellowCount >= 5:
from subprocess import call
call(["pkill","-KILL","-u","marta"])
| 27.030928 | 88 | 0.558734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 873 | 0.332952 |
5731a1d1075f24d7e3e476ce3662c03caca1f970 | 1,786 | py | Python | files_to_html_md5.py | jasonivey/scripts | 09f9702e5ce62abbb7699aae16b45b33711fe856 | [
"MIT"
]
| null | null | null | files_to_html_md5.py | jasonivey/scripts | 09f9702e5ce62abbb7699aae16b45b33711fe856 | [
"MIT"
]
| null | null | null | files_to_html_md5.py | jasonivey/scripts | 09f9702e5ce62abbb7699aae16b45b33711fe856 | [
"MIT"
]
| null | null | null | import os
import sys
import hash_utils
def StartIndexPhp(outfile):
outfile.write( '<?php $rootdirectory = \'../../\' ?>\n\n' )
outfile.write( '<html>\n' )
outfile.write( '<head>\n' )
outfile.write( '\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n' )
outfile.write( '\t<title>TV Episodes Downloads</title>\n' )
outfile.write( '</head>\n\n' )
outfile.write( '<?php include( \'../../header.php\' ); ?>\n' )
outfile.write( '<table align="center" width="600"><tr><td>\n\n' )
outfile.write( '<h1 align=center>TV Episodes Downloads</h1>\n' )
outfile.write( '<hr>\n\n' )
outfile.write( '<table align="center" width="600">\n' )
outfile.write( '\t<tr>\n' )
outfile.write( '\t\t<th align=center>Episode Name</th>\n' )
outfile.write( '\t\t<th align=center>MD5SUM</th>\n' )
outfile.write( '\t</tr>\n' )
def CloseIndexPhp(outfile):
outfile.write( '</table>\n\n' )
outfile.write( '<?php include( \'../../footer.php\' ); ?>\n' )
outfile.write( '</html>\n' )
if __name__ == '__main__':
with open('index.php', 'w') as outfile:
        StartIndexPhp(outfile)
for f in os.listdir('.'):
if f == 'index.php' or f.find( '?' ) != -1 or f.find( 'System Volume Information' ) != -1 or f.find( 'RECYCLER' ) != -1:
continue
if os.path.isdir(f):
md5str = hash_utils.md5sum(f)
print(f + ' - ' + md5str)
outfile.write( '\t<tr>\n' )
outfile.write( '\t\t<td align=center><a href="' + f + '">' + f + '</a></td>\n' )
outfile.write( '\t\t<td align=center>' + md5str + '</td>\n' )
outfile.write( '\t</tr>\n' )
CloseIndexPhp( outfile )
| 43.560976 | 133 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.409854 |
573671e14e06512a6056d7ef96ce655d220e4a19 | 2,857 | py | Python | Run_exphydro_distributed_type1_pso.py | sopanpatil/exp-hydro | 7295dddc4df1028f669a223e1b631a4a91669515 | [
"MIT"
]
| 11 | 2016-11-25T13:05:26.000Z | 2022-03-25T03:24:16.000Z | Run_exphydro_distributed_type1_pso.py | sopanpatil/exp-hydro | 7295dddc4df1028f669a223e1b631a4a91669515 | [
"MIT"
]
| null | null | null | Run_exphydro_distributed_type1_pso.py | sopanpatil/exp-hydro | 7295dddc4df1028f669a223e1b631a4a91669515 | [
"MIT"
]
| 6 | 2017-03-28T12:06:00.000Z | 2021-09-16T17:50:34.000Z | #!/usr/bin/env python
# Programmer(s): Sopan Patil.
""" MAIN PROGRAM FILE
Run this file to optimise the model parameters of the spatially distributed
version of EXP-HYDRO model using Particle Swarm Optimisation (PSO) algorithm.
Type 1 Model:
- This type of distributed model is pixel based (i.e., all sub-components
have the same drainage area).
- All pixels receive the same meteorological inputs.
- Channel routing is ignored and it is assumed that streamflow generated from
each pixel reaches the catchment outlet on same day.
"""
import numpy
import os
import time
import matplotlib.pyplot as plt
from exphydro.distributed import ExphydroDistrParameters
from exphydro.distributed.type1 import ExphydroDistrModel
from hydroutils import Calibration, ObjectiveFunction
start_time = time.time()
######################################################################
# SET WORKING DIRECTORY
# Getting current directory, i.e., directory containing this file
dir1 = os.path.dirname(os.path.abspath('__file__'))
# Setting to current directory
os.chdir(dir1)
######################################################################
# MAIN PROGRAM
# Load meteorological and observed flow data
P = numpy.genfromtxt('SampleData/P_test.txt') # Observed rainfall (mm/day)
T = numpy.genfromtxt('SampleData/T_test.txt') # Observed air temperature (deg C)
PET = numpy.genfromtxt('SampleData/PET_test.txt') # Potential evapotranspiration (mm/day)
Qobs = numpy.genfromtxt('SampleData/Q_test.txt') # Observed streamflow (mm/day)
# Specify the number of pixels in the catchment
npixels = 5
# Specify the no. of parameter sets (particles) in a PSO swarm
npart = 10
# Generate 'npart' initial EXP-HYDRO model parameters
params = [ExphydroDistrParameters(npixels) for j in range(npart)]
# Initialise the model by loading its climate inputs
model = ExphydroDistrModel(P, PET, T, npixels)
# Specify the start and end day numbers of the calibration period.
# This is done separately for the observed and simulated data
# because they might not be of the same length in some cases.
calperiods_obs = [365, 2557]
calperiods_sim = [365, 2557]
# Calibrate the model to identify optimal parameter set
paramsmax = Calibration.pso_maximise(model, params, Qobs, ObjectiveFunction.klinggupta, calperiods_obs, calperiods_sim)
print ('Calibration run KGE value = ', paramsmax.objval)
# Run the optimised model for validation period
Qsim = model.simulate(paramsmax)
kge = ObjectiveFunction.klinggupta(Qobs[calperiods_obs[1]:], Qsim[calperiods_sim[1]:])
print ('Independent run KGE value = ', kge)
print("Total runtime: %s seconds" % (time.time() - start_time))
# Plot the observed and simulated hydrographs
plt.plot(Qobs[calperiods_obs[0]:], 'b-')
plt.plot(Qsim[calperiods_sim[0]:], 'r-')
plt.show()
######################################################################
| 35.7125 | 119 | 0.716136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,797 | 0.628981 |
5738d01ad1ed866e8e47c9a1f5dadbf2cfce3611 | 11,104 | py | Python | multi_input_multi_output/train.py | alt113/CS591-Multimodal-Spring2021 | f28bade729818aa51fd131e86f1ba2271cca8947 | [
"MIT"
]
| null | null | null | multi_input_multi_output/train.py | alt113/CS591-Multimodal-Spring2021 | f28bade729818aa51fd131e86f1ba2271cca8947 | [
"MIT"
]
| 1 | 2021-05-03T18:59:43.000Z | 2021-05-03T19:04:19.000Z | multi_input_multi_output/train.py | alt113/CS591-Multimodal-Spring2021 | f28bade729818aa51fd131e86f1ba2271cca8947 | [
"MIT"
]
| null | null | null | import os
from multi_input_multi_output.models import MultiNet
from shared_weights.helpers import config, utils
from shared_weights.helpers.siamese_network import create_encoder
from data.data_tf import fat_dataset
import tensorflow as tf
from tensorflow import keras
# ----------------------
def flatten_model(model_nested):
layers_flat = []
for layer in model_nested.layers:
try:
layers_flat.extend(layer.layers)
except AttributeError:
layers_flat.append(layer)
model_flat = keras.models.Sequential(layers_flat)
return model_flat
""" Data augmentation"""
augmentation_input = keras.layers.Input(shape=config.IMG_SHAPE)
data_augmentation = keras.layers.experimental.preprocessing.RandomTranslation(
height_factor=(-0.2, 0.2),
width_factor=(-0.2, 0.2),
fill_mode="constant"
)(augmentation_input)
data_augmentation = keras.layers.experimental.preprocessing.RandomFlip(mode="horizontal")(data_augmentation)
data_augmentation = keras.layers.experimental.preprocessing.RandomRotation(factor=0.15,
fill_mode="constant")(data_augmentation)
augmentation_output = keras.layers.experimental.preprocessing.RandomZoom(height_factor=(-0.3, 0.1),
width_factor=(-0.3, 0.1),
fill_mode="constant")(data_augmentation)
data_augmentation = keras.Model(augmentation_input, augmentation_output)
""" Unsupervised contrastive loss"""
class RepresentationLearner(keras.Model):
def __init__(
self,
encoder,
projection_units,
num_augmentations,
temperature=1.0,
dropout_rate=0.1,
l2_normalize=False,
**kwargs
):
super(RepresentationLearner, self).__init__(**kwargs)
self.encoder = encoder
# Create projection head.
self.projector = keras.Sequential(
[
keras.layers.Dropout(dropout_rate),
keras.layers.Dense(units=projection_units, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.ReLU(),
]
)
self.num_augmentations = num_augmentations
self.temperature = temperature
self.l2_normalize = l2_normalize
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def compute_contrastive_loss(self, feature_vectors, batch_size):
num_augmentations = tf.shape(feature_vectors)[0] // batch_size
if self.l2_normalize:
feature_vectors = tf.math.l2_normalize(feature_vectors, -1)
# The logits shape is [num_augmentations * batch_size, num_augmentations * batch_size].
logits = (
tf.linalg.matmul(feature_vectors, feature_vectors, transpose_b=True)
/ self.temperature
)
# Apply log-max trick for numerical stability.
logits_max = tf.math.reduce_max(logits, axis=1)
logits = logits - logits_max
# The shape of targets is [num_augmentations * batch_size, num_augmentations * batch_size].
        # targets is a matrix consisting of num_augmentations x num_augmentations submatrices of shape [batch_size, batch_size].
# Each [batch_size * batch_size] submatrix is an identity matrix (diagonal entries are ones).
targets = tf.tile(tf.eye(batch_size), [num_augmentations, num_augmentations])
# Compute cross entropy loss
return keras.losses.categorical_crossentropy(
y_true=targets, y_pred=logits, from_logits=True
)
def call(self, inputs):
# Create augmented versions of the images.
augmented = []
for _ in range(self.num_augmentations):
x = data_augmentation(inputs)
augmented.append(x)
augmented = keras.layers.Concatenate(axis=0)(augmented)
# Generate embedding representations of the images.
features = self.encoder(augmented)
# Apply projection head.
return self.projector(features)
def train_step(self, data):#inputs):
inputs = data[0]
batch_size = tf.shape(inputs)[0]
# Run the forward pass and compute the contrastive loss
with tf.GradientTape() as tape:
feature_vectors = self(inputs, training=True)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update loss tracker metric
self.loss_tracker.update_state(loss)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):#inputs):
inputs = data[0]
batch_size = tf.shape(inputs)[0]
feature_vectors = self(inputs, training=False)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
""" Train the model"""
network_input = keras.layers.Input(shape=config.IMG_SHAPE)
# Load RGB vision encoder.
r_encoder = create_encoder(base='resnet50', pretrained=True)(network_input)
encoder_output = keras.layers.Dense(config.HIDDEN_UNITS)(r_encoder)
r_encoder = keras.Model(network_input, encoder_output)
# Create representation learner.
r_representation_learner = RepresentationLearner(
r_encoder, config.PROJECTION_UNITS, num_augmentations=2, temperature=0.1
)
r_representation_learner.build((None, 128, 128, 3))
# base_path = os.environ['PYTHONPATH'].split(os.pathsep)[1]
# representation_learner.load_weights(base_path + '/multi_input_multi_output/simclr/weights/simclr_resnet50_rgb_scratch_weights.h5')
r_representation_learner.load_weights(config.RGB_MODALITY_WEIGHT_PATH)
functional_model = flatten_model(r_representation_learner.layers[0])
rgb_encoder = functional_model.layers[1]
# Load Depth vision encoder.
d_encoder = create_encoder(base='resnet50', pretrained=True)(network_input)
encoder_output = keras.layers.Dense(config.HIDDEN_UNITS)(d_encoder)
d_encoder = keras.Model(network_input, encoder_output)
# Create representation learner.
d_representation_learner = RepresentationLearner(
d_encoder, config.PROJECTION_UNITS, num_augmentations=2, temperature=0.1
)
d_representation_learner.build((None, 128, 128, 3))
# base_path = os.environ['PYTHONPATH'].split(os.pathsep)[1]
# representation_learner.load_weights(base_path + '/multi_input_multi_output/simclr/weights/simclr_resnet50_rgb_scratch_weights.h5')
d_representation_learner.load_weights(config.DEPTH_MODALITY_WEIGHT_PATH)
functional_model = flatten_model(d_representation_learner.layers[0])
depth_encoder = functional_model.layers[1]
# ----------------------
# RGB
rgb_input = keras.layers.Input(shape=config.IMG_SHAPE)
# rgb_encoder = keras.applications.ResNet50V2(include_top=False,
# weights=None,
# input_shape=config.IMG_SHAPE,
# pooling="avg")
rgb = rgb_encoder(rgb_input)
rgb = keras.layers.Dropout(config.DROPOUT_RATE)(rgb)
rgb = keras.layers.Dense(config.HIDDEN_UNITS, activation="relu")(rgb)
rgb = keras.layers.Dropout(config.DROPOUT_RATE)(rgb)
rgb = keras.layers.Flatten()(rgb)
rgb = keras.layers.Dense(config.NUM_OF_CLASSES, activation="softmax")(rgb)
rgb_classifier = keras.models.Model(inputs=rgb_input, outputs=rgb, name='rgb_classifier')
for layer in rgb_classifier.layers:
layer._name += '_rgb'
layer.trainable = True
print('[INFO] built rgb classifier')
print(rgb_classifier.summary())
# Depth
depth_input = keras.layers.Input(shape=config.IMG_SHAPE)
# depth_encoder = keras.applications.ResNet50V2(include_top=False,
# weights=None,
# input_shape=config.IMG_SHAPE,
# pooling="avg")
depth = depth_encoder(depth_input)
depth = keras.layers.Dropout(config.DROPOUT_RATE)(depth)
depth = keras.layers.Dense(config.HIDDEN_UNITS, activation="relu")(depth)
depth = keras.layers.Dropout(config.DROPOUT_RATE)(depth)
depth = keras.layers.Flatten()(depth)
depth = keras.layers.Dense(config.NUM_OF_CLASSES, activation="softmax")(depth)
depth_classifier = keras.models.Model(inputs=depth_input, outputs=depth, name='depth_classifier')
for layer in depth_classifier.layers:
layer._name += '_depth'
layer.trainable = True
print('[INFO] built depth classifier')
print(depth_classifier.summary())
# Build and compile MultiNet
multinet_class = MultiNet(rgb_classifier=rgb_classifier,
rgb_output_branch=rgb,
depth_classifier=depth_classifier,
depth_output_branch=depth)
multinet_class.compile()
multinet_model = multinet_class.model
print('[INFO] built MultiNet classifier')
# train the network to perform multi-output classification
train_ds = fat_dataset(split='train',
data_type='all',
batch_size=config.BATCH_SIZE,
shuffle=True,
pairs=False)
val_ds = fat_dataset(split='validation',
data_type='all',
batch_size=config.BATCH_SIZE,
shuffle=True,
pairs=False)
print("[INFO] training MultiNet...")
counter = 0
history = None
toCSV = []
while counter <= config.EPOCHS:
counter += 1
print(f'* Epoch: {counter}')
data_batch = 0
for imgs, labels in train_ds:
data_batch += 1
history = multinet_model.train_on_batch(x=[imgs[:, 0], imgs[:, 1]],
y={'dense_5_rgb': labels[:], 'dense_7_depth': labels[:]},
reset_metrics=False,
return_dict=True)
print(f'* Data Batch: {data_batch}')
print(f'\t{history}')
break
if counter % 10 == 0:
print("[VALUE] Testing model on batch")
for val_data, val_labels in val_ds:
val_results = multinet_model.test_on_batch(x=[val_data[:, 0], val_data[:, 1]],
y={'dense_5_rgb': val_labels[:], 'dense_7_depth': val_labels[:]})
print(val_results)
toCSV.append(val_results)
print('Saving MultiNet validation results as CSV file')
utils.save_model_history(H=toCSV, path_to_csv=config.FROZEN_SIAMESE_TRAINING_HISTORY_CSV_PATH)
rgb_classifier.save_weights(config.MIMO_RGB_WEIGHTS)
print("Saved RGB model weights to disk")
# serialize weights to HDF5
depth_classifier.save_weights(config.MIMO_DEPTH_WEIGHTS)
print("Saved Depth model weights to disk")
| 40.525547 | 132 | 0.665616 | 3,696 | 0.332853 | 0 | 0 | 67 | 0.006034 | 0 | 0 | 2,625 | 0.236401 |
573a1fa313f96c01ab6df0ada017abeca301701e | 856 | py | Python | tools/rebuild_caches.py | newbdoc/lookyloo | 53a8952fccaf9ae42fa582d3475283babd55d08a | [
"BSD-3-Clause"
]
| 148 | 2020-06-14T06:55:42.000Z | 2022-03-19T05:37:02.000Z | tools/rebuild_caches.py | newbdoc/lookyloo | 53a8952fccaf9ae42fa582d3475283babd55d08a | [
"BSD-3-Clause"
]
| 261 | 2020-06-16T22:29:27.000Z | 2022-03-31T10:40:52.000Z | tools/rebuild_caches.py | newbdoc/lookyloo | 53a8952fccaf9ae42fa582d3475283babd55d08a | [
"BSD-3-Clause"
]
| 27 | 2020-06-08T12:28:33.000Z | 2022-02-15T18:50:50.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
from lookyloo.lookyloo import Indexing, Lookyloo
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
level=logging.INFO)
def main():
parser = argparse.ArgumentParser(description='Rebuild the redis cache.')
parser.add_argument('--rebuild_pickles', default=False, action='store_true', help='Delete and rebuild the pickles. Count 20s/pickle, it can take a very long time.')
args = parser.parse_args()
lookyloo = Lookyloo()
if args.rebuild_pickles:
lookyloo.rebuild_all()
else:
lookyloo.rebuild_cache()
indexing = Indexing()
indexing.clear_indexes()
# This call will rebuild all the caches as needed.
lookyloo.sorted_capture_cache()
if __name__ == '__main__':
main()
| 25.939394 | 168 | 0.684579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.339953 |
573ad54818708562c075e93c746dc4448d743b12 | 740 | py | Python | save_restore_model/tf1/restore1_1.py | zlpmichelle/crackingtensorflow | 66c3517b60c3793ef06f904e5d58e4d044628182 | [
"Apache-2.0"
]
| 3 | 2017-10-19T23:41:26.000Z | 2019-10-22T08:59:35.000Z | save_restore_model/tf1/restore1_1.py | zlpmichelle/crackingtensorflow | 66c3517b60c3793ef06f904e5d58e4d044628182 | [
"Apache-2.0"
]
| null | null | null | save_restore_model/tf1/restore1_1.py | zlpmichelle/crackingtensorflow | 66c3517b60c3793ef06f904e5d58e4d044628182 | [
"Apache-2.0"
]
| null | null | null | import tensorflow as tf
sess=tf.Session()
#First let's load meta graph and restore weights
saver = tf.train.import_meta_graph('/Users/lipingzhang/Downloads/model/my_tf_model-1000.meta')
saver.restore(sess,tf.train.latest_checkpoint('/Users/lipingzhang/Downloads/model/'))
# Now, let's access and create placeholders variables and
# create feed-dict to feed new data
graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name("w1:0")
w2 = graph.get_tensor_by_name("w2:0")
feed_dict ={w1:13.0,w2:17.0}
#Now, access the op that you want to run.
op_to_restore = graph.get_tensor_by_name("op_to_restore:0")
#Add more to the current graph
add_on_op = tf.multiply(op_to_restore,2)
print(sess.run(add_on_op, feed_dict))
#This will print 120. | 30.833333 | 94 | 0.777027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.481081 |
573b50d93fdcd613c5e4eb9cd5d3608413327c07 | 633 | py | Python | src/game.py | LuisMarques99/Number-Guesser-Terminal | 6abfac23268022f7ce3776a20d1d6f550955d6c8 | [
"MIT"
]
| null | null | null | src/game.py | LuisMarques99/Number-Guesser-Terminal | 6abfac23268022f7ce3776a20d1d6f550955d6c8 | [
"MIT"
]
| null | null | null | src/game.py | LuisMarques99/Number-Guesser-Terminal | 6abfac23268022f7ce3776a20d1d6f550955d6c8 | [
"MIT"
]
| null | null | null | from random import randrange
def main():
MIN = 1
MAX = 100
NUMBER = randrange(MIN, MAX + 1)
guesses = 9
print(f"Guess a number from {MIN} to {MAX}.\nYou have {guesses} chances. Start now!\n")
while guesses > 0:
guess = input(f"Guess ({guesses}): ")
guesses -= 1
try:
guess = int(guess)
if guess == NUMBER:
print("You won!")
break
if guesses == 0:
print(f"\nYou ran out of guesses... Best luck next time.\nThe number was [{NUMBER}].")
else:
print("Smaller\n" if (guess > NUMBER) else "Bigger\n")
except ValueError:
print("Enter just the number.\n")
if __name__ == "__main__":
main() | 21.827586 | 90 | 0.624013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.391785 |
573b7032640a85abec559a72d8a9edcb24834621 | 378 | py | Python | Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
]
| 1 | 2022-01-22T18:19:07.000Z | 2022-01-22T18:19:07.000Z | Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
]
| null | null | null | Data Structures and Algorithms/HackerRank Algo Solutions/EASY PROBLEMS/Arrays.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | [
"MIT"
]
| null | null | null | # ARRAYS-DS HACKERANK SOLUTION:
# creating a function to reverse the array.
def reverseArray(arr):
# reversing the array.
reversed = arr[::-1]
# returning the reversed array.
return reversed
# receiving input.
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
# printing the output.
print(reverseArray(arr)) | 22.235294 | 47 | 0.653439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.457672 |
573eb0d44cfa9120f4cdede91149047e20c421a4 | 1,456 | py | Python | bmds_server/analysis/admin.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
]
| 1 | 2019-07-09T16:42:15.000Z | 2019-07-09T16:42:15.000Z | bmds_server/analysis/admin.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
]
| 103 | 2016-11-14T15:58:53.000Z | 2022-03-07T21:01:03.000Z | bmds_server/analysis/admin.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
]
| 2 | 2017-03-17T20:43:22.000Z | 2018-01-04T19:15:18.000Z | from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.db.models import TextChoices
from django.utils.html import format_html
from . import models
class CustomQuerysetsFilter(SimpleListFilter):
title = "custom"
parameter_name = "custom"
class CustomQuerysetChoices(TextChoices):
MAYBE_HANGING = "hanging"
def lookups(self, request, model_admin):
return self.CustomQuerysetChoices.choices
def queryset(self, request, queryset):
value = self.value()
if value == self.CustomQuerysetChoices.MAYBE_HANGING:
return models.Analysis.maybe_hanging(queryset)
@admin.register(models.Analysis)
class AnalysisAdmin(admin.ModelAdmin):
list_display = ("__str__", "view_url", "edit_url", "created", "is_finished", "deletion_date")
readonly_fields = ("password",)
list_filter = (
CustomQuerysetsFilter,
"started",
"ended",
"deletion_date",
)
def view_url(self, obj):
return format_html(f"<a href='{obj.get_absolute_url()}'>View</a>")
view_url.short_description = "View"
def edit_url(self, obj):
return format_html(f"<a href='{obj.get_edit_url()}'>Edit</a>")
edit_url.short_description = "Edit"
@admin.register(models.Content)
class Content(admin.ModelAdmin):
list_display = ("id", "content_type", "subject", "created", "last_updated")
list_filter = ("content_type",)
| 28.54902 | 97 | 0.691621 | 1,194 | 0.820055 | 0 | 0 | 791 | 0.543269 | 0 | 0 | 296 | 0.203297 |
573ec927838cc2f17f74c48d89acf3a9486bfe1d | 96 | py | Python | venv/lib/python3.8/site-packages/numpy/distutils/fcompiler/pg.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
]
| 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/numpy/distutils/fcompiler/pg.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/numpy/distutils/fcompiler/pg.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| null | null | null | /home/runner/.cache/pip/pool/34/e0/75/b2dceb8ef40c652edb20f4e059370015eddc8cdbde039f92ced519a83d | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5742d249cea7cefa19d4a4ea9010a2450f58aa8b | 552 | py | Python | 03/00/0.py | pylangstudy/201801 | eee9cfd2b370153359183d3c8f7fe117f4392142 | [
"CC0-1.0"
]
| null | null | null | 03/00/0.py | pylangstudy/201801 | eee9cfd2b370153359183d3c8f7fe117f4392142 | [
"CC0-1.0"
]
| null | null | null | 03/00/0.py | pylangstudy/201801 | eee9cfd2b370153359183d3c8f7fe117f4392142 | [
"CC0-1.0"
]
| null | null | null | #https://qiita.com/stkdev/items/a44976fb81ae90a66381
#import imaplib, re, email, six, dateutil.parser
import imaplib, re, email
email_default_encoding = 'iso-2022-jp'
def main():
gmail = imaplib.IMAP4_SSL("imap.gmail.com")
username = 'user'
password = 'pass'
gmail.login(username, password)#imaplib.error: b'[ALERT] Please log in via your web browser: https://support.google.com/mail/accounts/answer/78754 (Failure)'
    gmail.select('INBOX') # select the inbox
    gmail.select('register') # select the label
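    # Sketch of a possible next step (assumption, not part of the original source):
    # after selecting a mailbox one would typically search for and fetch messages, e.g.
    #   typ, data = gmail.search(None, 'ALL')
    #   typ, msg_data = gmail.fetch(data[0].split()[-1], '(RFC822)')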
if __name__ == '__main__':
main()
| 32.470588 | 161 | 0.71558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.598305 |
57452ea96aff7c8f3e31ad97f424bdd254f5bb63 | 5,468 | py | Python | sql/filewalk.py | kylef-lab41/Redwood | c4e1c8284444b91246e52c165ea150eea23b26b9 | [
"Apache-2.0"
]
| null | null | null | sql/filewalk.py | kylef-lab41/Redwood | c4e1c8284444b91246e52c165ea150eea23b26b9 | [
"Apache-2.0"
]
| null | null | null | sql/filewalk.py | kylef-lab41/Redwood | c4e1c8284444b91246e52c165ea150eea23b26b9 | [
"Apache-2.0"
]
| null | null | null | import binascii
import datetime
import hashlib
import mimetypes
import os
import re
import struct
import subprocess
import sys
import time
import urllib
import csv
from Queue import Queue
# 8-byte unique ID generator, given a path.
# - first five bytes are the first five bytes of the md5 digest of the path name
# - last three bytes are the first three bytes of the packed current time
# Returns a long
def generateUniqueId(path):
m = hashlib.md5()
m.update(path)
first_five = m.digest()[:5]
last_three = struct.pack("I", int(time.time()))[:3]
combined = first_five + last_three
return long(binascii.hexlify(combined), 16)
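# Illustrative usage (not part of the original script; the path is hypothetical):
#   uid = generateUniqueId('/tmp/example.txt')
#   isinstance(uid, long)  # True: 5 md5 bytes + 3 time bytes packed into one integer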
def write_stat_info(basename, dirname, file_id, parent_id, dirname_digest, csv_writer):
#need to escape commas from base name and dirname since we are creating a csv
path = os.path.join(dirname, basename)
try:
stat_obj = os.stat(path)
except Exception:
# print "Error trying to stat {}".format(path)
return
url = urllib.pathname2url(path)
file_type = mimetypes.guess_type(url)[0]
hash_val = hash_file(path, file_type)
    # file_id, parent_id, dirname, basename, contents_hash, dirname_hash, fs_id, device, permissions, uid, gid, size, create_time, access_time, mod_time, metadata_change_time, user_flags, links, disk_offset, entropy, file_content_status, extensions, file_type
csv_writer.writerow([file_id, parent_id, dirname, basename, hash_val, dirname_digest, stat_obj.st_ino, stat_obj.st_dev,
str(oct(stat_obj.st_mode)), stat_obj.st_uid, stat_obj.st_gid, stat_obj.st_size, long(os.path.getctime(path)),
long(stat_obj.st_atime), long(stat_obj.st_mtime), long(stat_obj.st_ctime), "", stat_obj.st_nlink, "", "", "",
os.path.splitext(basename)[1], file_type])
BUFFER = 4096
def hash_file(path, file_type):
ret = ""
# some files you can't hash
if(file_type == 'inode/chardevice' \
or file_type == 'inode/symlink' \
or file_type == 'inode/socket' \
or file_type == 'inode/blockdevice' \
or file_type == 'inode/x-empty' \
or file_type == 'application/x-coredump' \
or file_type == 'inode/directory'):
ret = "0"
return ret
fd = None
try:
h = hashlib.sha1()
fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
data = os.read(fd, BUFFER)
while(len(data)>0):
h.update(data)
data = os.read(fd, BUFFER)
ret = h.hexdigest()
except Exception, err:
# print "Hash Error: {} on file {} with type {}".format(err, path,
# file_type)
pass
finally:
if(fd != None):
os.close(fd)
return ret
omitted_dirs = ['/dev', '/proc', '/sys', '/Volumes', '/mnt', '/net']
def main(argv):
if(len(argv) != 5):
print "filewalk.py <directory> <os> <source> <output_dir>"
return
#make sure output dir exists
if os.path.exists(argv[4]) is False:
print "Output dir {} does not exist".format(argv[4])
return
today = datetime.date.today()
str_date = today.strftime('%Y-%m-%d')
out_file = os.path.join(argv[4], "{}--{}--{}".format(str_date, argv[2], argv[3]))
start_dir = argv[1]
stack = list()
with open(out_file, "w") as file_handle:
csv_writer = csv.writer(file_handle)
csv_writer.writerow(["file_id","parent_id","dirname","basename","contents_hash", "dirname_hash", "fs_id","device","permissions",
"uid","gid","size","create_time","access_time","mod_time","metadata_change_time",
"user_flags","links","disk_offset","entropy","file_content_status","extensions","file_type"])
# start the queue with a 0 value
stack.append(0L)
for root, dirs, files in os.walk(start_dir):
# We want to have a nice, dynamic output that doesn't flood the
# terminal with lines of text. So we'll write a line, then flush it
# with '\r'. In order to do this properly, we need to first measure
# the width of the terminal.
# We're also going to put it inside the loop in case the window
# gets resized while it's running
rows,columns = os.popen('stty size', 'r').read().split()
rows = int(rows)
columns = int(columns)
parent_id = stack.pop()
#some directories we will ignore as so
if root in omitted_dirs:
del dirs[:]
continue
sys.stdout.write('\r')
sys.stdout.write(' ' * columns)
sys.stdout.write('\r')
sys.stdout.write('processing {}'.format(root[:columns-12]))
sys.stdout.flush()
new_parent_id = generateUniqueId(root)
# for each of the child dirs, add the parent id. This assumes a BFS
# search
for d in dirs:
stack.append(new_parent_id)
h = hashlib.sha1()
h.update(root)
root_digest = h.hexdigest()
# write the parent directory
write_stat_info("/", root, new_parent_id, parent_id, root_digest,csv_writer)
for f in files:
_id = generateUniqueId(os.path.join(root, f))
write_stat_info(f, root, _id, new_parent_id, root_digest, csv_writer)
file_handle.flush()
if __name__=="__main__":
main(sys.argv)
| 32.939759 | 211 | 0.602597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,726 | 0.315655 |
574587d505f7c19dabd0452d40b6544e75b9a682 | 10,136 | py | Python | processing_scripts/database_update/pokedex_entry.py | CorentG/Pokecube-Issues-and-Wiki | 690af5d8499561f65f761fd49fbf5fc2bc85c4c3 | [
"MIT"
]
| 24 | 2019-02-02T20:37:53.000Z | 2022-02-09T13:51:41.000Z | processing_scripts/database_update/pokedex_entry.py | CorentG/Pokecube-Issues-and-Wiki | 690af5d8499561f65f761fd49fbf5fc2bc85c4c3 | [
"MIT"
]
| 671 | 2018-08-20T08:46:35.000Z | 2022-03-26T00:11:43.000Z | processing_scripts/database_update/pokedex_entry.py | CorentG/Pokecube-Issues-and-Wiki | 690af5d8499561f65f761fd49fbf5fc2bc85c4c3 | [
"MIT"
]
| 68 | 2018-09-25T21:03:40.000Z | 2022-02-25T19:59:51.000Z | import csv_loader
import moves_names
def getSingle(name, data, file, value):
return data.get_info(name,value, expected_file=file, use_names_map=True)[file][0]
def getExpYield(name, data):
return int(getSingle(name, data,"pokemon" ,"base_experience"))
def getHeight(name, data):
return int(getSingle(name, data,"pokemon" ,"height")) / 10.0
def getWeight(name, data):
return int(getSingle(name, data,"pokemon" ,"weight")) / 10.0
def getGenderRatio(name, data):
rates = {}
val = int(getSingle(name, data,"pokemon_species" ,"gender_rate"))
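    # gender_rate in the source CSV is expressed in eighths-of-female (-1 = genderless);
    # the table below maps it to a 0-255 gender-ratio byte (target scale assumed).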
rates[-1] = 255
rates[0] = 0
rates[1] = 30
rates[2] = 62
rates[4] = 127
rates[6] = 191
rates[7] = 225
rates[8] = 254
return rates[val]
def getCaptureRate(name, data):
return int(getSingle(name, data,"pokemon_species" ,"capture_rate"))
def getBaseFriendship(name, data):
return int(getSingle(name, data,"pokemon_species" ,"base_happiness"))
def getExpMode(name, data):
stats = data.get_info(name,"growth_rate_id", expected_file="pokemon_species", use_names_map=True)["pokemon_species"]
_id = stats[0]
stats = data.get_entry(_id, expected_file="growth_rate_prose", use_names_map=True)["growth_rate_prose"]
for row in stats:
_id = row[1]
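            # language id 9 is assumed to correspond to English in the *_prose CSVs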
if _id == '9':
return row[2]
return None
def getLevelMoves(name, data):
moves = {}
names = []
try:
moves_entries = data.get_entry(name, expected_file="pokemon_moves", use_names_map=True)["pokemon_moves"]
version = 1
# First, locate the most recent version that has lvl up moves
for entry in moves_entries:
if entry[4] == "0":
continue
vers = int(entry[1])
if vers > version:
version = vers
version = str(version)
# Now we can actually parse the moves
for entry in moves_entries:
# TODO figure out if a move is an evolution move, is that info here?
if entry[4] == "0":
continue
if entry[1] != version:
continue
level = entry[4]
move_id = entry[2]
move = data.get_info(move_id,"identifier", expected_file="moves")["moves"][0]
move_, conf = csv_loader.match_name(move, moves_names.moves)
if(conf < 80):
print("{} -> {} ({})".format(move, move_, conf))
else:
move = move_
if level in moves.keys():
moves[level] = moves[level]+","+move
else:
moves[level] = move
if not move in names:
names.append(move)
except:
# print("No moves found for {}".format(name))
pass
return moves, names
def getAllMoves(name, data, exclude=[]):
names = []
try:
moves_entries = data.get_entry(name, expected_file="pokemon_moves", use_names_map=True)["pokemon_moves"]
for entry in moves_entries:
move_id = entry[2]
move = data.get_info(move_id,"identifier", expected_file="moves")["moves"][0]
move_, conf = csv_loader.match_name(move, moves_names.moves)
if(conf < 80):
print("{} -> {} ({})??".format(move, move_, conf))
else:
move = move_
if move in exclude or move in names:
continue
names.append(move)
except:
# print("No moves found for {}".format(name))
pass
return names
def getTypes(name, data):
types_nums = data.get_info(name,"type_id", expected_file="pokemon_types", use_names_map=True)["pokemon_types"]
types = []
for num in types_nums:
names = data.get_info(num,"identifier", expected_file="types")["types"]
types.append(names[0])
return types
def getStats(name, data):
stats = []
# TODO maybe validate that these are in the correct order, the index is also stored
# in the csv file, so that validation can be done if needed!
stats = data.get_info(name,"base_stat", expected_file="pokemon_stats", use_names_map=True)["pokemon_stats"]
return stats
def getEVs(name, data):
stats = []
# TODO maybe validate that these are in the correct order, the index is also stored
# in the csv file, so that validation can be done if needed!
stats = data.get_info(name,"effort", expected_file="pokemon_stats", use_names_map=True)["pokemon_stats"]
return stats
def getAbilities(name, data):
hidden = []
abilities = ["",""]
rows = data.get_entry(name, expected_file="pokemon_abilities", use_names_map=True)["pokemon_abilities"]
for row in rows:
ability_id = row[1]
isHidden = row[2]
slot = int(row[3]) - 1
ability_name = data.get_info(ability_id,"identifier", expected_file="abilities")["abilities"][0]
if ability_name == '':
continue
if isHidden == "1":
hidden.append(ability_name)
elif slot < len(abilities):
abilities[slot] = ability_name
return abilities, hidden
def sorter(e):
return int(e)
class Pokedex(object):
def __init__(self, names, data, originals, do_moves, do_stats):
self.pokemon = []
for name in names:
defaults = None
if name in originals:
defaults = originals[name]
try:
entry = PokedexEntry(name, data, defaults, do_moves, do_stats)
if do_moves and not "moves" in entry.map:
continue
self.pokemon.append(entry.map)
except Exception as err:
print("Error with {} {}, Using default if present? {}".format(name, err, defaults is not None))
if defaults is not None and do_stats:
self.pokemon.append(defaults)
class PokedexEntry(object):
def __init__(self, name, data, defaults, do_moves, do_stats):
_map = self.map = {}
_map["name"] = name
if do_stats:
_map["number"] = int(getSingle(name, data, "pokemon", "species_id"))
id = int(getSingle(name, data, "pokemon", "id"))
is_default = id == _map["number"]
if(is_default):
_map["base"] = True
_map["stats"] = {}
statsOrder = ["hp", "atk", "def", "spatk", "spdef", "spd"]
# Do the base stats
stats = getStats(name, data)
_map["stats"]["stats"] = {}
values = _map["stats"]["stats"]["values"] = {}
for i in range(len(statsOrder)):
values[statsOrder[i]] = stats[i]
if defaults is not None:
_map["stats"]["sizes"] = defaults["stats"]["sizes"]
else:
print("Cannot copy sizes for {}".format(name))
# Do the evs
stats = getEVs(name, data)
_map["stats"]["evs"] = {}
values = _map["stats"]["evs"]["values"] = {}
for i in range(len(statsOrder)):
if stats[i] == "0":
continue
values[statsOrder[i]] = stats[i]
# Get the types
types = getTypes(name,data)
_map["stats"]["types"] = {}
values = _map["stats"]["types"]["values"] = {}
for i in range(len(types)):
ident = "type{}".format(i+1)
values[ident] = types[i]
# Get Abilities
abilities, hidden = getAbilities(name, data)
_map["stats"]["abilities"] = {}
values = _map["stats"]["abilities"]["values"] = {}
if len(abilities) > 0:
normals = abilities[0]
if len(abilities) > 1:
for i in range(1, len(abilities)):
if abilities[i] != "":
normals = normals +", "+abilities[i]
values["normal"] = normals
if len(hidden) > 0:
hiddens = hidden[0]
if len(hidden) > 1:
for i in range(1, len(hidden)):
if hidden[i] != "":
hiddens = hiddens +", "+hidden[i]
values["hidden"] = hiddens
# Get the simple values
_map["stats"]["mass"] = getWeight(name, data)
_map["stats"]["baseExp"] = getExpYield(name, data)
# This set is not defined for all targets, so try/except them
try:
_map["stats"]["captureRate"] = getCaptureRate(name, data)
except:
pass
try:
_map["stats"]["baseFriendship"] = getBaseFriendship(name, data)
except:
pass
try:
_map["stats"]["genderRatio"] = getGenderRatio(name, data)
except:
pass
try:
_map["stats"]["expMode"] = getExpMode(name, data)
except:
pass
if do_moves:
# Do the moves
# First lvl up moves
moves, names = getLevelMoves(name, data)
moves_list = getAllMoves(name, data, exclude=names)
if len(moves) != 0 or len(moves_list) != 0:
_map["moves"] = {}
elif defaults is not None:
_map["moves"] = defaults["moves"]
print("Not Updating moves for {}".format(name))
if len(moves) > 0:
lvlMoves = _map["moves"]["lvlupMoves"] = {}
levels = [x for x in moves.keys()]
levels.sort(key=sorter)
for level in levels:
lvlMoves[level] = moves[level]
# Then remainder
moves = ""
if len(moves_list)>0:
moves = moves_list[0]
for i in range(1, len(moves_list)):
moves = moves +", "+moves_list[i]
misc = _map["moves"]["misc"] = {}
misc["moves"] = moves
| 37.128205 | 120 | 0.530979 | 4,976 | 0.490923 | 0 | 0 | 0 | 0 | 0 | 0 | 1,959 | 0.193272 |
5746c4fc2776ee414b40d5372100f22e8a3258f4 | 25,539 | py | Python | tests/test_add.py | open-contracting/kingfisher-views | 7887610a144493f2ccd0d9a22cf43157dc180479 | [
"BSD-3-Clause"
]
| 2 | 2019-02-19T16:15:19.000Z | 2020-07-25T04:05:45.000Z | tests/test_add.py | open-contracting/kingfisher-views | 7887610a144493f2ccd0d9a22cf43157dc180479 | [
"BSD-3-Clause"
]
| 142 | 2019-03-11T15:14:22.000Z | 2020-11-11T19:26:09.000Z | tests/test_add.py | open-contracting/kingfisher-views | 7887610a144493f2ccd0d9a22cf43157dc180479 | [
"BSD-3-Clause"
]
| 5 | 2019-04-11T14:11:10.000Z | 2020-07-30T22:45:59.000Z | import datetime
import decimal
from unittest.mock import patch
import pytest
from click.testing import CliRunner
from psycopg2 import sql
from manage import SUMMARIES, cli, construct_where_fragment
from tests import assert_bad_argument, assert_log_records, assert_log_running, fixture, noop
command = 'add'
TABLES = {
'note',
}
SUMMARY_TABLES = set()
SUMMARY_VIEWS = set()
FIELD_LIST_TABLES = set()
NO_FIELD_LIST_TABLES = set()
NO_FIELD_LIST_VIEWS = set()
for table_name, table in SUMMARIES.items():
FIELD_LIST_TABLES.add(f'{table_name}_field_list')
if table.is_table:
SUMMARY_TABLES.add(table_name)
NO_FIELD_LIST_TABLES.add(f'{table_name}_no_field_list')
else:
SUMMARY_VIEWS.add(table_name)
NO_FIELD_LIST_VIEWS.add(f'{table_name}_no_field_list')
TABLES.add(f'{table_name}_no_data')
def test_construct_where_fragment(db):
assert construct_where_fragment(db.cursor, 'a', 'z') == " AND d.data->>'a' = 'z'"
assert construct_where_fragment(db.cursor, 'a.b', 'z') == " AND d.data->'a'->>'b' = 'z'"
assert construct_where_fragment(db.cursor, 'a.b.c', 'z') == " AND d.data->'a'->'b'->>'c' = 'z'"
assert construct_where_fragment(db.cursor, 'a.b.c.d', 'z') == " AND d.data->'a'->'b'->'c'->>'d' = 'z'"
assert construct_where_fragment(db.cursor, 'a.b.c', '') == " AND d.data->'a'->'b'->>'c' = ''"
assert construct_where_fragment(db.cursor, '', 'z') == " AND d.data->>'' = 'z'"
@pytest.mark.parametrize('collections, message', [
('a', 'Collection IDs must be integers'),
('1,10,100', 'Collection IDs {10, 100} not found'),
])
def test_validate_collections(collections, message, caplog):
runner = CliRunner()
result = runner.invoke(cli, [command, collections])
assert result.exit_code == 2
assert_bad_argument(result, 'COLLECTIONS', message)
assert_log_running(caplog, command)
def test_validate_name(caplog):
runner = CliRunner()
result = runner.invoke(cli, [command, '1', '--name', 'camelCase'])
assert result.exit_code == 2
assert_bad_argument(result, '--name', 'value must be lowercase')
assert_log_running(caplog, command)
@patch('manage.summary_tables', noop)
@patch('manage.field_counts', noop)
@patch('manage.field_lists', noop)
@pytest.mark.parametrize('kwargs, name, collections', [
({}, 'collection_1', (1,)),
({'collections': '1,2'}, 'collection_1_2', (1, 2)),
({'name': 'custom'}, 'custom', (1,)),
])
def test_command_name(kwargs, name, collections, db, caplog):
schema = f'view_data_{name}'
identifier = sql.Identifier(schema)
with fixture(db, **kwargs) as result:
assert db.schema_exists(schema)
assert db.all('SELECT collection_id, schema FROM summaries.selected_collections WHERE schema=%(schema)s',
{'schema': schema}) == [(collection, schema,) for collection in collections]
assert db.all(sql.SQL('SELECT id, note FROM {schema}.note').format(schema=identifier)) == [
(1, 'Default'),
]
assert result.exit_code == 0
assert result.output == ''
assert_log_records(caplog, command, [
f'Arguments: collections={collections!r} note=Default name={kwargs.get("name")} tables_only=False '
'filters=()',
f'Added {name}',
'Running summary-tables routine',
'Running field-counts routine',
'Running field-lists routine',
])
@pytest.mark.parametrize('filters', [(), (('ocid', 'dolore'),)])
@pytest.mark.parametrize('tables_only, field_counts, field_lists, tables, views', [
(False, True, False,
TABLES | SUMMARY_TABLES, SUMMARY_VIEWS),
(True, True, False,
TABLES | SUMMARY_TABLES | SUMMARY_VIEWS, set()),
(False, False, True,
TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES, SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS),
(True, False, True,
TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES | SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS, set()),
])
def test_command(db, tables_only, field_counts, field_lists, tables, views, filters, caplog):
# Load collection 2 first, to check that existing collections aren't included when we load collection 1.
with fixture(db, collections='2', tables_only=tables_only, field_counts=field_counts, field_lists=field_lists,
filters=filters), fixture(db, tables_only=tables_only, field_counts=field_counts,
field_lists=field_lists, filters=filters) as result:
# Check existence of schema, tables and views.
if field_counts:
tables.add('field_counts')
assert db.schema_exists('view_data_collection_1')
assert db.schema_exists('view_data_collection_2')
assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
"AND table_type = 'BASE TABLE'", {'schema': 'view_data_collection_1'})) == tables
assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
"AND table_type = 'VIEW'", {'schema': 'view_data_collection_1'})) == views
# Check contents of summary relations.
rows = db.all("""
SELECT
award_index,
release_type,
collection_id,
ocid,
release_id,
award_id,
title,
status,
description,
value_amount,
value_currency,
date,
contractperiod_startdate,
contractperiod_enddate,
contractperiod_maxextentdate,
contractperiod_durationindays,
total_suppliers,
total_documents,
document_documenttype_counts,
total_items
FROM view_data_collection_1.awards_summary
ORDER BY id, award_index
""")
assert rows[0] == (
0, # award_index
'release', # release_type
1, # collection_id
'dolore', # ocid
'ex laborumsit autein magna veniam', # release_id
'reprehenderit magna cillum eu nisi', # award_id
'laborum aute nisi eiusmod', # award_title
'pending', # award_status
'ullamco in voluptate', # award_description
decimal.Decimal('-95099396'), # award_value_amount
'AMD', # award_value_currency
datetime.datetime(3263, 12, 5, 21, 24, 19, 161000), # award_date
datetime.datetime(4097, 9, 16, 5, 55, 19, 125000), # award_contractperiod_startdate
datetime.datetime(4591, 4, 29, 6, 34, 28, 472000), # award_contractperiod_enddate
datetime.datetime(3714, 8, 9, 7, 21, 37, 544000), # award_contractperiod_maxextentdate
decimal.Decimal('72802012'), # award_contractperiod_durationindays
2, # total_suppliers
4, # total_documents
{
'Excepteur nisi et': 1,
'proident exercitation in': 1,
'ut magna dolore velit aute': 1,
'veniam enim aliqua d': 1,
}, # document_documenttype_counts
5, # total_items
)
if filters:
assert len(rows) == 4
else:
assert len(rows) == 301
rows = db.all("""
SELECT
party_index,
release_type,
collection_id,
ocid,
release_id,
party_id,
roles,
identifier,
unique_identifier_attempt,
additionalidentifiers_ids,
total_additionalidentifiers
FROM view_data_collection_1.parties_summary
ORDER BY id, party_index
""")
assert rows[0] == (
0, # party_index
'release', # release_type
1, # collection_id
'dolore', # ocid
'ex laborumsit autein magna veniam', # release_id
'voluptate officia tempor dolor', # party_id
[
'ex ',
'in est exercitation nulla Excepteur',
'ipsum do',
], # roles
'ad proident dolor reprehenderit veniam-in quis exercitation reprehenderit', # identifier
'voluptate officia tempor dolor', # unique_identifier_attempt
[
'exercitation proident voluptate-sed culpa eamollit consectetur dolor l',
'magna-dolor ut indolorein in tempor magna mollit',
'ad occaecat amet anim-laboris ea Duisdeserunt quis sed pariatur mollit',
'elit mollit-officia proidentmagna',
'ex-minim Ut consectetur',
], # additionalidentifiers_ids
5, # total_additionalidentifiers
)
if filters:
assert len(rows) == 4
else:
assert len(rows) == 296
if field_counts:
# Check contents of field_counts table.
rows = db.all('SELECT * FROM view_data_collection_1.field_counts')
if filters:
assert len(rows) == 1046
assert rows[0] == (1, 'release', 'awards', 1, 4, 1)
else:
assert len(rows) == 65235
assert rows[0] == (1, 'release', 'awards', 100, 301, 100)
if field_lists:
# Check the count of keys in the field_list field for the lowest primary keys in each summary relation.
statement = """
SELECT
count(*)
FROM
(SELECT
jsonb_each(field_list)
FROM (
SELECT
field_list
FROM
view_data_collection_1.{table}
ORDER BY
{primary_keys}
LIMIT 1) AS field_list
) AS each
"""
expected = {
'award_documents_summary': 11,
'award_items_summary': 26,
'award_suppliers_summary': 28,
'awards_summary': 469,
'buyer_summary': 28,
'contract_documents_summary': 11,
'contract_implementation_documents_summary': 11,
'contract_implementation_milestones_summary': 29,
'contract_implementation_transactions_summary': 83,
'contract_items_summary': 26,
'contract_milestones_summary': 27,
'contracts_summary': 469,
'parties_summary': 34,
'planning_documents_summary': 11,
'planning_milestones_summary': 29,
'planning_summary': 61,
'procuringentity_summary': 32,
'relatedprocesses_summary': 6,
'release_summary': 1046,
'tender_documents_summary': 15,
'tender_items_summary': 25,
'tender_milestones_summary': 23,
'tender_summary': 228,
'tenderers_summary': 31,
}
for table_name, table in SUMMARIES.items():
count = db.one(db.format(statement, table=table_name, primary_keys=table.primary_keys))[0]
assert count == expected[table_name], f'{table_name}: {count} != {expected[table_name]}'
def result_dict(statement):
result = db.one(statement)
return {column.name: result for column, result in zip(db.cursor.description, result)}
statement = """
SELECT
count(*) total,
sum(coalesce((field_list ->> 'contracts')::int, 0)) contracts,
sum(coalesce((field_list ->> 'awards')::int, 0)) awards,
sum(coalesce((field_list ->> 'awards/id')::int, 0)) awards_id,
sum(coalesce((field_list ->> 'awards/value/amount')::int, 0)) awards_amount
FROM
view_data_collection_1.contracts_summary
"""
if filters:
assert result_dict(statement) == {
'awards': 1,
'awards_amount': 1,
'awards_id': 1,
'contracts': 0,
'total': 1,
}
else:
assert result_dict(statement) == {
'awards': 213,
'awards_amount': 213,
'awards_id': 213,
'contracts': 0,
'total': 285,
}
statement = """
SELECT
count(*) total,
sum(coalesce((field_list ->> 'awards')::int, 0)) awards,
sum(coalesce((field_list ->> 'contracts')::int, 0)) contracts,
sum(coalesce((field_list ->> 'contracts/id')::int, 0)) contracts_id,
sum(coalesce((field_list ->> 'contracts/value/amount')::int, 0)) contracts_amount
FROM
view_data_collection_1.awards_summary
"""
if filters:
assert result_dict(statement) == {
'contracts': 1,
'contracts_amount': 1,
'contracts_id': 1,
'awards': 0,
'total': 4,
}
else:
assert result_dict(statement) == {
'contracts': 213,
'contracts_amount': 213,
'contracts_id': 213,
'awards': 0,
'total': 301,
}
# All columns have comments.
assert not db.all("""
SELECT
isc.table_name,
isc.column_name,
isc.data_type
FROM
information_schema.columns isc
WHERE
isc.table_schema = %(schema)s
AND LOWER(isc.table_name) NOT IN ('selected_collections', 'note')
AND LOWER(isc.table_name) NOT LIKE '%%_no_data'
AND LOWER(isc.table_name) NOT LIKE '%%_field_list'
AND pg_catalog.col_description(format('%%s.%%s',isc.table_schema,isc.table_name)::regclass::oid,
isc.ordinal_position) IS NULL
""", {'schema': 'view_data_collection_1'})
expected = []
for collection_id in [2, 1]:
expected.extend([
f'Arguments: collections=({collection_id},) note=Default name=None tables_only={tables_only!r} '
f'filters={filters!r}',
f'Added collection_{collection_id}',
'Running summary-tables routine',
])
if field_counts:
expected.append('Running field-counts routine')
if field_lists:
expected.append('Running field-lists routine')
assert result.exit_code == 0
assert result.output == ''
assert_log_records(caplog, command, expected)
@pytest.mark.parametrize('filters', [
(('tender.procurementMethod', 'direct'),),
(('tender.procurementMethod', 'direct'), ('tender.status', 'planned'),),
])
@pytest.mark.parametrize('tables_only, field_counts, field_lists, tables, views', [
(False, True, False,
TABLES | SUMMARY_TABLES, SUMMARY_VIEWS),
(True, True, False,
TABLES | SUMMARY_TABLES | SUMMARY_VIEWS, set()),
(False, False, True,
TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES, SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS),
(True, False, True,
TABLES | FIELD_LIST_TABLES | NO_FIELD_LIST_TABLES | SUMMARY_TABLES | SUMMARY_VIEWS | NO_FIELD_LIST_VIEWS, set()),
])
def test_command_filter(db, tables_only, field_counts, field_lists, tables, views, filters, caplog):
# Load collection 2 first, to check that existing collections aren't included when we load collection 1.
with fixture(db, collections='2', tables_only=tables_only, field_counts=field_counts, field_lists=field_lists,
filters=filters), fixture(db, tables_only=tables_only, field_counts=field_counts,
field_lists=field_lists, filters=filters) as result:
# Check existence of schema, tables and views.
if field_counts:
tables.add('field_counts')
assert db.schema_exists('view_data_collection_1')
assert db.schema_exists('view_data_collection_2')
assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
"AND table_type = 'BASE TABLE'", {'schema': 'view_data_collection_1'})) == tables
assert set(db.pluck("SELECT table_name FROM information_schema.tables WHERE table_schema = %(schema)s "
"AND table_type = 'VIEW'", {'schema': 'view_data_collection_1'})) == views
# Check that the tender_summary table only has correctly filtered items
rows = db.all("""
SELECT
procurementmethod
FROM view_data_collection_1.tender_summary
""")
for row in rows:
assert row[0] == 'direct'
if len(filters) > 1:
assert len(rows) == 2
else:
assert len(rows) == 19
# Check data_id's in the summary against the data table
# This allows us to check that missing data doesn't have the filtered value
rows = db.all("""
SELECT
data_id
FROM view_data_collection_1.release_summary
""")
if len(filters) > 1:
assert len(rows) == 2
else:
assert len(rows) == 19
data_ids = [row[0] for row in rows]
rows = db.all("""
SELECT
data.id,
data.data->'tender'->'procurementMethod',
data.data->'tender'->'status'
FROM data
JOIN release ON release.data_id=data.id
WHERE release.collection_id=1
""")
for row in rows:
if row[1] == 'direct' and (len(filters) == 1 or row[2] == 'planned'):
assert row[0] in data_ids
else:
assert row[0] not in data_ids
# Check contents of summary relations.
rows = db.all("""
SELECT
award_index,
release_type,
collection_id,
ocid,
release_id,
award_id,
title,
status,
description,
value_amount,
value_currency,
date,
contractperiod_startdate,
contractperiod_enddate,
contractperiod_maxextentdate,
contractperiod_durationindays,
total_suppliers,
total_documents,
document_documenttype_counts,
total_items
FROM view_data_collection_1.awards_summary
ORDER BY id, award_index
""")
assert rows[0] == (
0, # award_index
'release', # release_type
1, # collection_id
'officia dolore non', # ocid
'laborum irure consectetur fugiat', # release_id
'dolorLorem fugiat ut', # award_id
'et', # award_title
'pending', # award_status
'adipisicing ame', # award_description
decimal.Decimal('-7139109'), # award_value_amount
'AUD', # award_value_currency
datetime.datetime(3672, 10, 26, 4, 38, 28, 786000), # award_date
datetime.datetime(2192, 8, 27, 0, 9, 1, 626000), # award_contractperiod_startdate
datetime.datetime(4204, 1, 22, 22, 4, 18, 268000), # award_contractperiod_enddate
datetime.datetime(5117, 12, 26, 11, 33, 27, 496000), # award_contractperiod_maxextentdate
decimal.Decimal('-30383739'), # award_contractperiod_durationindays
5, # total_suppliers
4, # total_documents
{
'in sint enim labore': 1,
'mollit labore Lorem': 1,
'minim incididunt sed ipsum': 1,
'ad reprehenderit sit dolor enim': 1
}, # document_documenttype_counts
5, # total_items
)
if len(filters) > 1:
assert len(rows) == 7
else:
assert len(rows) == 55
rows = db.all("""
SELECT
party_index,
release_type,
collection_id,
ocid,
release_id,
party_id,
roles,
identifier,
unique_identifier_attempt,
additionalidentifiers_ids,
total_additionalidentifiers
FROM view_data_collection_1.parties_summary
ORDER BY id, party_index
""")
assert rows[0] == (
0, # party_index
'release', # release_type
1, # collection_id
'officia dolore non', # ocid
'laborum irure consectetur fugiat', # release_id
'eu voluptateeiusmod ipsum ea', # party_id
[
'laborum',
'tempor',
], # roles
'cupidatat consequat in ullamco-in incididunt commodo elit', # identifier
'eu voluptateeiusmod ipsum ea', # unique_identifier_attempt
[
'non ei-commododolor laborum',
], # additionalidentifiers_ids
1, # total_additionalidentifiers
)
if len(filters) > 1:
assert len(rows) == 5
else:
assert len(rows) == 56
if field_counts:
# Check contents of field_counts table.
rows = db.all('SELECT * FROM view_data_collection_1.field_counts')
if len(filters) > 1:
assert len(rows) == 1515
assert rows[0] == (1, 'release', 'awards', 2, 7, 2)
else:
assert len(rows) == 13077
assert rows[0] == (1, 'release', 'awards', 19, 55, 19)
if field_lists:
# Check the count of keys in the field_list field for the lowest primary keys in each summary relation.
statement = """
SELECT
count(*)
FROM
(SELECT
jsonb_each(field_list)
FROM (
SELECT
field_list
FROM
view_data_collection_1.{table}
ORDER BY
{primary_keys}
LIMIT 1) AS field_list
) AS each
"""
expected = {
'award_documents_summary': 11,
'award_items_summary': 29,
'award_suppliers_summary': 30,
'awards_summary': 492,
'buyer_summary': 31,
'contract_documents_summary': 11,
'contract_implementation_documents_summary': 11,
'contract_implementation_milestones_summary': 23,
'contract_implementation_transactions_summary': 83,
'contract_items_summary': 26,
'contract_milestones_summary': 26,
'contracts_summary': 492,
'parties_summary': 30,
'planning_documents_summary': 11,
'planning_milestones_summary': 27,
'planning_summary': 99,
'procuringentity_summary': 30,
'relatedprocesses_summary': 6,
'release_summary': 987,
'tender_documents_summary': 13,
'tender_items_summary': 28,
'tender_milestones_summary': 27,
'tender_summary': 265,
'tenderers_summary': 32,
}
for table_name, table in SUMMARIES.items():
count = db.one(db.format(statement, table=table_name, primary_keys=table.primary_keys))[0]
assert count == expected[table_name], f'{table_name}: {count} != {expected[table_name]}'
expected = []
for collection_id in [2, 1]:
expected.extend([
f'Arguments: collections=({collection_id},) note=Default name=None tables_only={tables_only!r} '
f'filters={filters!r}',
f'Added collection_{collection_id}',
'Running summary-tables routine',
])
if field_counts:
expected.append('Running field-counts routine')
if field_lists:
expected.append('Running field-lists routine')
assert result.exit_code == 0
assert result.output == ''
assert_log_records(caplog, command, expected)
| 40.092622 | 118 | 0.539958 | 0 | 0 | 0 | 0 | 23,799 | 0.931869 | 0 | 0 | 13,163 | 0.515408 |
57476587984e17ece720d64d289aa21890dba64a | 3,520 | py | Python | ReportGenerator.py | taarruunnnn/VAPT-Report-Generator-Vulnerability | 8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38 | [
"MIT"
]
| 1 | 2020-11-30T18:09:40.000Z | 2020-11-30T18:09:40.000Z | ReportGenerator.py | taarruunnnn/VAPT-Report-Generator-Vulnerability | 8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38 | [
"MIT"
]
| null | null | null | ReportGenerator.py | taarruunnnn/VAPT-Report-Generator-Vulnerability | 8d618c7ddac4f6fe0cedd9fa39ff61805e06fa38 | [
"MIT"
]
| 1 | 2020-09-16T20:51:18.000Z | 2020-09-16T20:51:18.000Z | import os
from docx import Document
from docx.shared import Inches
from docx import section
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Pt
from docx.shared import Cm
from docx.shared import RGBColor
import docx
class Print_document():
def start_doc(self):
self.document = Document()
def reinitialize_doc(self):
self.document = Document('Temp.docx')
def initialize_doc(self):
sections = self.document.sections
for section in sections:
section.top_margin = Cm(2.54)
section.bottom_margin = Cm(2.54)
section.left_margin = Cm(2.54)
section.right_margin = Cm(2.54)
style = self.document.styles['Normal']
font = style.font
font.name = 'Times New Roman'
font.size = Pt(14)
style = self.document.styles['Heading 2']
font1 = style.font
font1.name = 'TimesNewRoman'
font1.size = Pt(16)
header = self.document.sections[0].header
ht0=header.add_paragraph()
kh=ht0.add_run()
kh.add_picture('Pristine.png', width=Inches(2))
kh.alignment = WD_ALIGN_PARAGRAPH.LEFT
footer = self.document.sections[0].footer
f = footer.add_paragraph('All Rights Reserved by Pristine InfoSolutions Pvt. Ltd.')
f.alignment = WD_ALIGN_PARAGRAPH.CENTER
f.style = self.document.styles['Normal']
f.bold = True
f.size = Pt(16)
def setVname(self,Vname):
self.document.add_heading('Vulnerability Name:', 2)
p = self.document.add_paragraph(Vname)
p.style = self.document.styles['Normal']
def setTitle(self):
        self.document.add_paragraph("Network")
def setVSeverity(self,severity):
p = self.document.add_heading('Severity', 2)
p.style = self.document.styles['Heading 2']
p.bold = True
p.size = Pt(16)
p.name = 'TimesNewRoman'
p = self.document.add_paragraph(severity)
p.style = self.document.styles['Normal']
def SetVdesc(self,VDesc):
vuldesh = self.document.add_heading('Vulnerability Description:', 2)
p = self.document.add_paragraph(VDesc)
def setVurl(self,Vurl):
self.document.add_heading('Vulnerable URL: ', 2)
p = self.document.add_paragraph(Vurl)
p.style = self.document.styles['Normal']
def setImg(self,Img):
self.document.add_heading('Proof of Concept: ',2)
if (Img):
lengthImg = len(Img[0])
for i in range (0,lengthImg):
self.document.add_picture(Img[0][i], width=Cm(15.95))
def setImpact(self,VImpact):
self.document.add_heading('Impact: ',2)
p = self.document.add_paragraph(VImpact)
p.style = self.document.styles['Normal']
def setVremed(self,Vrem):
self.document.add_heading('Remediation', 2 )
p = self.document.add_paragraph(Vrem)
p.style = self.document.styles['Normal']
def setConclusion(self,Conclusion):
self.document.add_heading('Conclusion', 2 )
p = self.document.add_paragraph(Conclusion)
p.style = self.document.styles['Normal']
def pageBreak(self):
self.document.add_page_break()
def Savedoc(self,name):
self.document.save(name[0] + '.docx')
def Savereport(self):
self.document.save('Temp.docx') | 31.711712 | 92 | 0.609091 | 3,272 | 0.929545 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.10625 |
5750825ae1de9236544f8dff0657979e541dfed6 | 764 | py | Python | Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py | Pythobit/Python-tutorial | b0743eaa9c237c3578131ead1b3f2c295f11b7ee | [
"MIT"
]
| 3 | 2021-02-19T18:33:00.000Z | 2021-08-03T14:56:50.000Z | Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py | barawalojas/Python-tutorial | 3f4b2b073e421888b3d62ff634658317d9abcb9b | [
"MIT"
]
| 1 | 2021-07-10T14:37:57.000Z | 2021-07-20T09:51:39.000Z | Season 06 - Files in Python/Episode 02 - Copying Files.py/Episode 02 - Copying Files.py | barawalojas/Python-tutorial | 3f4b2b073e421888b3d62ff634658317d9abcb9b | [
"MIT"
]
| 1 | 2021-08-02T05:39:38.000Z | 2021-08-02T05:39:38.000Z | # Copying files
# Ask user for a list of 3 friends.
# for each friend, we'll tell user whether they're nearby.
# for each nearby friend, we'll save their name to `nearby_friends.txt`.
friends = input("Enter three friends' names (separated by commas): ").split(',')
people = open('people.txt', 'r')
people_nearby = [line.strip() for line in people.readlines()]
people.close()
# Making set of friends and peoples
friends_set = set(friends)
people_nearby_set = set(people_nearby)
friends_nearby_set = friends_set.intersection(people_nearby_set)
nearby_friends_file = open('nearby_friends.txt', 'w')
for friend in friends_nearby_set:
print(f'{friend} is nearby.! Meet up with them.')
nearby_friends_file.write(f'{friend}\n')
nearby_friends_file.close()
| 27.285714 | 77 | 0.743455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.471204 |
5750d5afb4b68c06b08670b53610fc887297a148 | 722 | py | Python | beginner_contest/167/C.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | [
"MIT"
]
| null | null | null | beginner_contest/167/C.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | [
"MIT"
]
| null | null | null | beginner_contest/167/C.py | FGtatsuro/myatcoder | 25a3123be6a6311e7d1c25394987de3e35575ff4 | [
"MIT"
]
| null | null | null | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n, m, x = map(int, input().split())
ca = [0] * n
ca_sum = [0] * (m+1)
for i in range(n):
ca[i] = list(map(int, input().split()))
for j in range(m+1):
ca_sum[j] += ca[i][j]
ans = 10 ** 10
for i in range(2 ** n):
tmp = 0
tmp_ca_sum = ca_sum.copy()
for j, v in enumerate(format(i, r'0{}b'.format(n))):
if v == '0':
continue
for k in range(m+1):
tmp_ca_sum[k] -= ca[j][k]
flag = True
for v2 in tmp_ca_sum[1:]:
if v2 < x:
flag = False
break
if flag:
ans = min(ans, tmp_ca_sum[0])
if ans == 10 ** 10:
print(-1)
else:
print(ans)
| 21.235294 | 56 | 0.49723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.01385 |
5752dc5277d06864407fc67287bd73391b57e2b0 | 923 | py | Python | src/ProjectHeart/forms.py | LokotamaTheMastermind/secret-password-saver | e97f139b2cad9e1b0e9283079252d9a76764e3c1 | [
"Unlicense"
]
| null | null | null | src/ProjectHeart/forms.py | LokotamaTheMastermind/secret-password-saver | e97f139b2cad9e1b0e9283079252d9a76764e3c1 | [
"Unlicense"
]
| null | null | null | src/ProjectHeart/forms.py | LokotamaTheMastermind/secret-password-saver | e97f139b2cad9e1b0e9283079252d9a76764e3c1 | [
"Unlicense"
]
| null | null | null | from django import forms
from .models import Passwords
class PasswordsForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(
attrs={'class': 'uk-form-width-large uk-input uk-width-expand'}), required=False)
email = forms.EmailField(widget=forms.TextInput(
attrs={'class': 'uk-form-width-large uk-input uk-width-expand'}), required=False)
password = forms.CharField(widget=forms.PasswordInput(
attrs={'class': 'uk-form-width-large uk-input uk-width-expand'}))
website_url = forms.CharField(widget=forms.URLInput(
attrs={'class': 'uk-form-width-large uk-input uk-width-expand'}))
website_name = forms.CharField(widget=forms.TextInput(
attrs={'class': 'uk-form-width-large uk-input uk-width-expand'}))
class Meta:
model = Passwords
fields = ['username', 'password', 'email',
'website_name', 'website_url']
| 43.952381 | 89 | 0.67714 | 865 | 0.937161 | 0 | 0 | 0 | 0 | 0 | 0 | 319 | 0.345612 |
57550dfdc85fef1e9e1bc0066478d7d691371d64 | 184 | py | Python | data_relay/src/plugins/AzureBlob.py | phil-d-wilson/connectorV2 | 7077aa1c74276e8e334a8046793e942eec8d9975 | [
"Apache-2.0"
]
| null | null | null | data_relay/src/plugins/AzureBlob.py | phil-d-wilson/connectorV2 | 7077aa1c74276e8e334a8046793e942eec8d9975 | [
"Apache-2.0"
]
| 49 | 2021-04-09T14:41:50.000Z | 2021-07-28T10:54:48.000Z | data_relay/src/plugins/AzureBlob.py | phil-d-wilson/connectorV2 | 7077aa1c74276e8e334a8046793e942eec8d9975 | [
"Apache-2.0"
]
| 2 | 2021-04-24T10:47:57.000Z | 2021-07-17T07:13:00.000Z | NAME = "Azure BLOB storage"
TYPE = "remote"
FILE = "AzureBlob.yaml"
VARS = [
"AZURE_BLOB_STORAGE_ACCOUNT",
"AZURE_BLOB_STORAGE_ACCOUNT_KEY",
"AZURE_BLOB_CONTAINER_NAME",
]
| 20.444444 | 37 | 0.717391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.711957 |
5755b24aeb6ff531368ac2aba89c8fd019b3b452 | 8,965 | py | Python | tests/application/test_helpers.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
]
| 1 | 2017-05-14T21:31:33.000Z | 2017-05-14T21:31:33.000Z | tests/application/test_helpers.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
]
| 33 | 2015-01-05T12:23:45.000Z | 2021-03-24T10:59:47.000Z | tests/application/test_helpers.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
]
| 4 | 2017-03-16T15:52:33.000Z | 2021-04-10T20:14:53.000Z | import unittest
from application import app
from application.helpers import(
requires_authentication,
requires_feature,
signed_in,
group_by_group,
signed_in_no_access,
no_access,
has_user_with_token,
view_helpers,
user_has_feature,
)
from hamcrest import assert_that, equal_to, is_
from mock import patch
class HelpersTestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def test_view_helper_user_has_feature(self):
user_has_feature = view_helpers()['user_has_feature']
assert_that(user_has_feature('edit-dashboards', {}), is_(False))
assert_that(user_has_feature('edit-dashboards',
{'permissions': ['signin']}),
is_(False))
assert_that(user_has_feature('edit-dashboards',
{'permissions': ['dashboard-editor']}),
is_(True))
@patch('application.helpers.signed_in')
def test_requires_login_redirects_when_no_user(self, signed_in_mock):
signed_in_mock.return_value = False
func = lambda x: x
wrapped_app_method = requires_authentication(func)
with app.test_request_context('/protected-resource', method='GET'):
response = wrapped_app_method()
assert_that(response.status_code, equal_to(302))
assert_that(response.headers['Location'], equal_to('/'))
@patch('application.helpers.signed_in')
def test_requires_feature_allows_access(self, signed_in_mock):
signed_in_mock.return_value = True
func = lambda: 'Decorator exited successfully'
wrapped_app_method = requires_feature('big-edit')
with app.test_request_context('/protected', method='GET') as context:
context.session.update({
'oauth_user': {'permissions': ['admin']},
})
response = wrapped_app_method(func)()
assert_that(response, is_('Decorator exited successfully'))
@patch('application.helpers.signed_in')
def test_requires_feature_redirects_when_not_signed_in(
self, signed_in_mock):
signed_in_mock.return_value = False
func = lambda: 'Decorator exited successfully'
wrapped_app_method = requires_feature('some-feature')
with app.test_request_context('/protected', method='GET') as context:
response = wrapped_app_method(func)()
assert_that(response.status_code, is_(302))
assert_that(response.headers['Location'], is_('/'))
@patch('application.helpers.signed_in')
def test_requires_feature_redirects_for_bad_permissions(
self, signed_in_mock):
signed_in_mock.return_value = True
func = lambda: 'Decorator exited successfully'
wrapped_app_method = requires_feature('some-feature')
with app.test_request_context('/protected', method='GET') as context:
context.session.update({
'oauth_user': {'permissions': ['normal-permission']},
})
response = wrapped_app_method(func)()
assert_that(response.status_code, is_(302))
assert_that(response.headers['Location'], is_('/'))
def test_has_user_with_token_returns_true_when_session_has_token_and_user(
self):
assert_that(has_user_with_token({
'oauth_token': {
'access_token': 'token'
},
'oauth_user': "bleep_bloop_blarp"
}), equal_to(True))
def test_has_user_with_token_false_when_session_has_no_token(self):
assert_that(has_user_with_token({
'oauth_user': "bleep_bloop_blarp"
}), equal_to(False))
def test_has_user_with_token_false_when_session_token_has_no_access_token(
self):
assert_that(has_user_with_token({
'oauth_token': {
},
'oauth_user': "bleep_bloop_blarp"
}), equal_to(False))
def test_has_user_with_token_is_false_when_session_has_no_user(self):
assert_that(has_user_with_token({
'oauth_token': {
'access_token': 'token'
}
}), equal_to(False))
def test_has_user_with_token_is_false_when_empty_session(self):
assert_that(has_user_with_token({}), equal_to(False))
def test_no_access_true_if_session_user_has_no_permissions(self):
assert_that(no_access({}), equal_to(True))
def test_no_access_true_if_session_user_hasnt_signin_permission(self):
assert_that(no_access({
'permissions': []
}), equal_to(True))
def test_no_access_false_if_session_user_has_signin_permission(
self):
assert_that(no_access({
'permissions': ['signin']
}), equal_to(False))
@patch('application.helpers.has_user_with_token')
@patch('application.helpers.no_access')
def test_signed_in_true_when_has_user_with_token_and_not_no_access(
self,
no_access_patch,
has_user_with_token_patch):
has_user_with_token_patch.return_value = True
no_access_patch.return_value = False
assert_that(signed_in({'oauth_user': 'user'}), equal_to(True))
@patch('application.helpers.has_user_with_token')
def test_signed_in_false_when_hasnt_user_with_token(
self,
has_user_with_token_patch):
has_user_with_token_patch.return_value = False
assert_that(signed_in({'oauth_user': 'user'}), equal_to(False))
@patch('application.helpers.has_user_with_token')
@patch('application.helpers.no_access')
def test_signed_in_false_when_has_user_with_token_and_no_access(
self,
no_access_patch,
has_user_with_token_patch):
has_user_with_token_patch.return_value = True
no_access_patch.return_value = True
assert_that(signed_in({'oauth_user': 'user'}), equal_to(False))
@patch('application.helpers.has_user_with_token')
@patch('application.helpers.no_access')
def test_signed_in_no_access_false_if_signed_in_and_not_no_access(
self,
no_access_patch,
has_user_with_token_patch):
has_user_with_token_patch.return_value = True
no_access_patch.return_value = False
assert_that(signed_in_no_access(
{'oauth_user': 'user'}), equal_to(False))
@patch('application.helpers.has_user_with_token')
def test_signed_in_no_access_false_when_hasnt_user_with_token(
self,
has_user_with_token_patch):
has_user_with_token_patch.return_value = False
assert_that(signed_in_no_access(
{'oauth_user': 'user'}), equal_to(False))
@patch('application.helpers.has_user_with_token')
@patch('application.helpers.no_access')
def test_signed_in_no_access_true_when_has_user_with_token_and_no_access(
self,
no_access_patch,
has_user_with_token_patch):
has_user_with_token_patch.return_value = True
no_access_patch.return_value = True
assert_that(signed_in_no_access(
{'oauth_user': 'user'}), equal_to(True))
def test_group_by_group_groups_datasets_by_group(self):
data_sets = [
{
'data_group': "group_1",
'data_type': "type1"
},
{
'data_group': "group_1",
'data_type': "type2"
},
{
'data_group': "group_2",
'data_type': "type3"
}
]
grouped_data_sets = {
"group_1": [
{
'data_group': "group_1",
'data_type': "type1"
},
{
'data_group': "group_1",
'data_type': "type2"
}
],
"group_2": [
{
'data_group': "group_2",
'data_type': "type3"
}
]
}
assert_that(group_by_group(data_sets), equal_to(grouped_data_sets))
def test_admin_user_has_bigedit_feature(self):
user = {'permissions': ['admin']}
assert_that(user_has_feature('big-edit', user), equal_to(True))
def test_dashboard_editor_user_does_not_have_bigedit_feature(self):
user = {'permissions': ['dashboard-editor']}
assert_that(user_has_feature('big-edit', user), equal_to(False))
def test_dashboard_editor_and_admin_user_does_have_bigedit_feature(self):
user = {'permissions': ['dashboard-editor', 'admin']}
assert_that(user_has_feature('big-edit', user), equal_to(True))
def test_user_with_permissions_not_in_list_features(self):
user = {'permissions': ['signin']}
assert_that(user_has_feature('big-edit', user), equal_to(False))
| 37.19917 | 78 | 0.636587 | 8,621 | 0.961629 | 0 | 0 | 4,603 | 0.513441 | 0 | 0 | 1,696 | 0.18918 |
575730cc1be427336b55d40ef3a3e2821b465a72 | 1,210 | py | Python | Unit 7/Ai bot/test bots/SlightlySmartSue.py | KevinBoxuGao/ICS3UI | 2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa | ["MIT"] | null | null | null | Unit 7/Ai bot/test bots/SlightlySmartSue.py | KevinBoxuGao/ICS3UI | 2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa | ["MIT"] | null | null | null | Unit 7/Ai bot/test bots/SlightlySmartSue.py | KevinBoxuGao/ICS3UI | 2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa | ["MIT"] | 1 | 2020-03-09T16:22:33.000Z | 2020-03-09T16:22:33.000Z | from random import *
#STRATEGY SUMMARY: DON'T DUCK IF THE OPPONENT HAS NO SNOWBALLS. OTHERWISE, PICK RANDOMLY.
def getMove( myScore, mySnowballs, myDucksUsed, myMovesSoFar,
oppScore, oppSnowballs, oppDucksUsed, oppMovesSoFar ):
if mySnowballs == 10: #I have 10 snowballs, so I must throw
return "THROW"
elif oppSnowballs > 0: #If opponent does have snowballs...
if mySnowballs == 0: #...and if I have no snowballs left
if myDucksUsed == 5: #...and if I have no ducks left either, then must RELOAD
return "RELOAD"
else: #...otherwise, pick between DUCK and RELOAD
return choice([ "DUCK", "RELOAD" ])
elif myDucksUsed == 5: #If my opponent and I both have snowballs left, but I'm out of ducks
return choice([ "THROW", "RELOAD" ])
else: #I have no restrictions
return choice([ "THROW", "DUCK", "RELOAD" ])
else: #If my opponent is out of snowballs, then don't duck!
if mySnowballs == 0:
return "RELOAD"
else:
return choice([ "RELOAD", "THROW" ])
| 31.842105 | 99 | 0.565289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.436364 |
5757a45f92b96ddd746ba5a5bd686085a734073c | 298 | py | Python | customers/views/customers.py | chorna/taxi24 | 09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c | ["MIT"] | null | null | null | customers/views/customers.py | chorna/taxi24 | 09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c | ["MIT"] | null | null | null | customers/views/customers.py | chorna/taxi24 | 09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c | ["MIT"] | null | null | null | from rest_framework import viewsets
from customers.models import Customer
from customers.serializers.customers import CustomerSerializer
# Create your views here.
class CustomerList(viewsets.ReadOnlyModelViewSet):
queryset = Customer.objects.all()
serializer_class = CustomerSerializer
| 24.833333 | 62 | 0.825503 | 130 | 0.436242 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.083893 |
57580cabba2c7dce9e5d8666af96b5e694af9738 | 5,370 | py | Python | pysoa/test/plan/grammar/directives/expects_values.py | zetahernandez/pysoa | 006e55ba877196a42c64f2ff453583d366082d55 | ["Apache-2.0"] | 91 | 2017-05-08T22:41:33.000Z | 2022-02-09T11:37:07.000Z | pysoa/test/plan/grammar/directives/expects_values.py | zetahernandez/pysoa | 006e55ba877196a42c64f2ff453583d366082d55 | ["Apache-2.0"] | 63 | 2017-06-14T20:08:49.000Z | 2021-06-16T23:08:25.000Z | pysoa/test/plan/grammar/directives/expects_values.py | zetahernandez/pysoa | 006e55ba877196a42c64f2ff453583d366082d55 | ["Apache-2.0"] | 26 | 2017-10-13T23:23:13.000Z | 2022-01-11T16:58:17.000Z | """
Expect action directives
"""
from __future__ import (
absolute_import,
unicode_literals,
)
from pyparsing import (
CaselessLiteral,
LineEnd,
Literal,
Optional,
Suppress,
)
from pysoa.test.plan.grammar.assertions import (
assert_not_expected,
assert_not_present,
assert_subset_structure,
)
from pysoa.test.plan.grammar.data_types import (
DataTypeGrammar,
get_parsed_data_type_value,
)
from pysoa.test.plan.grammar.directive import (
ActionDirective,
VarNameGrammar,
VarValueGrammar,
register_directive,
)
from pysoa.test.plan.grammar.tools import path_put
class ActionExpectsFieldValueDirective(ActionDirective):
"""
Set expectations for values to be in the service call response.
Using the ``not`` qualifier in the test will check to make sure that the field has any value other than the one
specified.
"""
@classmethod
def name(cls):
return 'expect_value'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsFieldValueDirective, cls).get_full_grammar() +
Literal('expect') +
Optional(DataTypeGrammar) +
':' +
Optional(Literal('not')('not')) +
Literal('attribute value') +
':' +
VarNameGrammar +
':' +
VarValueGrammar
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
variable_name = parse_results.variable_name
path = 'expects'
if getattr(parse_results, 'not', None):
path = 'not_expects'
path_put(
action_case,
'{}.{}'.format(path, variable_name),
get_parsed_data_type_value(parse_results, parse_results.value),
)
def assert_test_case_action_results(
self,
action_name,
action_case,
test_case,
test_fixture,
action_response,
job_response,
msg=None,
**kwargs
):
if 'expects' in action_case:
assert_subset_structure(
action_case.get('expects', {}),
action_response.body,
False,
msg,
)
if 'not_expects' in action_case:
assert_not_expected(
action_case['not_expects'],
action_response.body,
msg,
)
class ActionExpectsAnyDirective(ActionExpectsFieldValueDirective):
"""
Set expectations for values to be in the service call response where any value for the given data type will be
accepted.
"""
@classmethod
def name(cls):
return 'expect_any_value'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsFieldValueDirective, cls).get_full_grammar() +
Literal('expect') +
Literal('any')('any') +
Optional(DataTypeGrammar) +
':' +
Literal('attribute value') +
':' +
VarNameGrammar +
Optional(~Suppress(LineEnd()) + ':')
)
class ActionExpectsNoneDirective(ActionExpectsFieldValueDirective):
"""
Set expectations for values to be in the service call response where ``None`` value is expected.
"""
@classmethod
def name(cls):
return 'expect_none'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsFieldValueDirective, cls).get_full_grammar() +
Literal('expect') +
CaselessLiteral('None')('data_type') +
':' +
Literal('attribute value') +
':' +
VarNameGrammar +
Optional(~Suppress(LineEnd()) + ':')
)
class ActionExpectsNotPresentDirective(ActionDirective):
"""
Set expectation that the given field will not be present (even as a key) in the response.
"""
@classmethod
def name(cls):
return 'expect_not_present'
@classmethod
def get_full_grammar(cls):
return (
super(ActionExpectsNotPresentDirective, cls).get_full_grammar() +
Literal('expect not present') +
':' +
Literal('attribute value') +
':' +
VarNameGrammar +
Optional(~Suppress(LineEnd()) + ':')
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
path_put(
action_case,
'expects_not_present.{}'.format(parse_results.variable_name),
get_parsed_data_type_value(parse_results, parse_results.value),
)
def assert_test_case_action_results(
self,
action_name,
action_case,
test_case,
test_fixture,
action_response,
job_response,
msg=None,
**kwargs
):
if 'expects_not_present' in action_case:
assert_not_present(
action_case['expects_not_present'],
action_response.body,
msg,
)
register_directive(ActionExpectsFieldValueDirective)
register_directive(ActionExpectsAnyDirective)
register_directive(ActionExpectsNoneDirective)
register_directive(ActionExpectsNotPresentDirective)
| 26.716418 | 115 | 0.60298 | 4,531 | 0.843762 | 0 | 0 | 1,815 | 0.337989 | 0 | 0 | 994 | 0.185102 |
575871e8030b4782c2b2ff33f329031a54131855 | 454 | py | Python | src/manual/add_uuid_col.py | lshtm-gis/WHO_PHSM_Cleaning | 5892673922fc555fb86d6e0be548b48c7dc66814 | ["MIT"] | null | null | null | src/manual/add_uuid_col.py | lshtm-gis/WHO_PHSM_Cleaning | 5892673922fc555fb86d6e0be548b48c7dc66814 | ["MIT"] | 123 | 2020-10-12T11:06:27.000Z | 2021-04-28T15:32:29.000Z | src/manual/add_uuid_col.py | lshtm-gis/WHO_PHSM_Cleaning | 5892673922fc555fb86d6e0be548b48c7dc66814 | ["MIT"] | null | null | null | '''
Script to add uuid to existing records
Also shifts who_code values to original_who_code
'''
import uuid
import pandas as pd
manually_cleaned = pd.read_csv('data/cleansed/mistress_latest_old.csv', low_memory=False)
manually_cleaned['uuid'] = [str(uuid.uuid4()) for _ in range(len(manually_cleaned))]
manually_cleaned['original_who_code'] = manually_cleaned['who_code']
manually_cleaned.to_csv('data/cleansed/mistress_latest.csv', index = False)
| 25.222222 | 89 | 0.779736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.455947 |
575a4a3127b8298acd5fe22aa043d391fe755667 | 1,821 | py | Python | tests/test_qml.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | ["MIT"] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | tests/test_qml.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | ["MIT"] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | tests/test_qml.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | ["MIT"] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | """Tests for `prettyqt` package."""
import pathlib
import pytest
from prettyqt import core, qml
from prettyqt.utils import InvalidParamError
# def test_jsvalue():
# val = qml.JSValue(2)
# val["test"] = 1
# assert val["test"].toInt() == 1
# assert "test" in val
# assert val.get_value() == 2
def test_jsengine():
engine = qml.JSEngine()
engine.install_extensions("translation")
engine.eval("")
def test_qmlengine():
engine = qml.QmlEngine()
obj = core.Object()
engine.set_object_ownership(obj, "javascript")
with pytest.raises(InvalidParamError):
engine.set_object_ownership(obj, "test")
assert engine.get_object_ownership(obj) == "javascript"
engine.add_plugin_path("")
engine.add_import_path("")
engine.get_plugin_paths()
engine.get_import_paths()
def test_qmlapplicationengine(qtlog):
with qtlog.disabled():
engine = qml.QmlApplicationEngine()
for item in engine:
pass
path = pathlib.Path.cwd() / "tests" / "qmltest.qml"
engine.load_data(path.read_text())
def test_qmlcomponent():
comp = qml.QmlComponent()
assert comp.get_status() == "null"
# comp.load_url("", mode="asynchronous")
comp.get_url()
def test_jsvalue():
val = qml.JSValue(1)
assert val.get_error_type() is None
assert val.get_value() == 1
repr(val)
engine = qml.JSEngine()
val = engine.new_array(2)
val["test1"] = 1
val["test2"] = 2
assert val["test1"] == 1
assert "test2" in val
assert len(val) == 2
del val["test2"]
for n, v in val:
pass
val = qml.JSValue.from_object(None, engine)
val = qml.JSValue.from_object(1, engine)
val = qml.JSValue.from_object(["test"], engine)
val = qml.JSValue.from_object(dict(a="b"), engine)
| 24.945205 | 59 | 0.641406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.196595 |
9386838c937de37405273fac5771d31ccf1a0479 | 2,550 | py | Python | demo.py | HsienYu/tree_demo | aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc | ["Artistic-2.0"] | null | null | null | demo.py | HsienYu/tree_demo | aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc | ["Artistic-2.0"] | null | null | null | demo.py | HsienYu/tree_demo | aa2fa6c016b3ea5c1e768baa8ce4ea319c727bfc | ["Artistic-2.0"] | null | null | null | # Simple test for NeoPixels on Raspberry Pi
import time
import board
import neopixel
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 30
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.2, auto_write=False,
pixel_order=ORDER)
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
r = int(pos*2)
g = int(255 - pos*2)
b = 0
elif pos < 170:
pos -= 85
r = int(255 - pos*2)
g = 0
b = int(pos*2)
else:
pos -= 170
r = 0
g = int(pos*2)
b = int(255 - pos*2)
    return (r, g, b) if ORDER in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)
def rainbow_cycle(wait):
for j in range(255):
for i in range(num_pixels):
pixel_index = (i * 256 // num_pixels) + j
pixels[i] = wheel(pixel_index & 255)
pixels.show()
time.sleep(wait)
def white_breath():
    # Fade the whole strip up from black to full white, then back down again.
    interval_time = 0.007
    time.sleep(1)
    for i in range(255):
        pixels.fill((i, i, i))
        pixels.show()
        time.sleep(interval_time)
    for i in range(255, 0, -1):
        pixels.fill((i, i, i))
        pixels.show()
        time.sleep(interval_time)
def repeat_fun(times, f, *args):
for i in range(times):
f(*args)
try:
while True:
print("light start")
repeat_fun(5, white_breath)
# rainbow cycle with 1ms delay per step
repeat_fun(3, rainbow_cycle, 0.01)
# white_breath()
# for i in range(num_pixels):
# for r in range(255):
# pixels[i] = (r, 0, 0)
# pixels.show()
# time.sleep(0.001)
# j = i - 1
# for y in range(255):
# pixels[j] = (y, y, y)
# pixels.show()
# time.sleep(0.001)
# time.sleep(0.01)
except KeyboardInterrupt:
print("KeyboardInterrupt has been caught.")
| 25.757576 | 92 | 0.533333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 847 | 0.332157 |
93880a88a41dae3cf1a05e55925780f80609dbdb | 1,774 | py | Python | fsm.py | yusun1997/Chatbot | ee49d4a64857889ce1d1a8659a1de15cf062bd77 | ["MIT"] | null | null | null | fsm.py | yusun1997/Chatbot | ee49d4a64857889ce1d1a8659a1de15cf062bd77 | ["MIT"] | null | null | null | fsm.py | yusun1997/Chatbot | ee49d4a64857889ce1d1a8659a1de15cf062bd77 | ["MIT"] | null | null | null | from transitions.extensions import GraphMachine
class TocMachine(GraphMachine):
def __init__(self, **machine_configs):
self.machine = GraphMachine(
model = self,
**machine_configs
)
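    # Illustrative only -- the **machine_configs are assumed to carry the usual
    # `transitions` setup supplied by the caller, roughly (hypothetical values):
    #   states=['user', 'state1', 'state2', 'state3', 'state4', 'statedefault'],
    #   transitions=[{'trigger': 'advance', 'source': 'user', 'dest': 'state1',
    #                 'conditions': 'is_going_to_state1'}, ...],
    #   initial='user', auto_transitions=False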
def is_going_to_state1(self, update):
text = update.message.text
return text.lower() == 'hi'
def is_going_to_state2(self, update):
text = update.message.text
return text.lower() == 'i want to play a game'
def is_going_to_state3(self, update):
text = update.message.text
return text.lower() == 'you idiot'
def is_going_to_state4(self, update):
text = update.message.text
return text.lower() == 'bye'
def is_going_to_statedefault(self, update):
return True
def on_enter_statedefault(self, update):
update.message.reply_text("-Hi \n -I want to play a geme \n -you idiot \n -bye ")
self.go_back(update)
def on_exit_statedefault(self, update):
print('Leaving state default')
def on_enter_state1(self, update):
update.message.reply_text("Hi")
self.go_back(update)
def on_exit_state1(self, update):
print('Leaving state1')
def on_enter_state2(self, update):
update.message.reply_text("But I don't want to")
self.go_back(update)
def on_exit_state2(self, update):
print('Leaving state2')
def on_enter_state3(self, update):
update.message.reply_text("fuck you")
self.go_back(update)
def on_exit_state3(self, update):
print('Leaving state3')
def on_enter_state4(self, update):
update.message.reply_text("Bye~")
self.go_back(update)
def on_exit_state4(self, update):
print('Leaving state4')
| 27.71875 | 89 | 0.638106 | 1,723 | 0.971251 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.126832 |
93881978c162edde4ca5dd970ae7fc5d1d4dfecc | 1,861 | py | Python | rptk/query/__init__.py | wolcomm/rptk | fe6c1b597741ff14e4c89519458bb0950f0aa955 | ["Apache-2.0"] | 15 | 2017-11-30T01:28:11.000Z | 2021-08-12T09:17:36.000Z | rptk/query/__init__.py | wolcomm/rptk | fe6c1b597741ff14e4c89519458bb0950f0aa955 | ["Apache-2.0"] | 71 | 2018-06-22T09:54:50.000Z | 2020-10-21T07:10:54.000Z | rptk/query/__init__.py | wolcomm/rptk | fe6c1b597741ff14e4c89519458bb0950f0aa955 | ["Apache-2.0"] | 2 | 2019-08-31T20:45:19.000Z | 2019-10-02T18:26:58.000Z | # Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.query module."""
from __future__ import print_function
from __future__ import unicode_literals
from rptk.base import BaseObject
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class BaseQuery(BaseObject):
"""Base class for the definition of query execution classes."""
posix_only = False
def __init__(self, **opts):
"""Initialise new object."""
super(BaseQuery, self).__init__()
self.log_init()
self._opts = opts
self.log_init_done()
def query(self, *objects):
"""Check the object name type."""
self.log_method_enter(method=self.current_method)
for obj in objects:
if not isinstance(obj, basestring):
self.raise_type_error(arg=obj, cls=basestring)
obj = unicode(obj)
yield obj
@property
def host(self):
"""Get the configured IRR server hostname."""
return self.opts["host"]
@property
def port(self):
"""Get the configured IRR server port."""
return int(self.opts["port"])
@property
def target(self):
"""Construct a hostname:port pair for the IRR server."""
return "{}:{}".format(self.host, self.port)
| 27.776119 | 79 | 0.667383 | 1,034 | 0.555615 | 318 | 0.170876 | 381 | 0.204729 | 0 | 0 | 868 | 0.466416 |
93888830e4d4bc95cb50e37baa9660d706afdc8a | 1,697 | py | Python | test/__main__.py | harisekhon/pylib | 1d8fcfc0a26251a832536a5ff6bf0ef618b8508e | ["MIT"] | 1 | 2015-12-17T21:08:22.000Z | 2015-12-17T21:08:22.000Z | test/__main__.py | harisekhon/pylib | 1d8fcfc0a26251a832536a5ff6bf0ef618b8508e | ["MIT"] | null | null | null | test/__main__.py | harisekhon/pylib | 1d8fcfc0a26251a832536a5ff6bf0ef618b8508e | ["MIT"] | null | null | null | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2015-11-14 12:21:54 +0000 (Sat, 14 Nov 2015)
#
# https://github.com/HariSekhon/pylib
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve or steer this or other code I publish
#
# http://www.linkedin.com/in/harisekhon
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
#import glob
#import inspect
#import subprocess
#import sys
## using optparse rather than argparse for servers still on Python 2.6
#from optparse import OptionParser
# libdir = os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
libdir = os.path.join(os.path.dirname(__file__), '..')
# sys.path.append(libdir)
# try:
# from harisekhon.utils import *
# except ImportError, e:
# print('module import failed: %s' % e)
# sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.1'
def main():
print('running unit tests')
# this doesn't allow coverage to follow the code and see what's been covered
# for x in glob.glob(libdir + "/test/test_*.py"):
# if subprocess.call(['python', x]):
# sys.exit(2)
# subprocess.check_call(['python', x])
# pylint: disable=redefined-outer-name,reimported
from test.test_utils import main
main()
from test.test_cli import main
main()
from test.test_nagiosplugin import main
main()
from test.test_threshold import main
main()
if __name__ == '__main__':
main()
| 28.283333 | 88 | 0.700648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,130 | 0.665881 |